max_stars_repo_path (string, length 4-286) | max_stars_repo_name (string, length 5-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.03M) | content_cleaned (string, length 6-1.03M) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, length 0-556k) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---|
Library.py | verkaufer/CliLibrary | 0 | 6631151 | import argparse
#import library as Library
from Book import Book
from Bookshelf import Bookshelf
bookshelf = Bookshelf()
def addBook(args):
book = Book(args.title, args.author)
bookshelf.add(book)
def removeBook(args):
if(args.all):
print("all books")
elif(args.title):
print("remove book with title")
def readBook(args):
bookshelf.read(args.title)
def showBooks(args):
pass
def main():
parser = argparse.ArgumentParser(description="Command line utility to add/remove/list/update books in a virtual library.")
subparsers = parser.add_subparsers(help='subcommand help')
## Add book
parser_add = subparsers.add_parser('add', help='Add new book to library')
parser_add.add_argument('title', help='Title of the book')
parser_add.add_argument('author', help='Author of the book')
parser_add.set_defaults(func=addBook)
## Remove book(s)
parser_remove = subparsers.add_parser('remove', help='Remove book(s) from library')
r_group = parser_remove.add_mutually_exclusive_group(required=True)
r_group.add_argument('-all', '-a', help='Remove all books', action='store_true')
r_group.add_argument('-title', '-t', help='Remove specific book with title')
parser_remove.set_defaults(func=removeBook) ## dispatches to removeBook, which handles the -all / -title options
## Read book
parser_readBook = subparsers.add_parser('read', help='Read book with given title')
parser_readBook.add_argument('title', help='Title of book to read')
parser_readBook.set_defaults(func=readBook)
## Show book(s)
parser_show = subparsers.add_parser('show', help='Show list of books in library')
parser_show.add_argument('-author', help='Show books by author')
show_group = parser_show.add_mutually_exclusive_group()
show_group.add_argument('-unread', '-u', help='Show unread books', action='store_true')
show_group.add_argument('-read', '-r', help='Show read books', action='store_true')
parser_show.set_defaults(func=showBooks)
# Bookmark book (note: no handler function is wired up for this subcommand yet)
parser_bookmark = subparsers.add_parser('bookmark', help='Add bookmark to a book')
parser_bookmark.add_argument('title', help='Title of the book')
parser_bookmark.add_argument('page', help='Page to set bookmark', type=int)
## final setup
args = parser.parse_args()
#print args
args.func(args)
if __name__ == "__main__":
main()
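# --- Illustrative usage sketch (not part of the original file) ---
# Assuming Book and Bookshelf behave as their names suggest, the CLI defined
# above would be driven from a shell like this:
#   python Library.py add "Dune" "Frank Herbert"
#   python Library.py read "Dune"
#   python Library.py remove -t "Dune"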
| en | 0.56992 | #import library as Library ## Add book ## Remove book(s) ## this will print 'hello' and the -title argument ## Read book ## Show book(s) # Bookmark book ## final setup #print args | 3.640239 | 4 |
clickhouse/tests/test_clickhouse.py | isaachui/integrations-core | 0 | 6631152 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.clickhouse import ClickhouseCheck
from .common import CLICKHOUSE_VERSION
from .metrics import ALL_METRICS
pytestmark = [pytest.mark.integration, pytest.mark.usefixtures('dd_environment')]
def test_check(aggregator, instance):
# We do not do aggregator.assert_all_metrics_covered() because depending on timing, some other metrics may appear
check = ClickhouseCheck('clickhouse', {}, [instance])
check.run()
server_tag = 'server:{}'.format(instance['server'])
port_tag = 'port:{}'.format(instance['port'])
for metric in ALL_METRICS:
aggregator.assert_metric_has_tag(metric, server_tag)
aggregator.assert_metric_has_tag(metric, port_tag)
aggregator.assert_metric_has_tag(metric, 'db:default')
aggregator.assert_metric_has_tag(metric, 'foo:bar')
aggregator.assert_metric('clickhouse.table.replicated.total', 2)
aggregator.assert_metric(
'clickhouse.dictionary.item.current', tags=[server_tag, port_tag, 'db:default', 'foo:bar', 'dictionary:test']
)
aggregator.assert_service_check("clickhouse.can_connect", count=1)
def test_can_connect(aggregator, instance):
"""
Regression test: a copy of the `can_connect` service check must be submitted for each check run.
(It used to be submitted only once on check init, which led to customer seeing "no data" in the UI.)
"""
check = ClickhouseCheck('clickhouse', {}, [instance])
num_runs = 3
for _ in range(num_runs):
check.run()
aggregator.assert_service_check("clickhouse.can_connect", count=num_runs)
def test_custom_queries(aggregator, instance):
instance['custom_queries'] = [
{
'tags': ['test:clickhouse'],
'query': 'SELECT COUNT(*) FROM system.settings WHERE changed',
'columns': [{'name': 'settings.changed', 'type': 'gauge'}],
}
]
check = ClickhouseCheck('clickhouse', {}, [instance])
check.run()
aggregator.assert_metric(
'clickhouse.settings.changed',
metric_type=0,
tags=[
'server:{}'.format(instance['server']),
'port:{}'.format(instance['port']),
'db:default',
'foo:bar',
'test:clickhouse',
],
)
@pytest.mark.skipif(CLICKHOUSE_VERSION == 'latest', reason='Version `latest` is ever-changing, skipping')
def test_version_metadata(instance, datadog_agent):
check = ClickhouseCheck('clickhouse', {}, [instance])
check.check_id = 'test:123'
check.run()
datadog_agent.assert_metadata('test:123', {'version.scheme': 'calver', 'version.year': CLICKHOUSE_VERSION})
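# --- Illustrative note (not part of the original file) ---
# Because of the module-level pytestmark above, these are integration tests that
# rely on the `dd_environment` fixture to provide a running ClickHouse instance.
# Locally, a plain pytest invocation would look something like
#   pytest clickhouse/tests/test_clickhouse.py -m integration
# (the exact command depends on the repository's tooling and environment setup).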
| en | 0.807738 | # (C) Datadog, Inc. 2019-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) # We do not do aggregator.assert_all_metrics_covered() because depending on timing, some other metrics may appear Regression test: a copy of the `can_connect` service check must be submitted for each check run. (It used to be submitted only once on check init, which led to customer seeing "no data" in the UI.) | 2.118991 | 2 |
tests/test_message_handler/test_strategies/test_handle_rpc_query_message.py | Zapix/mtpylon | 9 | 6631153 |
# -*- coding: utf-8 -*-
from unittest.mock import patch, MagicMock, AsyncMock
import pytest
from mtpylon import long
from mtpylon.messages import EncryptedMessage
from mtpylon.serialization import CallableFunc
from mtpylon.message_handler.strategies.handle_rpc_query_message import (
handle_rpc_query_message,
run_rpc_query
)
from mtpylon.service_schema.constructors import RpcResult, RpcError, Message
from mtpylon.contextvars import server_salt_var, session_id_var
from tests.simpleschema import get_task, set_task, Task
msg_id = long(0x51e57ac42770964a)
server_salt = long(16009147158398906513)
session_id = long(11520911270507767959)
@pytest.mark.asyncio
async def test_handle_rpc_query_create_task():
request = MagicMock()
sender = MagicMock(send_encrypted_message=AsyncMock())
message = EncryptedMessage(
message_id=msg_id,
session_id=session_id,
salt=server_salt,
seq_no=0,
message_data=CallableFunc(
func=set_task,
params={'content': 'hello world'}
)
)
create_task = MagicMock()
with patch(
'mtpylon.message_handler.strategies.handle_rpc_query_message.create_task', # noqa
create_task
):
await handle_rpc_query_message([], sender, request, message)
create_task.assert_called()
@pytest.mark.asyncio
@pytest.mark.parametrize(
'message',
[
pytest.param(
EncryptedMessage(
message_id=msg_id,
session_id=session_id,
salt=server_salt,
seq_no=0,
message_data=CallableFunc(
func=set_task,
params={'content': 'hello world'}
)
),
id='encrypted message'
),
pytest.param(
Message(
msg_id=msg_id,
seqno=9,
bytes=16,
body=CallableFunc(
func=set_task,
params={'content': 'hello world'}
)
),
id='message constructor'
),
]
)
async def test_run_rpc_query_success(message):
request = MagicMock()
sender = MagicMock(send_encrypted_message=AsyncMock())
server_salt_var.set(server_salt)
session_id_var.set(session_id)
await run_rpc_query([], sender, request, message)
sender.send_encrypted_message.assert_awaited()
args = sender.send_encrypted_message.await_args[0]
server_salt_encrypt = args[1]
assert server_salt_encrypt == server_salt
rpc_result = args[3]
assert isinstance(rpc_result, RpcResult)
assert rpc_result.req_msg_id == msg_id
task = rpc_result.result
assert isinstance(task, Task)
assert task.content == 'hello world'
assert task.id == 1
@pytest.mark.asyncio
async def test_run_rpc_query_error():
request = MagicMock()
sender = MagicMock(send_encrypted_message=AsyncMock())
message = EncryptedMessage(
message_id=msg_id,
session_id=session_id,
salt=server_salt,
seq_no=0,
message_data=CallableFunc(
func=get_task,
params={'task_id': 4},
)
)
server_salt_var.set(server_salt)
session_id_var.set(session_id)
await run_rpc_query([], sender, request, message)
sender.send_encrypted_message.assert_awaited()
args = sender.send_encrypted_message.await_args[0]
rpc_result = args[3]
assert isinstance(rpc_result, RpcResult)
assert rpc_result.req_msg_id == msg_id
error = rpc_result.result
assert isinstance(error, RpcError)
assert error.error_code == 404
@pytest.mark.asyncio
async def test_run_rpc_unexpected_error():
request = MagicMock()
sender = MagicMock(send_encrypted_message=AsyncMock())
message = EncryptedMessage(
message_id=msg_id,
session_id=session_id,
salt=server_salt,
seq_no=0,
message_data=CallableFunc(
func=get_task,
params={'task_id': 3},
)
)
server_salt_var.set(server_salt)
session_id_var.set(session_id)
await run_rpc_query([], sender, request, message)
sender.send_encrypted_message.assert_awaited()
args = sender.send_encrypted_message.await_args[0]
rpc_result = args[3]
assert isinstance(rpc_result, RpcResult)
assert rpc_result.req_msg_id == msg_id
error = rpc_result.result
assert isinstance(error, RpcError)
assert error.error_code == 0
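# --- Illustrative note (not part of the original file) ---
# Read together, these tests pin down the contract of run_rpc_query: the object
# passed to sender.send_encrypted_message is always an RpcResult echoing the
# request's msg_id, wrapping either the schema function's return value, an
# RpcError with code 404 for an expected failure, or an RpcError with code 0
# for an unexpected exception.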
| en | 0.744791 | # -*- coding: utf-8 -*- # noqa | 2.026654 | 2 |
setup.py | MikeSmithLabTeam/particletracker | 2 | 6631154 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='particletracker',
version='2.0',
packages=setuptools.find_packages(
exclude=('tests', 'docs')
),
url='https://github.com/MikeSmithLabTeam/particletracker',
install_requires=[
'opencv-python',
'numpy',
'matplotlib',
'qimage2ndarray',
'tqdm',
'pandas',
'trackpy',
'tables',
'labvision @ git+https://github.com/MikeSmithLabTeam/labvision',
'filehandling @ git+https://github.com/MikeSmithLabTeam/filehandling'
],
include_package_data=True,
# dependency_links=[
# 'https://github.com/MikeSmithLabTeam/labvision/tarball/repo/master#egg=package-1.0',
# 'https://github.com/MikeSmithLabTeam/filehandling/tarball/repo/master#egg=package-1.0',
# ],
)
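# --- Illustrative note (not part of the original file) ---
# With this setup.py, the package would typically be installed from a checkout with
#   pip install .
# which also resolves the git-hosted labvision and filehandling dependencies
# declared in install_requires above.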
| en | 0.318585 | # dependency_links=[ # 'https://github.com/MikeSmithLabTeam/labvision/tarball/repo/master#egg=package-1.0', # 'https://github.com/MikeSmithLabTeam/filehandling/tarball/repo/master#egg=package-1.0', # ], | 1.188243 | 1 |
build/lib/gradio/outputs.py | Chetan8000/gradio | 1 | 6631155 |
"""
This module defines various classes that can serve as the `output` to an interface. Each class must inherit from
`AbstractOutput`, and each class must define a path to its template. All of the subclasses of `AbstractOutput` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""
from abc import ABC, abstractmethod
import numpy as np
import json
from gradio import preprocessing_utils
import datetime
import operator
from numbers import Number
# Where to find the static resources associated with each template.
BASE_OUTPUT_INTERFACE_JS_PATH = 'static/js/interfaces/output/{}.js'
class AbstractOutput(ABC):
"""
An abstract class for defining the methods that all gradio inputs should have.
When this is subclassed, it is automatically added to the registry
"""
def __init__(self, label):
self.label = label
def get_template_context(self):
"""
:return: a dictionary with context variables for the javascript file associated with the context
"""
return {"label": self.label}
def postprocess(self, prediction):
"""
Any postprocessing needed to be performed on function output.
"""
return prediction
@classmethod
def get_shortcut_implementations(cls):
"""
Return dictionary of shortcut implementations
"""
return {}
class Label(AbstractOutput):
LABEL_KEY = "label"
CONFIDENCE_KEY = "confidence"
CONFIDENCES_KEY = "confidences"
def __init__(self, num_top_classes=None, label=None):
self.num_top_classes = num_top_classes
super().__init__(label)
def postprocess(self, prediction):
if isinstance(prediction, str) or isinstance(prediction, Number):
return {"label": str(prediction)}
elif isinstance(prediction, dict):
sorted_pred = sorted(
prediction.items(),
key=operator.itemgetter(1),
reverse=True
)
if self.num_top_classes is not None:
sorted_pred = sorted_pred[:self.num_top_classes]
return {
self.LABEL_KEY: sorted_pred[0][0],
self.CONFIDENCES_KEY: [
{
self.LABEL_KEY: pred[0],
self.CONFIDENCE_KEY: pred[1]
} for pred in sorted_pred
]
}
elif isinstance(prediction, int) or isinstance(prediction, float):
return {self.LABEL_KEY: str(prediction)}
else:
raise ValueError("The `Label` output interface expects one of: a string label, or an int label, a "
"float label, or a dictionary whose keys are labels and values are confidences.")
@classmethod
def get_shortcut_implementations(cls):
return {
"label": {},
}
class KeyValues(AbstractOutput):
def __init__(self, label=None):
super().__init__(label)
def postprocess(self, prediction):
if isinstance(prediction, dict):
return prediction
else:
raise ValueError("The `KeyValues` output interface expects an output that is a dictionary whose keys are "
"labels and values are corresponding values.")
@classmethod
def get_shortcut_implementations(cls):
return {
"key_values": {},
}
class Textbox(AbstractOutput):
def __init__(self, label=None):
super().__init__(label)
def get_template_context(self):
return {
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"text": {},
"textbox": {},
"number": {},
}
def postprocess(self, prediction):
if isinstance(prediction, str) or isinstance(prediction, int) or isinstance(prediction, float):
return str(prediction)
else:
raise ValueError("The `Textbox` output interface expects an output that is one of: a string, or"
"an int/float that can be converted to a string.")
class Image(AbstractOutput):
def __init__(self, plot=False, label=None):
self.plot = plot
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"image": {},
"plot": {"plot": True}
}
def postprocess(self, prediction):
"""
"""
if self.plot:
try:
return preprocessing_utils.encode_plot_to_base64(prediction)
except:
raise ValueError("The `Image` output interface expects a `matplotlib.pyplot` object"
"if plt=True.")
else:
try:
return preprocessing_utils.encode_array_to_base64(prediction)
except:
raise ValueError("The `Image` output interface (with plt=False) expects a numpy array.")
def rebuild_flagged(self, dir, msg):
"""
Default rebuild method to decode a base64 image
"""
im = preprocessing_utils.decode_base64_to_image(msg)
timestamp = datetime.datetime.now()
filename = 'output_{}.png'.format(timestamp.
strftime("%Y-%m-%d-%H-%M-%S"))
im.save('{}/{}'.format(dir, filename), 'PNG')
return filename
# Automatically adds all shortcut implementations in AbstractInput into a dictionary.
shortcuts = {}
for cls in AbstractOutput.__subclasses__():
for shortcut, parameters in cls.get_shortcut_implementations().items():
shortcuts[shortcut] = cls(**parameters)
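# --- Illustrative note (not part of the original file) ---
# The resulting registry maps shortcut strings to ready-made output instances,
# e.g. shortcuts["label"] is a Label() and shortcuts["plot"] is an Image(plot=True).
# Only subclasses defined before this loop runs are picked up, because
# AbstractOutput.__subclasses__() is evaluated once at import time.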
| en | 0.831247 | This module defines various classes that can serve as the `output` to an interface. Each class must inherit from `AbstractOutput`, and each class must define a path to its template. All of the subclasses of `AbstractOutput` are automatically added to a registry, which allows them to be easily referenced in other parts of the code. # Where to find the static resources associated with each template. An abstract class for defining the methods that all gradio inputs should have. When this is subclassed, it is automatically added to the registry :return: a dictionary with context variables for the javascript file associated with the context Any postprocessing needed to be performed on function output. Return dictionary of shortcut implementations Default rebuild method to decode a base64 image # Automatically adds all shortcut implementations in AbstractInput into a dictionary. | 2.455218 | 2 |
enforce_constants/transformer.py | aroberge/import-experiments | 0 | 6631156 |
import re
# For this example, we use simple regular expressions to identify
# lines of code that correspond to variable assignments. It is assumed
# that each assignment is done on a single line of code.
# This approach can change values within triple-quoted strings
# and does not capture all the possible cases for variable assignments.
# It is simply used as a quick demonstration.
# A basic assignment pattern we look for is something like
# python_identifier = whatever
# which can be an indented statement.
assignment_pattern = re.compile(r"^\s*([\w][\w\d]*)\s*=\s*(.+)")
# Note that the regex used for Python identifiers might not cover all
# possible valid identifiers with non-ascii characters.
# We also include something like
# python_identifier : Final = whatever
# but assume that it would not be indented.
final_declaration_pattern = re.compile(r"^([\w][\w\d]*)\s*:\s*Final\s*=\s*(.+)")
def transform_assignment(source):
"""Identifies simple assignments, including those with a Final type
hint, and replace them by a special function call.
So, something like
name = value
gets replaced by something like
sys.modules[__name__].__setattr__(name, value)
"""
# We are going to add an import to Python's sys module and want to make
# sure that it won't conflict with any variable in the source
if "sys" not in source:
sys_name = "sys"
else:
i = 0
while True:
sys_name = "sys" + str(i)
if sys_name not in source:
break
i += 1
lines = source.split("\n")
new_lines = ["import sys as %s" % sys_name]
for line in lines:
match = re.search(assignment_pattern, line)
match_final = re.search(final_declaration_pattern, line)
if match:
name = match.group(1)
indent = len(line) - len(line.lstrip())
value = match.group(2)
new_lines.append(
" " * indent
+ "%s.modules[__name__].__setattr__(" % sys_name
+ "'%s', (%s))" % (name, value)
)
elif match_final:
name = match_final.group(1)
value = match_final.group(2)
new_lines.append(
"%s.modules[__name__].__setattr__(" % sys_name
+ "'%s', (%s), final=True)" % (name, value)
)
else:
new_lines.append(line)
return "\n".join(new_lines)
| en | 0.911405 | # For this example, we use simple regular expressions to identify # lines of code that correspond to variable assignments. It is assumed # that each assignment is done on a single line of code. # This approach can change values within triple-quoted strings # and does not capture all the possible cases for variable assignments. # It is simply used as a quick demonstration. # A basic assignement pattern we look for is something like # python_identifier = whatever # which can be an indented statement. # Note that the regex used for Python identifiers might not cover all # possible valid identifiers with non-ascii characters. # We also include something like # python_identifier : Final = whatever # but assume that it would not be indented. Identifies simple assignments, including those with a Final type hint, and replace them by a special function call. So, something like name = value gets replaced by something like sys.modules[__name__].__setattr__(name, value) # We are going to add an import to Python's sys module and want to make # sure that it won't conflict with any variable in the source | 3.811078 | 4 |
cosmos/galaxies/settings/__init__.py | MrFreemanser/Bot | 0 | 6631157 | from .administrator import AdministratorSettings
__all__ = [
"AdministratorSettings",
]
def setup(bot):
bot.plugins.setup(__file__)
| none | 1 | 1.261777 | 1 |
|
hedgehog/bayes_net.py | dormanh/hedgehog | 0 | 6631158 | <reponame>dormanh/hedgehog
import collections
import functools
import itertools
import graphlib
import queue
import typing
import numpy as np
import pandas as pd
import vose
__all__ = ['BayesNet']
@pd.api.extensions.register_series_accessor('cdt')
class CDTAccessor:
"""
Adds utilities to a pandas.Series to help manipulate it as a conditional probability
table (CDT).
"""
def __init__(self, series: pd.Series):
self.series = series
self.sampler = None
def sample(self):
"""Sample a row at random.
The `sample` method of a Series is very slow. Additionally, it is not designed to be used
repetitively and requires O(n) steps every time it is called. Instead, we use a Cython
implemention of Vose's alias method that takes O(n) time to build and O(1) time to query.
"""
if self.sampler is None:
self.sampler = vose.Sampler(
weights=self.series.to_numpy(dtype=float),
seed=np.random.randint(2 ** 16)
)
idx = self.sampler.sample()
return self.series.index[idx]
@functools.lru_cache(maxsize=256)
def __getitem__(self, idx):
"""Cached row accessor.
Accessing a row of pandas.Series is very inefficient. This method caches the row accesses
and therefore circumvents the issue.
"""
return self.series[idx]
def sum_out(self, *variables):
"""Sums out a variable from a multi-indexed series.
Examples
--------
Example taken from figure 14.10 of Artificial Intelligence: A Modern Approach.
>>> a = pd.Series({
... ('T', 'T'): .3,
... ('T', 'F'): .7,
... ('F', 'T'): .9,
... ('F', 'F'): .1
... })
>>> a.index.names = ['A', 'B']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['B', 'C']
>>> ab = pointwise_mul_two(a, b)
>>> ab
B A C
F T T 0.42
F 0.28
F T 0.06
F 0.04
T T T 0.06
F 0.24
F T 0.18
F 0.72
dtype: float64
>>> ab.cdt.sum_out('B')
A C
F F 0.76
T 0.24
T F 0.52
T 0.48
dtype: float64
"""
nodes = list(self.series.index.names)
for var in variables:
nodes.remove(var)
return self.series.groupby(nodes).sum()
def pointwise_mul_two(left: pd.Series, right: pd.Series):
"""Pointwise multiplication of two series.
Examples
--------
Example taken from figure 14.10 of Artificial Intelligence: A Modern Approach.
>>> a = pd.Series({
... ('T', 'T'): .3,
... ('T', 'F'): .7,
... ('F', 'T'): .9,
... ('F', 'F'): .1
... })
>>> a.index.names = ['A', 'B']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['B', 'C']
>>> pointwise_mul_two(a, b)
B A C
F T T 0.42
F 0.28
F T 0.06
F 0.04
T T T 0.06
F 0.24
F T 0.18
F 0.72
dtype: float64
This method returns the Cartesian product in case the two series don't share any part of their
index.
>>> a = pd.Series({
... ('T', 'T'): .3,
... ('T', 'F'): .7,
... ('F', 'T'): .9,
... ('F', 'F'): .1
... })
>>> a.index.names = ['A', 'B']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['C', 'D']
>>> pointwise_mul_two(a, b)
A B C D
T T F F 0.12
T 0.18
T F 0.24
T 0.06
F F F 0.28
T 0.42
T F 0.56
T 0.14
F T F F 0.36
T 0.54
T F 0.72
T 0.18
F F F 0.04
T 0.06
T F 0.08
T 0.02
dtype: float64
Here is an example where both series have a one-dimensional index:
>>> a = pd.Series({
... 'T': .3,
... 'F': .7
... })
>>> a.index.names = ['A']
>>> b = pd.Series({
... 'T': .2,
... 'F': .8
... })
>>> b.index.names = ['B']
>>> pointwise_mul_two(a, b)
A B
T T 0.06
F 0.24
F T 0.14
F 0.56
dtype: float64
Finally, here is an example when only one of the series has a MultiIndex.
>>> a = pd.Series({
... 'T': .3,
... 'F': .7
... })
>>> a.index.names = ['A']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['B', 'C']
>>> pointwise_mul_two(a, b)
A B C
T F F 0.12
T 0.18
T F 0.24
T 0.06
F F F 0.28
T 0.42
T F 0.56
T 0.14
dtype: float64
"""
# Return the Cartesian product if the index names have nothing in common with each other
if not set(left.index.names) & set(right.index.names):
cart = pd.DataFrame(np.outer(left, right), index=left.index, columns=right.index)
return cart.stack(list(range(cart.columns.nlevels)))
index, l_idx, r_idx, = left.index.join(right.index, how='inner', return_indexers=True)
if l_idx is None:
l_idx = np.arange(len(left))
if r_idx is None:
r_idx = np.arange(len(right))
return pd.Series(left.iloc[l_idx].values * right.iloc[r_idx].values, index=index)
def pointwise_mul(cdts, keep_zeros=False):
"""Pointwise multiply an iterable of conditional distribution tables, dropping zero-probability rows first unless keep_zeros is True."""
if not keep_zeros:
cdts = (cdt[cdt > 0] for cdt in cdts)
return functools.reduce(pointwise_mul_two, cdts)
class BayesNet:
"""Bayesian network.
Parameters
----------
structure (list of tuples)
Each tuple denotes a (parent, child) connection. A CycleError is raised if the
structure is not acyclic.
prior_count (int)
If provided, artificial samples will be used to compute each conditional
probability distribution, in addition to provided samples. As a consequence, each
combination of parent(s)/child(ren) values will appear prior_count times. The
justification for doing so is related to Laplace's rule of succession and to Bayesian
statistics in general.
Attributes
----------
nodes (list)
The node names sorted in topological order. Iterating over this is equivalent to performing
a breadth-first search.
"""
def __init__(self, *structure, prior_count: int = None):
self.prior_count = prior_count
def coerce_list(obj):
if isinstance(obj, list):
return obj
return [obj]
# The structure is made up of nodes (scalars) and edges (tuples)
edges = (e for e in structure if isinstance(e, tuple))
nodes = set(e for e in structure if not isinstance(e, tuple))
# Convert edges into children and parent connections
self.parents = collections.defaultdict(set)
self.children = collections.defaultdict(set)
for parents, children in edges:
for parent, child in itertools.product(coerce_list(parents), coerce_list(children)):
self.parents[child].add(parent)
self.children[parent].add(child)
# collections.defaultdict(set) -> dict(list)
self.parents = {node: list(sorted(parents)) for node, parents in self.parents.items()}
self.children = {node: list(sorted(children)) for node, children in self.children.items()}
# The nodes are sorted in topological order. Nodes of the same level are sorted in
# lexicographic order.
ts = graphlib.TopologicalSorter()
for node in sorted({*self.parents.keys(), *self.children.keys(), *nodes}):
ts.add(node, *self.parents.get(node, []))
self.nodes = list(ts.static_order())
self.P = {}
self._P_sizes = {}
def prepare(self):
"""Perform house-keeping.
It is highly recommended to call this method whenever the structure and/or the parameters
of the Bayesian network are set manually.
"""
for node, P in self.P.items():
P.sort_index(inplace=True)
P.index.rename(
[*self.parents[node], node] if node in self.parents else node,
inplace=True
)
P.name = (
f'P({node} | {", ".join(map(str, self.parents[node]))})'
if node in self.parents else
f'P({node})'
)
def _forward_sample(self, init: dict = None):
"""Perform forward sampling.
This is also known as "ancestral sampling", as well as "prior sampling".
"""
init = init or {}
while True:
sample = {}
likelihood = 1.
for node in self.nodes:
# Access P(node | parents(node))
P = self.P[node]
if node in self.parents:
condition = tuple(sample[parent] for parent in self.parents[node])
P = P.cdt[condition]
if node in init:
node_value = init[node]
else:
node_value = P.cdt.sample()
sample[node] = node_value
likelihood *= P.get(node_value, 0)
yield sample, likelihood
def _flood_fill_sample(self, init: dict = None):
# We first define an order in which we'll loop over the nodes
init = init or {}
def walk(node, visited):
if node in visited:
return
yield node, visited
visited.add(node)
for parent in self.parents.get(node, []):
yield from walk(parent, visited)
for child in self.children.get(node, []):
yield from walk(child, visited)
# We start by building P(node | blanket ∩ walk) for each node. That is, the distribution of
# the node's values with respect to the intersection of the node's Markov blanket and the
# nodes that have been looped over.
P = {}
for node, visited in walk(node=self.roots[0], visited=set()):
p = self.P[node]
if node in init:
p = p[p.index.get_level_values(node) == init[node]]
if conditioning := list(visited.intersection(self.markov_boundary(node))):
p = pointwise_mul([p, pointwise_mul(self.P[c] for c in conditioning)])
p = p.groupby([*conditioning, node]).sum()
p = p.groupby(conditioning).apply(lambda g: g / g.sum())
P[node] = p
while True:
sample = init.copy()
for node, visited in walk(node=self.roots[0], visited=set()):
p = P[node]
if visited:
condition = tuple(sample[c] for c in p.index.names[:-1])
p = p.cdt[condition]
sample[node] = p.cdt.sample()
yield sample
def sample(self, n=1):
"""Generate a new sample at random by using forward sampling.
Nodes are visited in topological order, so the parents of a node are always
sampled before the node itself. Each node's value is then drawn from the
conditional distribution selected by its parents' sampled values.
Parameters:
n: Number of samples to produce. A DataFrame is returned if `n > 1`. A dictionary is
returned if not.
"""
samples = (sample for sample, _ in self._forward_sample())
if n > 1:
return pd.DataFrame(next(samples) for _ in range(n)).sort_index(axis='columns')
return next(samples)
def partial_fit(self, X: pd.DataFrame):
"""Update the parameters of each conditional distribution."""
# Compute the conditional distribution for each node that has parents
for child, parents in self.parents.items():
# If a P already exists, then we update it incrementally...
if child in self.P:
old_counts = self.P[child] * self._P_sizes[child]
new_counts = X.groupby(parents + [child]).size()
counts = old_counts.add(new_counts, fill_value=0)
# ... else we compute it from scratch
else:
counts = X.groupby(parents + [child]).size()
if self.prior_count:
combos = itertools.product(*[X[var].unique() for var in parents + [child]])
prior = pd.Series(1, pd.MultiIndex.from_tuples(combos, names=parents + [child]))
counts = counts.add(prior, fill_value=0)
# Normalize
self._P_sizes[child] = counts.groupby(parents).sum()
self.P[child] = counts / self._P_sizes[child]
# Compute the distribution for each root
for root in self.roots:
# Incremental update
if root in self.P:
old_counts = self.P[root] * self._P_sizes[root]
new_counts = X[root].value_counts()
counts = old_counts.add(new_counts, fill_value=0)
self._P_sizes[root] += len(X)
self.P[root] = counts / self._P_sizes[root]
# From scratch
else:
self._P_sizes[root] = len(X)
self.P[root] = X[root].value_counts(normalize=True)
self.prepare()
return self
def fit(self, X: pd.DataFrame):
"""Find the values of each conditional distribution."""
self.P = {}
self._P_sizes = {}
return self.partial_fit(X)
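# --- Illustrative sketch (not from the original source) ---
# Parameters are learned from a DataFrame whose columns are the node names:
#   df = pd.DataFrame({'Cloudy': [...], 'Rain': [...], 'Sprinkler': [...], 'Wet grass': [...]})
#   bn = bn.fit(df)
# Each entry of bn.P is then a (conditional) relative frequency table; setting
# prior_count adds pseudo-counts so that unseen value combinations keep non-zero mass.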
def _rejection_sampling(self, *query, event, n_iterations):
"""Answer a query using rejection sampling.
This is probably the easiest approximate inference method to understand. The idea is simply
to produce a random sample and keep it if it satisfies the specified event. The sample is
rejected if any part of the event is not consistent with the sample. The downside of this
method is that it can potentially reject many samples, and therefore requires a large `n`
in order to produce reliable estimates.
Examples
--------
>>> import hedgehog as hh
>>> import numpy as np
>>> np.random.seed(42)
>>> bn = hh.examples.sprinkler()
>>> event = {'Sprinkler': True}
>>> bn.query('Rain', event=event, algorithm='rejection', n_iterations=100)
Rain
False 0.678571
True 0.321429
Name: P(Rain), dtype: float64
"""
# We don't know in advance how many samples will be kept, therefore we cannot preallocate arrays
samples = {var: [] for var in query}
for _ in range(n_iterations):
sample = self.sample()
# Reject if the sample is not consistent with the specified events
if any(sample[var] != val for var, val in event.items()):
continue
for var in query:
samples[var].append(sample[var])
# Aggregate and normalize the obtained samples
samples = pd.DataFrame(samples)
return samples.groupby(list(query)).size() / len(samples)
def _llh_weighting(self, *query, event, n_iterations):
"""Likelihood weighting.
Likelihood weighting is a particular instance of importance sampling. The idea is to
produce random samples, and weight each sample according to its likelihood.
Examples
--------
>>> import hedgehog as hh
>>> import numpy as np
>>> np.random.seed(42)
>>> bn = hh.examples.sprinkler()
>>> event = {'Sprinkler': True}
>>> bn.query('Rain', event=event, algorithm='likelihood', n_iterations=500)
Rain
False 0.765995
True 0.234005
Name: P(Rain), dtype: float64
"""
samples = {var: [None] * n_iterations for var in query}
likelihoods = [None] * n_iterations
sampler = self._forward_sample(init=event)
for i in range(n_iterations):
# Sample by using the events as fixed values
sample, likelihood = next(sampler)
# Compute the likelihood of this sample
for var in query:
samples[var][i] = sample[var]
likelihoods[i] = likelihood
# Now we aggregate the resulting samples according to their associated likelihoods
results = pd.DataFrame({'likelihood': likelihoods, **samples})
results = results.groupby(list(query))['likelihood'].mean()
results /= results.sum()
return results
def _gibbs_sampling(self, *query, event, n_iterations):
"""Gibbs sampling.
The mathematical details of why this works are quite involved, but the idea is quite
simple. We start with a random sample where the event variables are specified. Every
iteration, we pick a random variable that is not part of the event variables, and sample it
randomly. The sampling is conditioned on the current state of the sample, which requires
computing the conditional distribution of each variable with respect to it's Markov
blanket. Every time a random value is sampled, we update the current state and record it.
Examples
--------
>>> import hedgehog as hh
>>> import numpy as np
>>> np.random.seed(42)
>>> bn = hh.examples.sprinkler()
>>> event = {'Sprinkler': True}
>>> bn.query('Rain', event=event, algorithm='gibbs', n_iterations=500)
Rain
False 0.726
True 0.274
Name: P(Rain), dtype: float64
"""
# We start by computing the conditional distributions for each node that is not part of
# the event. Each relevant node is therefore conditioned on its Markov boundary. Refer to
# equation 14.12 of Artificial Intelligence: A Modern Approach for more detail.
posteriors = {}
boundaries = {}
nonevents = sorted(set(self.nodes) - set(event))
for node in nonevents:
post = pointwise_mul(self.P[node] for node in [node, *self.children.get(node, [])])
if boundary := self.markov_boundary(node):
post = post.groupby(boundary).apply(lambda g: g / g.sum())
post = post.reorder_levels([*boundary, node])
post = post.sort_index()
posteriors[node] = post
boundaries[node] = boundary
# Start with a random sample
state = next(self._forward_sample(init=event))[0]
samples = {var: [None] * n_iterations for var in query}
cycle = itertools.cycle(nonevents) # arbitrary order, it doesn't matter
for i in range(n_iterations):
# Go to the next variable
var = next(cycle)
# Sample from P(var | boundary(var))
P = posteriors[var]
condition = tuple(state[node] for node in boundaries[var])
if condition:
P = P.cdt[condition]
state[var] = P.cdt.sample()
# Record the current state
for var in query:
samples[var][i] = state[var]
# Aggregate and normalize the obtained samples
samples = pd.DataFrame(samples)
return samples.groupby(list(query)).size() / len(samples)
def _variable_elimination(self, *query, event):
"""Variable elimination.
See figure 14.11 of Artificial Intelligence: A Modern Approach for more detail.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.sprinkler()
>>> bn.query('Rain', event={'Sprinkler': True}, algorithm='exact')
Rain
False 0.7
True 0.3
Name: P(Rain), dtype: float64
"""
# We start by determining which nodes can be discarded. We can remove any leaf node that is
# not part of the query variable(s) or the event variable(s). After a leaf node has been
# removed, there might be some more leaf nodes to remove, and so on. Said otherwise, we keep
# only the query variable(s), the event variable(s), and their ancestors.
relevant = {*query, *event}
for node in list(relevant):
relevant |= self.ancestors(node)
hidden = relevant - {*query, *event}
factors = []
for node in relevant:
factor = self.P[node].copy()
# Filter each factor according to the event
for var, val in event.items():
if var in factor.index.names:
factor = factor[factor.index.get_level_values(var) == val]
factors.append(factor)
# Sum-out the hidden variables from the factors in which they appear
for node in hidden:
prod = pointwise_mul(
factors.pop(i)
for i in reversed(range(len(factors)))
if node in factors[i].index.names
)
prod = prod.cdt.sum_out(node)
factors.append(prod)
# Pointwise multiply the rest of the factors and normalize the result
posterior = pointwise_mul(factors)
posterior = posterior / posterior.sum()
posterior.index = posterior.index.droplevel(list(set(posterior.index.names) - set(query)))
return posterior
def ancestors(self, node):
"""Return a node's ancestors."""
parents = self.parents.get(node, ())
if parents:
return set(parents) | set.union(*[self.ancestors(p) for p in parents])
return set()
@property
def roots(self):
"""Return the network's roots.
A root is a node that has no parent.
"""
return [node for node in self.nodes if node not in self.parents]
def query(self, *query: typing.Tuple[str], event: dict, algorithm='exact',
n_iterations=100) -> pd.Series:
"""Answer a probabilistic query.
Exact inference is performed by default. However, this might be too slow depending on the
graph structure. In that case, it is more suitable to use one of the approximate inference
methods. Provided `n` is "large enough", approximate inference methods are usually very
reliable.
Parameters
----------
query
The variables for which the posterior distribution is inferred.
event
The information on which to condition the answer. This can also called the "evidence".
algorithm
Inference method to use. Possible choices are: exact, gibbs, likelihood, rejection.
n_iterations
Number of iterations to perform when using an approximate inference method.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.asia()
>>> event = {'Visit to Asia': True, 'Smoker': True}
>>> bn.query('Lung cancer', 'Tuberculosis', event=event)
Lung cancer Tuberculosis
False False 0.855
True 0.045
True False 0.095
True 0.005
Name: P(Lung cancer, Tuberculosis), dtype: float64
"""
if not query:
raise ValueError('At least one query variable has to be specified')
for q in query:
if q in event:
raise ValueError('A query variable cannot be part of the event')
if algorithm == 'exact':
answer = self._variable_elimination(*query, event=event)
elif algorithm == 'gibbs':
answer = self._gibbs_sampling(*query, event=event, n_iterations=n_iterations)
elif algorithm == 'likelihood':
answer = self._llh_weighting(*query, event=event, n_iterations=n_iterations)
elif algorithm == 'rejection':
answer = self._rejection_sampling(*query, event=event, n_iterations=n_iterations)
else:
raise ValueError('Unknown algorithm, must be one of: exact, gibbs, likelihood, ' +
'rejection')
answer = answer.rename(f'P({", ".join(query)})')
# We sort the index levels if there are multiple query variables
if isinstance(answer.index, pd.MultiIndex):
answer = answer.reorder_levels(sorted(answer.index.names))
return answer.sort_index()
def impute(self, sample: dict, **query_params) -> dict:
"""Replace missing values with the most probable possibility.
This method returns a fresh copy and does not modify the input.
Parameters
----------
sample
The sample for which the missing values need replacing. The missing values are expected
to be represented with `None`.
query_params
The rest of the keyword arguments for specifying what parameters to call the `query`
method with.
"""
# Determine which variables are missing and which ones are not
missing = []
event = sample.copy()
for k, v in sample.items():
if v is None:
missing.append(k)
del event[k]
# Compute the likelihood of each possibility
posterior = self.query(*missing, event=event, **query_params)
# Replace the missing values with the most likely values
for k, v in zip(posterior.index.names, posterior.idxmax()):
event[k] = v
return event
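# --- Illustrative sketch (not from the original source) ---
# Missing entries are marked with None and replaced by their most probable
# values given the observed ones, e.g.
#   bn.impute({'Sprinkler': True, 'Rain': None})
# queries P(Rain | Sprinkler=True) and fills in the argmax, returning a new dict.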
def graphviz(self):
"""Export to Graphviz.
The graphviz module is imported during this function call. Therefore it isn't a hard
requirement. Instead the user has to install it by herself.
"""
import graphviz
G = graphviz.Digraph()
for node in self.nodes:
G.node(str(node))
for node, children in self.children.items():
for child in children:
G.edge(str(node), str(child))
return G
def _repr_svg_(self):
return self.graphviz()._repr_svg_()
def full_joint_dist(self, *select, keep_zeros=False) -> pd.DataFrame:
"""Return the full joint distribution.
The full joint distribution is obtained by pointwise multiplying all the conditional
probability tables with each other and normalizing the result.
Parameters
----------
keep_zeros
Determines whether or not to include value combinations that don't occur together.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.sprinkler()
>>> bn.full_joint_dist()
Cloudy Rain Sprinkler Wet grass
False False False False 0.2000
True False 0.0200
True 0.1800
True False False 0.0050
True 0.0450
True False 0.0005
True 0.0495
True False False False 0.0900
True False 0.0010
True 0.0090
True False False 0.0360
True 0.3240
True False 0.0004
True 0.0396
Name: P(Cloudy, Rain, Sprinkler, Wet grass), dtype: float64
The cases that don't occur are excluded by default. They can be included by setting
the `keep_zeros` parameter to `True`.
>>> bn.full_joint_dist(keep_zeros=True)
Cloudy Rain Sprinkler Wet grass
False False False False 0.2000
True 0.0000
True False 0.0200
True 0.1800
True False False 0.0050
True 0.0450
True False 0.0005
True 0.0495
True False False False 0.0900
True 0.0000
True False 0.0010
True 0.0090
True False False 0.0360
True 0.3240
True False 0.0004
True 0.0396
Name: P(Cloudy, Rain, Sprinkler, Wet grass), dtype: float64
"""
fjd = pointwise_mul(self.P.values(), keep_zeros=keep_zeros)
fjd = fjd.reorder_levels(sorted(fjd.index.names))
fjd = fjd.sort_index()
fjd.name = f'P({", ".join(fjd.index.names)})'
return fjd / fjd.sum()
def predict_proba(self, X: typing.Union[dict, pd.DataFrame]):
"""Return likelihood estimates.
The probabilities are obtained by first computing the full joint distribution. Then, the
likelihood of a sample is retrieved by accessing the relevant row in the full joint
distribution.
This method is a stepping stone for other functionalities, such as computing the
log-likelihood. The latter can in turn be used for structure learning.
Parameters
----------
X
One or more samples.
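        Examples
        --------
        A minimal sketch, assuming the bundled sprinkler example network. According to the
        full joint distribution shown in `full_joint_dist`, the all-False sample below has a
        probability of 0.2.
        >>> import hedgehog as hh  # doctest: +SKIP
        >>> bn = hh.examples.sprinkler()  # doctest: +SKIP
        >>> sample = {'Cloudy': False, 'Rain': False, 'Sprinkler': False, 'Wet grass': False}
        >>> bn.predict_proba(sample)  # doctest: +SKIP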
"""
if isinstance(X, dict):
return self.predict_proba(pd.DataFrame([X])).iloc[0]
fjd = self.full_joint_dist().reorder_levels(X.columns)
return fjd[pd.MultiIndex.from_frame(X)]
def predict_log_proba(self, X: typing.Union[dict, pd.DataFrame]):
"""Return log-likelihood estimates.
Parameters
----------
X
One or more samples.
"""
return np.log(self.predict_proba(X))
@property
def is_tree(self):
"""Indicate whether or not the network is a tree.
Each node in a tree has at most one parent. Therefore, the network is not a tree if any of
its nodes has two or more parents.
Examples
--------
>>> import hedgehog as hh
>>> hh.BayesNet(
... ('a', 'b'),
... ('a', 'c')
... ).is_tree
True
>>> hh.BayesNet(
... ('a', 'c'),
... ('b', 'c')
... ).is_tree
False
"""
return not any(len(parents) > 1 for parents in self.parents.values())
def markov_boundary(self, node):
"""Return the Markov boundary of a node.
In a Bayesian network, the Markov boundary is a minimal Markov blanket. The Markov boundary
of a node includes its parents, children and the other parents of all of its children.
Examples
--------
        The following example is taken from the Markov blanket Wikipedia article.
>>> import hedgehog as hh
>>> bn = hh.BayesNet(
... (0, 3),
... (1, 4),
... (2, 5),
... (3, 6),
... (4, 6),
... (5, 8),
... (6, 8),
... (6, 9),
... (7, 9),
... (7, 10),
... (8, 11),
... (8, 12)
... )
>>> bn.markov_boundary(6) # corresponds to node A on Wikipedia
[3, 4, 5, 7, 8, 9]
"""
children = self.children.get(node, [])
return sorted(
set(self.parents.get(node, [])) |
set(children) |
set().union(*[self.parents[child] for child in children]) -
{node}
)
def iter_dfs(self):
"""Iterate over the nodes in depth-first search fashion.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.asia()
>>> for node in bn.iter_dfs():
... print(node)
Smoker
Bronchitis
Dispnea
Lung cancer
TB or cancer
Positive X-ray
Visit to Asia
Tuberculosis
"""
def bfs(node, visited):
yield node
visited.add(node)
for child in self.children.get(node, []):
if child not in visited:
yield from bfs(child, visited)
visited = set()
for root in self.roots:
yield from bfs(root, visited)
| import collections
import functools
import itertools
import graphlib
import queue
import typing
import numpy as np
import pandas as pd
import vose
__all__ = ['BayesNet']
@pd.api.extensions.register_series_accessor('cdt')
class CDTAccessor:
"""
Adds utilities to a pandas.Series to help manipulate it as a conditional probability
table (CDT).
"""
def __init__(self, series: pd.Series):
self.series = series
self.sampler = None
def sample(self):
"""Sample a row at random.
The `sample` method of a Series is very slow. Additionally, it is not designed to be used
repetitively and requires O(n) steps every time it is called. Instead, we use a Cython
        implementation of Vose's alias method that takes O(n) time to build and O(1) time to query.
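        Examples
        --------
        A minimal sketch; the drawn index value is random, so no output is shown.
        >>> p = pd.Series({'T': .3, 'F': .7})  # doctest: +SKIP
        >>> p.cdt.sample()  # doctest: +SKIP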
"""
if self.sampler is None:
self.sampler = vose.Sampler(
weights=self.series.to_numpy(dtype=float),
seed=np.random.randint(2 ** 16)
)
idx = self.sampler.sample()
return self.series.index[idx]
@functools.lru_cache(maxsize=256)
def __getitem__(self, idx):
"""Cached row accessor.
Accessing a row of pandas.Series is very inefficient. This method caches the row accesses
and therefore circumvents the issue.
"""
return self.series[idx]
def sum_out(self, *variables):
"""Sums out a variable from a multi-indexed series.
Examples
--------
Example taken from figure 14.10 of Artificial Intelligence: A Modern Approach.
>>> a = pd.Series({
... ('T', 'T'): .3,
... ('T', 'F'): .7,
... ('F', 'T'): .9,
... ('F', 'F'): .1
... })
>>> a.index.names = ['A', 'B']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['B', 'C']
>>> ab = pointwise_mul_two(a, b)
>>> ab
B A C
F T T 0.42
F 0.28
F T 0.06
F 0.04
T T T 0.06
F 0.24
F T 0.18
F 0.72
dtype: float64
>>> ab.cdt.sum_out('B')
A C
F F 0.76
T 0.24
T F 0.52
T 0.48
dtype: float64
"""
nodes = list(self.series.index.names)
for var in variables:
nodes.remove(var)
return self.series.groupby(nodes).sum()
def pointwise_mul_two(left: pd.Series, right: pd.Series):
"""Pointwise multiplication of two series.
Examples
--------
Example taken from figure 14.10 of Artificial Intelligence: A Modern Approach.
>>> a = pd.Series({
... ('T', 'T'): .3,
... ('T', 'F'): .7,
... ('F', 'T'): .9,
... ('F', 'F'): .1
... })
>>> a.index.names = ['A', 'B']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['B', 'C']
>>> pointwise_mul_two(a, b)
B A C
F T T 0.42
F 0.28
F T 0.06
F 0.04
T T T 0.06
F 0.24
F T 0.18
F 0.72
dtype: float64
    This method returns the Cartesian product in case the two series don't share any part of their index
in common.
>>> a = pd.Series({
... ('T', 'T'): .3,
... ('T', 'F'): .7,
... ('F', 'T'): .9,
... ('F', 'F'): .1
... })
>>> a.index.names = ['A', 'B']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['C', 'D']
>>> pointwise_mul_two(a, b)
A B C D
T T F F 0.12
T 0.18
T F 0.24
T 0.06
F F F 0.28
T 0.42
T F 0.56
T 0.14
F T F F 0.36
T 0.54
T F 0.72
T 0.18
F F F 0.04
T 0.06
T F 0.08
T 0.02
dtype: float64
Here is an example where both series have a one-dimensional index:
>>> a = pd.Series({
... 'T': .3,
... 'F': .7
... })
>>> a.index.names = ['A']
>>> b = pd.Series({
... 'T': .2,
... 'F': .8
... })
>>> b.index.names = ['B']
>>> pointwise_mul_two(a, b)
A B
T T 0.06
F 0.24
F T 0.14
F 0.56
dtype: float64
Finally, here is an example when only one of the series has a MultiIndex.
>>> a = pd.Series({
... 'T': .3,
... 'F': .7
... })
>>> a.index.names = ['A']
>>> b = pd.Series({
... ('T', 'T'): .2,
... ('T', 'F'): .8,
... ('F', 'T'): .6,
... ('F', 'F'): .4
... })
>>> b.index.names = ['B', 'C']
>>> pointwise_mul_two(a, b)
A B C
T F F 0.12
T 0.18
T F 0.24
T 0.06
F F F 0.28
T 0.42
T F 0.56
T 0.14
dtype: float64
"""
    # Return the Cartesian product if the index names have nothing in common with each other
if not set(left.index.names) & set(right.index.names):
cart = pd.DataFrame(np.outer(left, right), index=left.index, columns=right.index)
return cart.stack(list(range(cart.columns.nlevels)))
index, l_idx, r_idx, = left.index.join(right.index, how='inner', return_indexers=True)
if l_idx is None:
l_idx = np.arange(len(left))
if r_idx is None:
r_idx = np.arange(len(right))
return pd.Series(left.iloc[l_idx].values * right.iloc[r_idx].values, index=index)
def pointwise_mul(cdts, keep_zeros=False):
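    """Pointwise multiply an iterable of conditional probability tables.
    By default, rows with a probability of 0 are dropped before multiplying, which keeps the
    intermediate factors small. Set `keep_zeros=True` to keep them.
    """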
if not keep_zeros:
cdts = (cdt[cdt > 0] for cdt in cdts)
return functools.reduce(pointwise_mul_two, cdts)
class BayesNet:
"""Bayesian network.
Parameters
----------
structure (list of tuples)
Each tuple denotes a (parent, child) connection. A CycleError is raised if the
structure is not acyclic.
prior_count (int)
If provided, artificial samples will be used to compute each conditional
probability distribution, in addition to provided samples. As a consequence, each
combination of parent(s)/child(ren) values will appear prior_count times. The
justification for doing so is related to Laplace's rule of succession and to Bayesian
statistics in general.
Attributes
----------
nodes (list)
The node names sorted in topological order. Iterating over this is equivalent to performing
a breadth-first search.
"""
def __init__(self, *structure, prior_count: int = None):
self.prior_count = prior_count
def coerce_list(obj):
if isinstance(obj, list):
return obj
return [obj]
# The structure is made up of nodes (scalars) and edges (tuples)
edges = (e for e in structure if isinstance(e, tuple))
nodes = set(e for e in structure if not isinstance(e, tuple))
# Convert edges into children and parent connections
self.parents = collections.defaultdict(set)
self.children = collections.defaultdict(set)
for parents, children in edges:
for parent, child in itertools.product(coerce_list(parents), coerce_list(children)):
self.parents[child].add(parent)
self.children[parent].add(child)
# collections.defaultdict(set) -> dict(list)
self.parents = {node: list(sorted(parents)) for node, parents in self.parents.items()}
self.children = {node: list(sorted(children)) for node, children in self.children.items()}
# The nodes are sorted in topological order. Nodes of the same level are sorted in
# lexicographic order.
ts = graphlib.TopologicalSorter()
for node in sorted({*self.parents.keys(), *self.children.keys(), *nodes}):
ts.add(node, *self.parents.get(node, []))
self.nodes = list(ts.static_order())
self.P = {}
self._P_sizes = {}
def prepare(self):
"""Perform house-keeping.
It is highly recommended to call this method whenever the structure and/or the parameters
of the Bayesian network are set manually.
"""
for node, P in self.P.items():
P.sort_index(inplace=True)
P.index.rename(
[*self.parents[node], node] if node in self.parents else node,
inplace=True
)
P.name = (
f'P({node} | {", ".join(map(str, self.parents[node]))})'
if node in self.parents else
f'P({node})'
)
def _forward_sample(self, init: dict = None):
"""Perform forward sampling.
This is also known as "ancestral sampling", as well as "prior sampling".
"""
init = init or {}
while True:
sample = {}
likelihood = 1.
for node in self.nodes:
# Access P(node | parents(node))
P = self.P[node]
if node in self.parents:
condition = tuple(sample[parent] for parent in self.parents[node])
P = P.cdt[condition]
if node in init:
node_value = init[node]
else:
node_value = P.cdt.sample()
sample[node] = node_value
likelihood *= P.get(node_value, 0)
yield sample, likelihood
def _flood_fill_sample(self, init: dict = None):
# We first define an order in which we'll loop over the nodes
init = init or {}
def walk(node, visited):
if node in visited:
return
yield node, visited
visited.add(node)
for parent in self.parents.get(node, []):
yield from walk(parent, visited)
for child in self.children.get(node, []):
yield from walk(child, visited)
# We start by building P(node | blanket ∩ walk) for each node. That is, the distribution of
# the node's values with respect to the intersection of the node's Markov blanket and the
# nodes that have been looped over.
P = {}
for node, visited in walk(node=self.roots[0], visited=set()):
p = self.P[node]
if node in init:
p = p[p.index.get_level_values(node) == init[node]]
if conditioning := list(visited.intersection(self.markov_boundary(node))):
p = pointwise_mul([p, pointwise_mul(self.P[c] for c in conditioning)])
p = p.groupby([*conditioning, node]).sum()
p = p.groupby(conditioning).apply(lambda g: g / g.sum())
P[node] = p
while True:
sample = init.copy()
for node, visited in walk(node=self.roots[0], visited=set()):
p = P[node]
if visited:
condition = tuple(sample[c] for c in p.index.names[:-1])
p = p.cdt[condition]
sample[node] = p.cdt.sample()
yield sample
def sample(self, n=1):
"""Generate a new sample at random by using forward sampling.
        Nodes are visited in topological order, so the values of a node's parents are always
        sampled before the node itself. Once a value has been chosen for each parent, the
        corresponding conditional distribution is picked and sampled from.
Parameters:
n: Number of samples to produce. A DataFrame is returned if `n > 1`. A dictionary is
returned if not.
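        Examples
        --------
        A minimal sketch, assuming the bundled sprinkler example network; samples are random,
        so no output is shown.
        >>> import hedgehog as hh  # doctest: +SKIP
        >>> bn = hh.examples.sprinkler()  # doctest: +SKIP
        >>> bn.sample()  # doctest: +SKIP
        >>> bn.sample(n=5)  # doctest: +SKIP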
"""
samples = (sample for sample, _ in self._forward_sample())
if n > 1:
return pd.DataFrame(next(samples) for _ in range(n)).sort_index(axis='columns')
return next(samples)
def partial_fit(self, X: pd.DataFrame):
"""Update the parameters of each conditional distribution."""
# Compute the conditional distribution for each node that has parents
for child, parents in self.parents.items():
# If a P already exists, then we update it incrementally...
if child in self.P:
old_counts = self.P[child] * self._P_sizes[child]
new_counts = X.groupby(parents + [child]).size()
counts = old_counts.add(new_counts, fill_value=0)
# ... else we compute it from scratch
else:
counts = X.groupby(parents + [child]).size()
if self.prior_count:
combos = itertools.product(*[X[var].unique() for var in parents + [child]])
prior = pd.Series(1, pd.MultiIndex.from_tuples(combos, names=parents + [child]))
counts = counts.add(prior, fill_value=0)
# Normalize
self._P_sizes[child] = counts.groupby(parents).sum()
self.P[child] = counts / self._P_sizes[child]
# Compute the distribution for each root
for root in self.roots:
# Incremental update
if root in self.P:
old_counts = self.P[root] * self._P_sizes[root]
new_counts = X[root].value_counts()
counts = old_counts.add(new_counts, fill_value=0)
self._P_sizes[root] += len(X)
self.P[root] = counts / self._P_sizes[root]
# From scratch
else:
self._P_sizes[root] = len(X)
self.P[root] = X[root].value_counts(normalize=True)
self.prepare()
return self
def fit(self, X: pd.DataFrame):
"""Find the values of each conditional distribution."""
self.P = {}
self._P_sizes = {}
return self.partial_fit(X)
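    # Usage sketch (assumes `df` and `df2` are pandas DataFrames whose columns match the node
    # names):
    #
    #   bn = BayesNet(('Cloudy', 'Rain'), ('Rain', 'Wet grass'))
    #   bn.fit(df)           # estimate every conditional distribution from scratch
    #   bn.partial_fit(df2)  # later on, update the same distributions incrementally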
def _rejection_sampling(self, *query, event, n_iterations):
"""Answer a query using rejection sampling.
This is probably the easiest approximate inference method to understand. The idea is simply
to produce a random sample and keep it if it satisfies the specified event. The sample is
rejected if any part of the event is not consistent with the sample. The downside of this
        method is that it can potentially reject many samples, and therefore requires a large `n_iterations`
in order to produce reliable estimates.
Examples
--------
>>> import hedgehog as hh
>>> import numpy as np
>>> np.random.seed(42)
>>> bn = hh.examples.sprinkler()
>>> event = {'Sprinkler': True}
>>> bn.query('Rain', event=event, algorithm='rejection', n_iterations=100)
Rain
False 0.678571
True 0.321429
Name: P(Rain), dtype: float64
"""
        # We don't know how many samples we won't reject, therefore we cannot preallocate arrays
samples = {var: [] for var in query}
for _ in range(n_iterations):
sample = self.sample()
# Reject if the sample is not consistent with the specified events
if any(sample[var] != val for var, val in event.items()):
continue
for var in query:
samples[var].append(sample[var])
# Aggregate and normalize the obtained samples
samples = pd.DataFrame(samples)
return samples.groupby(list(query)).size() / len(samples)
def _llh_weighting(self, *query, event, n_iterations):
"""Likelihood weighting.
Likelihood weighting is a particular instance of importance sampling. The idea is to
produce random samples, and weight each sample according to its likelihood.
Examples
--------
>>> import hedgehog as hh
>>> import numpy as np
>>> np.random.seed(42)
>>> bn = hh.examples.sprinkler()
>>> event = {'Sprinkler': True}
>>> bn.query('Rain', event=event, algorithm='likelihood', n_iterations=500)
Rain
False 0.765995
True 0.234005
Name: P(Rain), dtype: float64
"""
samples = {var: [None] * n_iterations for var in query}
likelihoods = [None] * n_iterations
sampler = self._forward_sample(init=event)
for i in range(n_iterations):
# Sample by using the events as fixed values
sample, likelihood = next(sampler)
# Compute the likelihood of this sample
for var in query:
samples[var][i] = sample[var]
likelihoods[i] = likelihood
# Now we aggregate the resulting samples according to their associated likelihoods
results = pd.DataFrame({'likelihood': likelihoods, **samples})
results = results.groupby(list(query))['likelihood'].mean()
results /= results.sum()
return results
def _gibbs_sampling(self, *query, event, n_iterations):
"""Gibbs sampling.
The mathematical details of why this works are quite involved, but the idea is quite
simple. We start with a random sample where the event variables are specified. Every
iteration, we pick a random variable that is not part of the event variables, and sample it
        randomly. The sampling is conditioned on the current state of the sample, which requires
        computing the conditional distribution of each variable with respect to its Markov
blanket. Every time a random value is sampled, we update the current state and record it.
Examples
--------
>>> import hedgehog as hh
>>> import numpy as np
>>> np.random.seed(42)
>>> bn = hh.examples.sprinkler()
>>> event = {'Sprinkler': True}
>>> bn.query('Rain', event=event, algorithm='gibbs', n_iterations=500)
Rain
False 0.726
True 0.274
Name: P(Rain), dtype: float64
"""
# We start by computing the conditional distributions for each node that is not part of
# the event. Each relevant node is therefore conditioned on its Markov boundary. Refer to
# equation 14.12 of Artificial Intelligence: A Modern Approach for more detail.
posteriors = {}
boundaries = {}
nonevents = sorted(set(self.nodes) - set(event))
for node in nonevents:
post = pointwise_mul(self.P[node] for node in [node, *self.children.get(node, [])])
if boundary := self.markov_boundary(node):
post = post.groupby(boundary).apply(lambda g: g / g.sum())
post = post.reorder_levels([*boundary, node])
post = post.sort_index()
posteriors[node] = post
boundaries[node] = boundary
# Start with a random sample
state = next(self._forward_sample(init=event))[0]
samples = {var: [None] * n_iterations for var in query}
cycle = itertools.cycle(nonevents) # arbitrary order, it doesn't matter
for i in range(n_iterations):
# Go to the next variable
var = next(cycle)
# Sample from P(var | boundary(var))
P = posteriors[var]
condition = tuple(state[node] for node in boundaries[var])
if condition:
P = P.cdt[condition]
state[var] = P.cdt.sample()
# Record the current state
for var in query:
samples[var][i] = state[var]
# Aggregate and normalize the obtained samples
samples = pd.DataFrame(samples)
return samples.groupby(list(query)).size() / len(samples)
def _variable_elimination(self, *query, event):
"""Variable elimination.
See figure 14.11 of Artificial Intelligence: A Modern Approach for more detail.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.sprinkler()
>>> bn.query('Rain', event={'Sprinkler': True}, algorithm='exact')
Rain
False 0.7
True 0.3
Name: P(Rain), dtype: float64
"""
        # We start by determining which nodes can be discarded. We can remove any leaf node that
        # is not part of the query variable(s) or the event variable(s). After a leaf node has
        # been removed, there might be more leaf nodes to remove, and so on. In other words, we
        # only keep the nodes that are ancestors of, or part of, the query and event variables.
relevant = {*query, *event}
for node in list(relevant):
relevant |= self.ancestors(node)
hidden = relevant - {*query, *event}
factors = []
for node in relevant:
factor = self.P[node].copy()
# Filter each factor according to the event
for var, val in event.items():
if var in factor.index.names:
factor = factor[factor.index.get_level_values(var) == val]
factors.append(factor)
# Sum-out the hidden variables from the factors in which they appear
for node in hidden:
prod = pointwise_mul(
factors.pop(i)
for i in reversed(range(len(factors)))
if node in factors[i].index.names
)
prod = prod.cdt.sum_out(node)
factors.append(prod)
# Pointwise multiply the rest of the factors and normalize the result
posterior = pointwise_mul(factors)
posterior = posterior / posterior.sum()
posterior.index = posterior.index.droplevel(list(set(posterior.index.names) - set(query)))
return posterior
def ancestors(self, node):
"""Return a node's ancestors."""
parents = self.parents.get(node, ())
if parents:
return set(parents) | set.union(*[self.ancestors(p) for p in parents])
return set()
@property
def roots(self):
"""Return the network's roots.
A root is a node that has no parent.
"""
return [node for node in self.nodes if node not in self.parents]
def query(self, *query: typing.Tuple[str], event: dict, algorithm='exact',
n_iterations=100) -> pd.Series:
"""Answer a probabilistic query.
Exact inference is performed by default. However, this might be too slow depending on the
graph structure. In that case, it is more suitable to use one of the approximate inference
        methods. Provided `n_iterations` is large enough, approximate inference methods are usually very
reliable.
Parameters
----------
query
The variables for which the posterior distribution is inferred.
event
            The information on which to condition the answer. This can also be called the "evidence".
algorithm
Inference method to use. Possible choices are: exact, gibbs, likelihood, rejection.
n_iterations
Number of iterations to perform when using an approximate inference method.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.asia()
>>> event = {'Visit to Asia': True, 'Smoker': True}
>>> bn.query('Lung cancer', 'Tuberculosis', event=event)
Lung cancer Tuberculosis
False False 0.855
True 0.045
True False 0.095
True 0.005
Name: P(Lung cancer, Tuberculosis), dtype: float64
"""
if not query:
raise ValueError('At least one query variable has to be specified')
for q in query:
if q in event:
raise ValueError('A query variable cannot be part of the event')
if algorithm == 'exact':
answer = self._variable_elimination(*query, event=event)
elif algorithm == 'gibbs':
answer = self._gibbs_sampling(*query, event=event, n_iterations=n_iterations)
elif algorithm == 'likelihood':
answer = self._llh_weighting(*query, event=event, n_iterations=n_iterations)
elif algorithm == 'rejection':
answer = self._rejection_sampling(*query, event=event, n_iterations=n_iterations)
else:
raise ValueError('Unknown algorithm, must be one of: exact, gibbs, likelihood, ' +
'rejection')
answer = answer.rename(f'P({", ".join(query)})')
# We sort the index levels if there are multiple query variables
if isinstance(answer.index, pd.MultiIndex):
answer = answer.reorder_levels(sorted(answer.index.names))
return answer.sort_index()
def impute(self, sample: dict, **query_params) -> dict:
"""Replace missing values with the most probable possibility.
This method returns a fresh copy and does not modify the input.
Parameters
----------
sample
The sample for which the missing values need replacing. The missing values are expected
to be represented with `None`.
query_params
The rest of the keyword arguments for specifying what parameters to call the `query`
method with.
"""
# Determine which variables are missing and which ones are not
missing = []
event = sample.copy()
for k, v in sample.items():
if v is None:
missing.append(k)
del event[k]
# Compute the likelihood of each possibility
posterior = self.query(*missing, event=event, **query_params)
# Replace the missing values with the most likely values
for k, v in zip(posterior.index.names, posterior.idxmax()):
event[k] = v
return event
def graphviz(self):
"""Export to Graphviz.
        The graphviz module is imported during this function call, so it isn't a hard
        requirement. Instead, the user has to install it themselves.
"""
import graphviz
G = graphviz.Digraph()
for node in self.nodes:
G.node(str(node))
for node, children in self.children.items():
for child in children:
G.edge(str(node), str(child))
return G
def _repr_svg_(self):
return self.graphviz()._repr_svg_()
def full_joint_dist(self, *select, keep_zeros=False) -> pd.DataFrame:
"""Return the full joint distribution.
The full joint distribution is obtained by pointwise multiplying all the conditional
probability tables with each other and normalizing the result.
Parameters
----------
keep_zeros
Determines whether or not to include value combinations that don't occur together.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.sprinkler()
>>> bn.full_joint_dist()
Cloudy Rain Sprinkler Wet grass
False False False False 0.2000
True False 0.0200
True 0.1800
True False False 0.0050
True 0.0450
True False 0.0005
True 0.0495
True False False False 0.0900
True False 0.0010
True 0.0090
True False False 0.0360
True 0.3240
True False 0.0004
True 0.0396
Name: P(Cloudy, Rain, Sprinkler, Wet grass), dtype: float64
The cases that don't occur are excluded by default. They can be included by setting
the `keep_zeros` parameter to `True`.
>>> bn.full_joint_dist(keep_zeros=True)
Cloudy Rain Sprinkler Wet grass
False False False False 0.2000
True 0.0000
True False 0.0200
True 0.1800
True False False 0.0050
True 0.0450
True False 0.0005
True 0.0495
True False False False 0.0900
True 0.0000
True False 0.0010
True 0.0090
True False False 0.0360
True 0.3240
True False 0.0004
True 0.0396
Name: P(Cloudy, Rain, Sprinkler, Wet grass), dtype: float64
"""
fjd = pointwise_mul(self.P.values(), keep_zeros=keep_zeros)
fjd = fjd.reorder_levels(sorted(fjd.index.names))
fjd = fjd.sort_index()
fjd.name = f'P({", ".join(fjd.index.names)})'
return fjd / fjd.sum()
def predict_proba(self, X: typing.Union[dict, pd.DataFrame]):
"""Return likelihood estimates.
The probabilities are obtained by first computing the full joint distribution. Then, the
likelihood of a sample is retrieved by accessing the relevant row in the full joint
distribution.
This method is a stepping stone for other functionalities, such as computing the
log-likelihood. The latter can in turn be used for structure learning.
Parameters
----------
X
One or more samples.
"""
if isinstance(X, dict):
return self.predict_proba(pd.DataFrame([X])).iloc[0]
fjd = self.full_joint_dist().reorder_levels(X.columns)
return fjd[pd.MultiIndex.from_frame(X)]
def predict_log_proba(self, X: typing.Union[dict, pd.DataFrame]):
"""Return log-likelihood estimates.
Parameters
----------
X
One or more samples.
"""
return np.log(self.predict_proba(X))
@property
def is_tree(self):
"""Indicate whether or not the network is a tree.
Each node in a tree has at most one parent. Therefore, the network is not a tree if any of
its nodes has two or more parents.
Examples
--------
>>> import hedgehog as hh
>>> hh.BayesNet(
... ('a', 'b'),
... ('a', 'c')
... ).is_tree
True
>>> hh.BayesNet(
... ('a', 'c'),
... ('b', 'c')
... ).is_tree
False
"""
return not any(len(parents) > 1 for parents in self.parents.values())
def markov_boundary(self, node):
"""Return the Markov boundary of a node.
In a Bayesian network, the Markov boundary is a minimal Markov blanket. The Markov boundary
of a node includes its parents, children and the other parents of all of its children.
Examples
--------
        The following example is taken from the Markov blanket Wikipedia article.
>>> import hedgehog as hh
>>> bn = hh.BayesNet(
... (0, 3),
... (1, 4),
... (2, 5),
... (3, 6),
... (4, 6),
... (5, 8),
... (6, 8),
... (6, 9),
... (7, 9),
... (7, 10),
... (8, 11),
... (8, 12)
... )
>>> bn.markov_boundary(6) # corresponds to node A on Wikipedia
[3, 4, 5, 7, 8, 9]
"""
children = self.children.get(node, [])
return sorted(
set(self.parents.get(node, [])) |
set(children) |
set().union(*[self.parents[child] for child in children]) -
{node}
)
def iter_dfs(self):
"""Iterate over the nodes in depth-first search fashion.
Examples
--------
>>> import hedgehog as hh
>>> bn = hh.examples.asia()
>>> for node in bn.iter_dfs():
... print(node)
Smoker
Bronchitis
Dispnea
Lung cancer
TB or cancer
Positive X-ray
Visit to Asia
Tuberculosis
"""
def bfs(node, visited):
yield node
visited.add(node)
for child in self.children.get(node, []):
if child not in visited:
yield from bfs(child, visited)
visited = set()
for root in self.roots:
yield from bfs(root, visited) | en | 0.822094 | Adds utilities to a pandas.Series to help manipulate it as a conditional probability table (CDT). Sample a row at random. The `sample` method of a Series is very slow. Additionally, it is not designed to be used repetitively and requires O(n) steps every time it is called. Instead, we use a Cython implemention of Vose's alias method that takes O(n) time to build and O(1) time to query. Cached row accessor. Accessing a row of pandas.Series is very inefficient. This method caches the row accesses and therefore circumvents the issue. Sums out a variable from a multi-indexed series. Examples -------- Example taken from figure 14.10 of Artificial Intelligence: A Modern Approach. >>> a = pd.Series({ ... ('T', 'T'): .3, ... ('T', 'F'): .7, ... ('F', 'T'): .9, ... ('F', 'F'): .1 ... }) >>> a.index.names = ['A', 'B'] >>> b = pd.Series({ ... ('T', 'T'): .2, ... ('T', 'F'): .8, ... ('F', 'T'): .6, ... ('F', 'F'): .4 ... }) >>> b.index.names = ['B', 'C'] >>> ab = pointwise_mul_two(a, b) >>> ab B A C F T T 0.42 F 0.28 F T 0.06 F 0.04 T T T 0.06 F 0.24 F T 0.18 F 0.72 dtype: float64 >>> ab.cdt.sum_out('B') A C F F 0.76 T 0.24 T F 0.52 T 0.48 dtype: float64 Pointwise multiplication of two series. Examples -------- Example taken from figure 14.10 of Artificial Intelligence: A Modern Approach. >>> a = pd.Series({ ... ('T', 'T'): .3, ... ('T', 'F'): .7, ... ('F', 'T'): .9, ... ('F', 'F'): .1 ... }) >>> a.index.names = ['A', 'B'] >>> b = pd.Series({ ... ('T', 'T'): .2, ... ('T', 'F'): .8, ... ('F', 'T'): .6, ... ('F', 'F'): .4 ... }) >>> b.index.names = ['B', 'C'] >>> pointwise_mul_two(a, b) B A C F T T 0.42 F 0.28 F T 0.06 F 0.04 T T T 0.06 F 0.24 F T 0.18 F 0.72 dtype: float64 This method returns the Cartesion product in case two don't share any part of their index in common. >>> a = pd.Series({ ... ('T', 'T'): .3, ... ('T', 'F'): .7, ... ('F', 'T'): .9, ... ('F', 'F'): .1 ... }) >>> a.index.names = ['A', 'B'] >>> b = pd.Series({ ... ('T', 'T'): .2, ... ('T', 'F'): .8, ... ('F', 'T'): .6, ... ('F', 'F'): .4 ... }) >>> b.index.names = ['C', 'D'] >>> pointwise_mul_two(a, b) A B C D T T F F 0.12 T 0.18 T F 0.24 T 0.06 F F F 0.28 T 0.42 T F 0.56 T 0.14 F T F F 0.36 T 0.54 T F 0.72 T 0.18 F F F 0.04 T 0.06 T F 0.08 T 0.02 dtype: float64 Here is an example where both series have a one-dimensional index: >>> a = pd.Series({ ... 'T': .3, ... 'F': .7 ... }) >>> a.index.names = ['A'] >>> b = pd.Series({ ... 'T': .2, ... 'F': .8 ... }) >>> b.index.names = ['B'] >>> pointwise_mul_two(a, b) A B T T 0.06 F 0.24 F T 0.14 F 0.56 dtype: float64 Finally, here is an example when only one of the series has a MultiIndex. >>> a = pd.Series({ ... 'T': .3, ... 'F': .7 ... }) >>> a.index.names = ['A'] >>> b = pd.Series({ ... ('T', 'T'): .2, ... ('T', 'F'): .8, ... ('F', 'T'): .6, ... ('F', 'F'): .4 ... }) >>> b.index.names = ['B', 'C'] >>> pointwise_mul_two(a, b) A B C T F F 0.12 T 0.18 T F 0.24 T 0.06 F F F 0.28 T 0.42 T F 0.56 T 0.14 dtype: float64 # Return the Cartesion product if the index names have nothing in common with each other Bayesian network. Parameters ---------- structure (list of tuples) Each tuple denotes a (parent, child) connection. A CycleError is raised if the structure is not acyclic. prior_count (int) If provided, artificial samples will be used to compute each conditional probability distribution, in addition to provided samples. As a consequence, each combination of parent(s)/child(ren) values will appear prior_count times. 
The justification for doing so is related to Laplace's rule of succession and to Bayesian statistics in general. Attributes ---------- nodes (list) The node names sorted in topological order. Iterating over this is equivalent to performing a breadth-first search. # The structure is made up of nodes (scalars) and edges (tuples) # Convert edges into children and parent connections # collections.defaultdict(set) -> dict(list) # The nodes are sorted in topological order. Nodes of the same level are sorted in # lexicographic order. Perform house-keeping. It is highly recommended to call this method whenever the structure and/or the parameters of the Bayesian network are set manually. Perform forward sampling. This is also known as "ancestral sampling", as well as "prior sampling". # Access P(node | parents(node)) # We first define an order in which we'll loop over the nodes # We start by building P(node | blanket ∩ walk) for each node. That is, the distribution of # the node's values with respect to the intersection of the node's Markov blanket and the # nodes that have been looped over. Generate a new sample at random by using forward sampling. Although the idea is to implement forward sampling, the implementation actually works backwards, starting from the leaf nodes. For every node, we recursively check that values have been sampled for each parent node. Once a value has been chosen for each parent, we can pick the according distribution and sample from it. Parameters: n: Number of samples to produce. A DataFrame is returned if `n > 1`. A dictionary is returned if not. Update the parameters of each conditional distribution. # Compute the conditional distribution for each node that has parents # If a P already exists, then we update it incrementally... # ... else we compute it from scratch # Normalize # Compute the distribution for each root # Incremental update # From scratch Find the values of each conditional distribution. Answer a query using rejection sampling. This is probably the easiest approximate inference method to understand. The idea is simply to produce a random sample and keep it if it satisfies the specified event. The sample is rejected if any part of the event is not consistent with the sample. The downside of this method is that it can potentially reject many samples, and therefore requires a large `n` in order to produce reliable estimates. Examples -------- >>> import hedgehog as hh >>> import numpy as np >>> np.random.seed(42) >>> bn = hh.examples.sprinkler() >>> event = {'Sprinkler': True} >>> bn.query('Rain', event=event, algorithm='rejection', n_iterations=100) Rain False 0.678571 True 0.321429 Name: P(Rain), dtype: float64 # We don't know many samples we won't reject, therefore we cannot preallocate arrays # Reject if the sample is not consistent with the specified events # Aggregate and normalize the obtained samples Likelihood weighting. Likelihood weighting is a particular instance of importance sampling. The idea is to produce random samples, and weight each sample according to its likelihood. Examples -------- >>> import hedgehog as hh >>> import numpy as np >>> np.random.seed(42) >>> bn = hh.examples.sprinkler() >>> event = {'Sprinkler': True} >>> bn.query('Rain', event=event, algorithm='likelihood', n_iterations=500) Rain False 0.765995 True 0.234005 Name: P(Rain), dtype: float64 # Sample by using the events as fixed values # Compute the likelihood of this sample # Now we aggregate the resulting samples according to their associated likelihoods Gibbs sampling. 
The mathematical details of why this works are quite involved, but the idea is quite simple. We start with a random sample where the event variables are specified. Every iteration, we pick a random variable that is not part of the event variables, and sample it randomly. The sampling is conditionned on the current state of the sample, which requires computing the conditional distribution of each variable with respect to it's Markov blanket. Every time a random value is sampled, we update the current state and record it. Examples -------- >>> import hedgehog as hh >>> import numpy as np >>> np.random.seed(42) >>> bn = hh.examples.sprinkler() >>> event = {'Sprinkler': True} >>> bn.query('Rain', event=event, algorithm='gibbs', n_iterations=500) Rain False 0.726 True 0.274 Name: P(Rain), dtype: float64 # We start by computing the conditional distributions for each node that is not part of # the event. Each relevant node is therefore conditioned on its Markov boundary. Refer to # equation 14.12 of Artificial Intelligence: A Modern Approach for more detail. # Start with a random sample # arbitrary order, it doesn't matter # Go to the next variable # Sample from P(var | boundary(var)) # Record the current state # Aggregate and normalize the obtained samples Variable elimination. See figure 14.11 of Artificial Intelligence: A Modern Approach for more detail. Examples -------- >>> import hedgehog as hh >>> bn = hh.examples.sprinkler() >>> bn.query('Rain', event={'Sprinkler': True}, algorithm='exact') Rain False 0.7 True 0.3 Name: P(Rain), dtype: float64 # We start by determining which nodes can be discarded. We can remove any leaf node that is # part of query variable(s) or the event variable(s). After a leaf node has been removed, # there might be some more leaf nodes to be remove, etc. Said otherwise, we can ignore each # node that isn't an ancestor of the query variable(s) or the event variable(s). # Filter each factor according to the event # Sum-out the hidden variables from the factors in which they appear # Pointwise multiply the rest of the factors and normalize the result Return a node's ancestors. Return the network's roots. A root is a node that has no parent. Answer a probabilistic query. Exact inference is performed by default. However, this might be too slow depending on the graph structure. In that case, it is more suitable to use one of the approximate inference methods. Provided `n` is "large enough", approximate inference methods are usually very reliable. Parameters ---------- query The variables for which the posterior distribution is inferred. event The information on which to condition the answer. This can also called the "evidence". algorithm Inference method to use. Possible choices are: exact, gibbs, likelihood, rejection. n_iterations Number of iterations to perform when using an approximate inference method. Examples -------- >>> import hedgehog as hh >>> bn = hh.examples.asia() >>> event = {'Visit to Asia': True, 'Smoker': True} >>> bn.query('Lung cancer', 'Tuberculosis', event=event) Lung cancer Tuberculosis False False 0.855 True 0.045 True False 0.095 True 0.005 Name: P(Lung cancer, Tuberculosis), dtype: float64 # We sort the index levels if there are multiple query variables Replace missing values with the most probable possibility. This method returns a fresh copy and does not modify the input. Parameters ---------- sample The sample for which the missing values need replacing. The missing values are expected to be represented with `None`. 
query_params The rest of the keyword arguments for specifying what parameters to call the `query` method with. # Determine which variables are missing and which ones are not # Compute the likelihood of each possibility # Replace the missing values with the most likely values Export to Graphviz. The graphviz module is imported during this function call. Therefore it isn't a hard requirement. Instead the user has to install it by herself. Return the full joint distribution. The full joint distribution is obtained by pointwise multiplying all the conditional probability tables with each other and normalizing the result. Parameters ---------- keep_zeros Determines whether or not to include value combinations that don't occur together. Examples -------- >>> import hedgehog as hh >>> bn = hh.examples.sprinkler() >>> bn.full_joint_dist() Cloudy Rain Sprinkler Wet grass False False False False 0.2000 True False 0.0200 True 0.1800 True False False 0.0050 True 0.0450 True False 0.0005 True 0.0495 True False False False 0.0900 True False 0.0010 True 0.0090 True False False 0.0360 True 0.3240 True False 0.0004 True 0.0396 Name: P(Cloudy, Rain, Sprinkler, Wet grass), dtype: float64 The cases that don't occur are excluded by default. They can be included by setting the `keep_zeros` parameter to `True`. >>> bn.full_joint_dist(keep_zeros=True) Cloudy Rain Sprinkler Wet grass False False False False 0.2000 True 0.0000 True False 0.0200 True 0.1800 True False False 0.0050 True 0.0450 True False 0.0005 True 0.0495 True False False False 0.0900 True 0.0000 True False 0.0010 True 0.0090 True False False 0.0360 True 0.3240 True False 0.0004 True 0.0396 Name: P(Cloudy, Rain, Sprinkler, Wet grass), dtype: float64 Return likelihood estimates. The probabilities are obtained by first computing the full joint distribution. Then, the likelihood of a sample is retrieved by accessing the relevant row in the full joint distribution. This method is a stepping stone for other functionalities, such as computing the log-likelihood. The latter can in turn be used for structure learning. Parameters ---------- X One or more samples. Return log-likelihood estimates. Parameters ---------- X One or more samples. Indicate whether or not the network is a tree. Each node in a tree has at most one parent. Therefore, the network is not a tree if any of its nodes has two or more parents. Examples -------- >>> import hedgehog as hh >>> hh.BayesNet( ... ('a', 'b'), ... ('a', 'c') ... ).is_tree True >>> hh.BayesNet( ... ('a', 'c'), ... ('b', 'c') ... ).is_tree False Return the Markov boundary of a node. In a Bayesian network, the Markov boundary is a minimal Markov blanket. The Markov boundary of a node includes its parents, children and the other parents of all of its children. Examples -------- The following article is taken from the Markov blanket Wikipedia article. >>> import hedgehog as hh >>> bn = hh.BayesNet( ... (0, 3), ... (1, 4), ... (2, 5), ... (3, 6), ... (4, 6), ... (5, 8), ... (6, 8), ... (6, 9), ... (7, 9), ... (7, 10), ... (8, 11), ... (8, 12) ... ) >>> bn.markov_boundary(6) # corresponds to node A on Wikipedia [3, 4, 5, 7, 8, 9] Iterate over the nodes in depth-first search fashion. Examples -------- >>> import hedgehog as hh >>> bn = hh.examples.asia() >>> for node in bn.iter_dfs(): ... print(node) Smoker Bronchitis Dispnea Lung cancer TB or cancer Positive X-ray Visit to Asia Tuberculosis | 2.590957 | 3 |
Object Oriented Programming/EmployeeClass.py | Williano/Solved-Practice-Questions | 0 | 6631159 | # Module: EmployeeClass.py
# Description: This module creates an Employee Class with data attributes
# and methods acting on the data.
# Programmer: <NAME>.
# Date: 01.03.17
class Employee:
"""Creating the Employee class with data attributes and methods.
"""
# Defining the __init__ method initializes the attributes.
def __init__(self, name, id_number, department, job_title):
self.__name = name
self.__id_number = id_number
self.__department = department
self.__job_title = job_title
# Defining the set_name method sets the name attributes.
def set_name(self, name):
self.__name = name
# Defining the set_id_number method sets the id_number attributes.
def set_id_number(self, id_number):
self.__id_number = id_number
# Defining the set_department method sets the department attributes.
def set_department(self, department):
self.__department = department
# Defining the set_job_title method sets the job_title attributes.
def set_job_title(self, job_title):
self.__job_title = job_title
# Defining the get_name method returns the name of the employee.
def get_name(self):
return self.__name
# Defining the get_id_number method returns the id_number of the employee.
def get_id_number(self):
return self.__id_number
# Defining the get_department method returns the department of the employee.
def get_department(self):
return self.__department
# Defining the get_job_title method returns the job_title of the employee.
def get_job_title(self):
return self.__job_title
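# Usage sketch with hypothetical values:
#
#   employee = Employee('Jane Doe', '47899', 'Accounting', 'Vice President')
#   print(employee.get_name())            # Displays Jane Doe
#   employee.set_job_title('President')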
| # Module: EmployeeClass.py
# Description: This module creates an Employee Class with data attributes
# and methods acting on the data.
# Programmer: <NAME>.
# Date: 01.03.17
class Employee:
"""Creating the Employee class with data attributes and methods.
"""
# Defining the __init__ method initializes the attributes.
def __init__(self, name, id_number, department, job_title):
self.__name = name
self.__id_number = id_number
self.__department = department
self.__job_title = job_title
# Defining the set_name method sets the name attributes.
def set_name(self, name):
self.__name = name
# Defining the set_id_number method sets the id_number attributes.
def set_id_number(self, id_number):
self.__id_number = id_number
# Defining the set_department method sets the department attributes.
def set_department(self, department):
self.__department = department
# Defining the set_job_title method sets the job_title attributes.
def set_job_title(self, job_title):
self.__job_title = job_title
# Defining the get_name method returns the name of the employee.
def get_name(self):
return self.__name
# Defining the get_id_number method returns the id_number of the employee.
def get_id_number(self):
return self.__id_number
# Defining the get_department method returns the department of the employee.
def get_department(self):
return self.__department
# Defining the get_job_title method returns the job_title of the employee.
def get_job_title(self):
return self.__job_title
| en | 0.706458 | # Module: EmployeeClass.py # Description: This module creates an Employee Class with data attributes # and methods acting on the data. # Programmer: <NAME>. # Date: 01.03.17 Creating the Employee class with data attributes and methods. # Defining the __init__ method initializes the attributes. # Defining the set_name method sets the name attributes. # Defining the set_id_number method sets the id_number attributes. # Defining the set_department method sets the department attributes. # Defining the set_job_title method sets the job_title attributes. # Defining the get_name method returns the name of the employee. # Defining the get_id_number method returns the id_number of the employee. # Defining the get_department method returns the department of the employee. # Defining the get_job_title method returns the job_title of the employee. | 4.161837 | 4 |
pubnub/endpoints/access/revoke.py | KaizenAPI/python | 4 | 6631160 | <filename>pubnub/endpoints/access/revoke.py<gh_stars>1-10
from pubnub.endpoints.access.grant import Grant
from pubnub.enums import PNOperationType
class Revoke(Grant):
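    """Access Manager revoke endpoint.
    Reuses the Grant endpoint with every permission flag forced to False; the read, write and
    manage setters are disabled because a revoke never grants permissions.
    """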
def __init__(self, pubnub):
Grant.__init__(self, pubnub)
self._read = False
self._write = False
self._manage = False
self._get = False
self._update = False
self._join = False
self._sort_params = True
def read(self, flag):
raise NotImplementedError
def write(self, flag):
raise NotImplementedError
def manage(self, flag):
raise NotImplementedError
def operation_type(self):
return PNOperationType.PNAccessManagerRevoke
def name(self):
return "Revoke"
| <filename>pubnub/endpoints/access/revoke.py<gh_stars>1-10
from pubnub.endpoints.access.grant import Grant
from pubnub.enums import PNOperationType
class Revoke(Grant):
def __init__(self, pubnub):
Grant.__init__(self, pubnub)
self._read = False
self._write = False
self._manage = False
self._get = False
self._update = False
self._join = False
self._sort_params = True
def read(self, flag):
raise NotImplementedError
def write(self, flag):
raise NotImplementedError
def manage(self, flag):
raise NotImplementedError
def operation_type(self):
return PNOperationType.PNAccessManagerRevoke
def name(self):
return "Revoke"
| none | 1 | 2.386356 | 2 |
|
src/ralph/backends/mixins.py | p-bizouard/ralph | 5 | 6631161 | """Backend mixins for Ralph"""
import json
import logging
from ralph.defaults import HISTORY_FILE, LOCALE_ENCODING
logger = logging.getLogger(__name__)
class HistoryMixin:
"""Handle backend download history to avoid fetching same files multiple
times if they are already available."""
@property
def history(self):
"""Get backend history"""
logging.debug("Loading history file: %s", str(HISTORY_FILE))
if not hasattr(self, "_history"):
try:
with HISTORY_FILE.open(encoding=LOCALE_ENCODING) as history_file:
self._history = json.load(history_file)
except FileNotFoundError:
self._history = []
return self._history
# pylint: disable=no-self-use
def write_history(self, history):
"""Write given history as a JSON file"""
logging.debug("Writing history file: %s", str(HISTORY_FILE))
if not HISTORY_FILE.parent.exists():
HISTORY_FILE.parent.mkdir(parents=True)
with HISTORY_FILE.open("w", encoding=LOCALE_ENCODING) as history_file:
json.dump(history, history_file)
# Update history
self._history = history
def clean_history(self, selector):
"""Clean selected events from the history.
selector: a callable that selects events that need to be removed
"""
self._history = list(filter(lambda event: not selector(event), self.history))
self.write_history(self._history)
def append_to_history(self, event):
"""Append event to history"""
self.write_history(self.history + [event])
def get_command_history(self, backend_name, command):
"""Returns a set of entry ids from the history for a command and backend_name"""
return [
entry["id"]
for entry in filter(
lambda e: e["backend"] == backend_name and e["command"] == command,
self.history,
)
]
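# Usage sketch (hypothetical backend): a backend class opts in by inheriting the mixin and
# appending one history entry per processed item, e.g.
#
#   class SomeStorage(HistoryMixin):
#       def fetch(self, archive_id):
#           ...
#           self.append_to_history({"backend": "some_storage", "command": "fetch", "id": archive_id})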
| """Backend mixins for Ralph"""
import json
import logging
from ralph.defaults import HISTORY_FILE, LOCALE_ENCODING
logger = logging.getLogger(__name__)
class HistoryMixin:
"""Handle backend download history to avoid fetching same files multiple
times if they are already available."""
@property
def history(self):
"""Get backend history"""
logging.debug("Loading history file: %s", str(HISTORY_FILE))
if not hasattr(self, "_history"):
try:
with HISTORY_FILE.open(encoding=LOCALE_ENCODING) as history_file:
self._history = json.load(history_file)
except FileNotFoundError:
self._history = []
return self._history
# pylint: disable=no-self-use
def write_history(self, history):
"""Write given history as a JSON file"""
logging.debug("Writing history file: %s", str(HISTORY_FILE))
if not HISTORY_FILE.parent.exists():
HISTORY_FILE.parent.mkdir(parents=True)
with HISTORY_FILE.open("w", encoding=LOCALE_ENCODING) as history_file:
json.dump(history, history_file)
# Update history
self._history = history
def clean_history(self, selector):
"""Clean selected events from the history.
selector: a callable that selects events that need to be removed
"""
self._history = list(filter(lambda event: not selector(event), self.history))
self.write_history(self._history)
def append_to_history(self, event):
"""Append event to history"""
self.write_history(self.history + [event])
def get_command_history(self, backend_name, command):
"""Returns a set of entry ids from the history for a command and backend_name"""
return [
entry["id"]
for entry in filter(
lambda e: e["backend"] == backend_name and e["command"] == command,
self.history,
)
]
| en | 0.882541 | Backend mixins for Ralph Handle backend download history to avoid fetching same files multiple times if they are already available. Get backend history # pylint: disable=no-self-use Write given history as a JSON file # Update history Clean selected events from the history. selector: a callable that selects events that need to be removed Append event to history Returns a set of entry ids from the history for a command and backend_name | 2.703999 | 3 |
v2/log_reader.py | h3nrikoo/system_on_sheep | 0 | 6631162 | from collections import namedtuple
from datetime import datetime
import folium
import webbrowser
import statistics
import random
import math
import numpy as np
import matplotlib.pyplot as plt
from geopy import distance
import pprint
center_coordinates = [63.406514, 10.476741]
CSV_TYPE = 0
CSV_TYPE_GPS = "GPS"
CSV_TYPE_TAG = "TAG"
CSV_GPS_DATE = 1
CSV_GPS_TIME = 2
CSV_GPS_LATITUDE = 3
CSV_GPS_LONGITUDE = 4
CSV_GPS_ALTITUDE = 5
CSV_GPS_GROUND_SPEED = 6
CSV_GPS_COURSE = 7
CSV_GPS_HDOP = 8
CSV_GPS_SATELLITES = 9
CSV_GPS_GEODIAL_SEPERATION = 10
CSV_TAG_TAG_ID = 1
CSV_TAG_GPS_DELAY = 2
CSV_TAG_PACKET_COUNT = 3
CSV_TAG_EXPECTED_PACKET_COUNT = 4
CSV_TAG_P_SAMPLES = 5
CSV_TAG_P_RSSI_SAMPLES = 6
GPSReading = namedtuple("GPSReading", ["datetime", "latitude", "longitude", "altitude_msl", "ground_speed", "course", "hdop", "satellites", "geodial_seperation"])
TagReading = namedtuple("TagReading", ["tag_id", "gps_delay", "packet_count", "expected_packet_count", "p_samples", "p_rssi_samples"])
LocationReading = namedtuple("LocationReading", ["tag_id", "distance", "latitude", "longitude", "altitude"])
Location = namedtuple("Location", ["latitude", "longitude", "altitude"])
def knots_to_meters_per_second(knots):
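    """Convert a speed over ground from knots to metres per second (1 knot = 0.5144 m/s)."""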
return 0.5144*knots
def coordinates_degrees(latitude, longitude):
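    """Convert NMEA-style strings such as 'N6324.2962' and 'E01028.6035' (degrees and decimal
    minutes with a hemisphere prefix) into signed decimal degrees."""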
lat_heading = 1 if latitude[0] == 'N' else -1
long_heading = 1 if longitude[0] == 'E' else -1
lat_deg = (int(latitude[1:3]) + float(latitude[3:10]) / 60) * lat_heading
long_deg = (int(longitude[1:4]) + float(longitude[4:11]) / 60) * long_heading
return lat_deg, long_deg
true_tag_lat_1, true_tag_long_1 = coordinates_degrees("N6324.2962", "E01028.6035")
true_tag_alt_msl_1 = 155.7+0.7
true_tag_lat_2, true_tag_long_2 = coordinates_degrees("N6324.3374", "E01028.5852")
true_tag_alt_msl_2 = 156.5+0.7
true_tag_locations = {
123: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
105: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
137: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
200: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
109: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2),
141: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2),
154: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2),
69: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2)
}
current_tag_id = 69
pprint.pprint(true_tag_locations)
class SearchLogReader:
def _create_reading(self, values):
type = values[CSV_TYPE]
if type == CSV_TYPE_GPS:
return self._create_GPSReading(values)
if type == CSV_TYPE_TAG:
return self._create_TagReading(values)
def _create_GPSReading(self, values):
date = values[CSV_GPS_DATE]
day, month, year = int(date[0:2]), int(date[2:4]), int(date[4:6])+2000
time = values[CSV_GPS_TIME]
hour, minute, second = int(time[0:2]), int(time[2:4]), int(time[4:6])
datetime_ = datetime(year, month, day, hour, minute, second)
latitude, longitude = coordinates_degrees(values[CSV_GPS_LATITUDE], values[CSV_GPS_LONGITUDE])
altitude = float(values[CSV_GPS_ALTITUDE])
speed_mps = knots_to_meters_per_second(float(values[CSV_GPS_GROUND_SPEED]))
course = float(values[CSV_GPS_COURSE])
hdop = float(values[CSV_GPS_HDOP])
satellites = int(values[CSV_GPS_SATELLITES])
geodial_seperation = float(values[CSV_GPS_GEODIAL_SEPERATION])
return GPSReading(datetime_, latitude, longitude, altitude, speed_mps, course, hdop, satellites, geodial_seperation)
def _create_TagReading(self, values):
tag_id = int(values[CSV_TAG_TAG_ID])
gps_delay = int(values[CSV_TAG_GPS_DELAY])
packet_count = int(values[CSV_TAG_PACKET_COUNT])
expected_packet_count = int(values[CSV_TAG_EXPECTED_PACKET_COUNT])
p_samples = [int(i) for i in values[CSV_TAG_P_SAMPLES].split(",")][0:packet_count]
p_rssi_samples = [int(i) for i in values[CSV_TAG_P_RSSI_SAMPLES].split(",")][0:packet_count]
return TagReading(tag_id, gps_delay, packet_count, expected_packet_count, p_samples, p_rssi_samples)
def read(self, filename):
with open(filename) as file:
readings = []
for line in file.readlines():
line = line.strip()
values = line.split(";")
readings.append(self._create_reading(values))
return SearchLog(readings)
class SearchLog:
def __init__(self, readings):
self.readings = readings
self.location_readings = []
def _generate_location_readings(self):
for reading in self.readings:
if isinstance(reading, GPSReading):
latitude, longitude, altitude = reading.latitude, reading.longitude, reading.altitude_msl
if isinstance(reading, TagReading):
tag_id = reading.tag_id
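                # Convert the mean of the received samples to a slant range (the factor of 9.37 m
                # per unit appears to be a calibration constant) and project it onto the horizontal
                # plane using the altitude difference between the receiver and the tag.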
distance = math.sqrt((statistics.mean(reading.p_samples) * 9.37)**2 - (altitude-true_tag_locations[reading.tag_id].altitude)**2)
self.location_readings.append(LocationReading(tag_id, distance, latitude, longitude, altitude))
def get_location_readings(self):
if len(self.location_readings) == 0:
self._generate_location_readings()
return self.location_readings
def get_random_location_readings(self, n):
return random.sample(self.get_location_readings(), min(n, len(self.location_readings)))
def print(self):
for readings in self.readings:
print(readings)
class LaterationEstimator:
def __init__(self, search_log):
self.search_log = search_log
def get_estimate(self):
pass
def main():
global current_tag_id
current_tag_id = 69
search_log = SearchLogReader().read("data/raw/0019.CSV")
m = folium.Map(location=center_coordinates, zoom_start=16)
folium.Marker(location=[true_tag_locations[current_tag_id].latitude, true_tag_locations[current_tag_id].longitude]).add_to(m)
for reading in search_log.get_random_location_readings(6):
folium.Circle(radius=reading.distance, location=[reading.latitude, reading.longitude], color="crimson", fill=False).add_to(m)
m.save("map.html")
webbrowser.open("map.html")
main()
| from collections import namedtuple
from datetime import datetime
import folium
import webbrowser
import statistics
import random
import math
import numpy as np
import matplotlib.pyplot as plt
from geopy import distance
import pprint
center_coordinates = [63.406514, 10.476741]
CSV_TYPE = 0
CSV_TYPE_GPS = "GPS"
CSV_TYPE_TAG = "TAG"
CSV_GPS_DATE = 1
CSV_GPS_TIME = 2
CSV_GPS_LATITUDE = 3
CSV_GPS_LONGITUDE = 4
CSV_GPS_ALTITUDE = 5
CSV_GPS_GROUND_SPEED = 6
CSV_GPS_COURSE = 7
CSV_GPS_HDOP = 8
CSV_GPS_SATELLITES = 9
CSV_GPS_GEODIAL_SEPERATION = 10
CSV_TAG_TAG_ID = 1
CSV_TAG_GPS_DELAY = 2
CSV_TAG_PACKET_COUNT = 3
CSV_TAG_EXPECTED_PACKET_COUNT = 4
CSV_TAG_P_SAMPLES = 5
CSV_TAG_P_RSSI_SAMPLES = 6
GPSReading = namedtuple("GPSReading", ["datetime", "latitude", "longitude", "altitude_msl", "ground_speed", "course", "hdop", "satellites", "geodial_seperation"])
TagReading = namedtuple("TagReading", ["tag_id", "gps_delay", "packet_count", "expected_packet_count", "p_samples", "p_rssi_samples"])
LocationReading = namedtuple("LocationReading", ["tag_id", "distance", "latitude", "longitude", "altitude"])
Location = namedtuple("Location", ["latitude", "longitude", "altitude"])
def knots_to_meters_per_second(knots):
return 0.5144*knots
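# 1 knot = 1852 m / 3600 s ≈ 0.5144 m/s, which is where the conversion factor above comes from.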
def coordinates_degrees(latitude, longitude):
lat_heading = 1 if latitude[0] == 'N' else -1
long_heading = 1 if longitude[0] == 'E' else -1
lat_deg = (int(latitude[1:3]) + float(latitude[3:10]) / 60) * lat_heading
long_deg = (int(longitude[1:4]) + float(longitude[4:11]) / 60) * long_heading
return lat_deg, long_deg
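# Example (worked from the calls below): coordinates_degrees("N6324.2962", "E01028.6035")
# parses the NMEA-style ddmm.mmmm / dddmm.mmmm strings and returns decimal degrees,
# here 63 + 24.2962/60 ≈ 63.40494 and 10 + 28.6035/60 ≈ 10.47673.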
true_tag_lat_1, true_tag_long_1 = coordinates_degrees("N6324.2962", "E01028.6035")
true_tag_alt_msl_1 = 155.7+0.7
true_tag_lat_2, true_tag_long_2 = coordinates_degrees("N6324.3374", "E01028.5852")
true_tag_alt_msl_2 = 156.5+0.7
true_tag_locations = {
123: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
105: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
137: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
200: Location(true_tag_lat_1, true_tag_long_1, true_tag_alt_msl_1),
109: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2),
141: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2),
154: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2),
69: Location(true_tag_lat_2, true_tag_long_2, true_tag_alt_msl_2)
}
current_tag_id = 69
pprint.pprint(true_tag_locations)
class SearchLogReader:
def _create_reading(self, values):
type = values[CSV_TYPE]
if type == CSV_TYPE_GPS:
return self._create_GPSReading(values)
if type == CSV_TYPE_TAG:
return self._create_TagReading(values)
def _create_GPSReading(self, values):
date = values[CSV_GPS_DATE]
day, month, year = int(date[0:2]), int(date[2:4]), int(date[4:6])+2000
time = values[CSV_GPS_TIME]
hour, minute, second = int(time[0:2]), int(time[2:4]), int(time[4:6])
datetime_ = datetime(year, month, day, hour, minute, second)
latitude, longitude = coordinates_degrees(values[CSV_GPS_LATITUDE], values[CSV_GPS_LONGITUDE])
altitude = float(values[CSV_GPS_ALTITUDE])
speed_mps = knots_to_meters_per_second(float(values[CSV_GPS_GROUND_SPEED]))
course = float(values[CSV_GPS_COURSE])
hdop = float(values[CSV_GPS_HDOP])
satellites = int(values[CSV_GPS_SATELLITES])
geodial_seperation = float(values[CSV_GPS_GEODIAL_SEPERATION])
return GPSReading(datetime_, latitude, longitude, altitude, speed_mps, course, hdop, satellites, geodial_seperation)
def _create_TagReading(self, values):
tag_id = int(values[CSV_TAG_TAG_ID])
gps_delay = int(values[CSV_TAG_GPS_DELAY])
packet_count = int(values[CSV_TAG_PACKET_COUNT])
expected_packet_count = int(values[CSV_TAG_EXPECTED_PACKET_COUNT])
p_samples = [int(i) for i in values[CSV_TAG_P_SAMPLES].split(",")][0:packet_count]
p_rssi_samples = [int(i) for i in values[CSV_TAG_P_RSSI_SAMPLES].split(",")][0:packet_count]
return TagReading(tag_id, gps_delay, packet_count, expected_packet_count, p_samples, p_rssi_samples)
def read(self, filename):
with open(filename) as file:
readings = []
for line in file.readlines():
line = line.strip()
values = line.split(";")
readings.append(self._create_reading(values))
return SearchLog(readings)
class SearchLog:
def __init__(self, readings):
self.readings = readings
self.location_readings = []
def _generate_location_readings(self):
for reading in self.readings:
if isinstance(reading, GPSReading):
latitude, longitude, altitude = reading.latitude, reading.longitude, reading.altitude_msl
if isinstance(reading, TagReading):
tag_id = reading.tag_id
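                # The mean of p_samples scaled by 9.37 is used below as an estimated slant range in
                # meters (9.37 appears to act as a calibration constant of this setup); subtracting the
                # squared height difference to the tag via Pythagoras yields a horizontal distance.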
distance = math.sqrt((statistics.mean(reading.p_samples) * 9.37)**2 - (altitude-true_tag_locations[reading.tag_id].altitude)**2)
self.location_readings.append(LocationReading(tag_id, distance, latitude, longitude, altitude))
def get_location_readings(self):
if len(self.location_readings) == 0:
self._generate_location_readings()
return self.location_readings
def get_random_location_readings(self, n):
return random.sample(self.get_location_readings(), min(n, len(self.location_readings)))
def print(self):
for readings in self.readings:
print(readings)
class LaterationEstimator:
def __init__(self, search_log):
self.search_log = search_log
def get_estimate(self):
pass
def main():
global current_tag_id
current_tag_id = 69
search_log = SearchLogReader().read("data/raw/0019.CSV")
m = folium.Map(location=center_coordinates, zoom_start=16)
folium.Marker(location=[true_tag_locations[current_tag_id].latitude, true_tag_locations[current_tag_id].longitude]).add_to(m)
for reading in search_log.get_random_location_readings(6):
folium.Circle(radius=reading.distance, location=[reading.latitude, reading.longitude], color="crimson", fill=False).add_to(m)
m.save("map.html")
webbrowser.open("map.html")
main()
| none | 1 | 2.777218 | 3 |
|
evennia/contrib/tutorials/__init__.py | davidrideout/evennia | 0 | 6631163 | """
Contribs acting as tutorials, examples or supporting the documentation.
"""
| """
Contribs acting as tutorials, examples or supporting the documentation.
"""
| en | 0.96151 | Contribs acting as tutorials, examples or supporting the documentation. | 1.4098 | 1 |
level20.py | CoffeeTableEnnui/RedCircleGame | 0 | 6631164 | import rectangles as r
import circles as c
import games as g
import pygame
level = g.Game(724, 76, 724, 724)
level.addwall(102,750,102,698)
| import rectangles as r
import circles as c
import games as g
import pygame
level = g.Game(724, 76, 724, 724)
level.addwall(102,750,102,698)
| none | 1 | 2.613673 | 3 |
|
ModelerFolder/PlotBuilder.py | KTH-UrbanT/MUBES_UBEM | 8 | 6631165 |
# @Author : <NAME>
# @Email : <EMAIL>
import os
import sys
path2addgeom = os.path.join(os.path.dirname(os.path.dirname(os.getcwd())), 'geomeppy')
sys.path.append(path2addgeom)
sys.path.append("..")
import CoreFiles.GeneralFunctions as GrlFct
from BuildObject.DB_Building import BuildingList
import BuildObject.DB_Data as DB_Data
from BuildObject.DB_Filter4Simulations import checkBldFilter
import matplotlib.pyplot as plt
def LaunchProcess(SimDir, DataBaseInput, LogFile, bldidx, keyPath, nbcase, CorePerim=False, FloorZoning=False,
FigCenter=(0, 0), WindSize=50, PlotBuilding=False):
# process is launched for the considered building
    msg = 'Building ' + str(nbcase) + ' is starting\n'
print('#############################################')
print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
MainPath = os.getcwd()
epluspath = keyPath['epluspath']
os.chdir(SimDir)
StudiedCase = BuildingList()
# lets build the two main object we'll be playing with in the following'
idf_ref, building_ref = GrlFct.appendBuildCase(StudiedCase, epluspath, nbcase, DataBaseInput, MainPath, LogFile,
PlotOnly=True)
refName = 'Building_' + str(nbcase)
for key in building_ref.BuildID:
print(key + ' : ' + str(building_ref.BuildID[key]))
refName += '\n ' + key + str(building_ref.BuildID[key])
idf_ref.idfname = refName
# Rounds of check if we continue with this building or not, see DB_Filter4Simulation.py if other filter are to add
CaseOK = checkBldFilter(building_ref)
if not CaseOK:
msg = '[Error] This Building/bloc has either no height, height below 1, surface below 50m2 or no floors, process abort for this one\n'
print(msg[:-1])
os.chdir(MainPath)
GrlFct.Write2LogFile(msg, LogFile)
GrlFct.Write2LogFile('##############################################################\n', LogFile)
return FigCenter, WindSize
FigCenter.append(building_ref.RefCoord)
refx = sum([center[0] for center in FigCenter]) / len(FigCenter)
refy = sum([center[1] for center in FigCenter]) / len(FigCenter)
if not PlotBuilding:
a=1
#building_ref.MaxShadingDist = 0
# building_ref.shades = building_ref.getshade(DataBaseInput['Build'][nbcase], DataBaseInput['Shades'],
# DataBaseInput['Build'], DB_Data.GeomElement, [])#LogFile)
GrlFct.setBuildingLevel(idf_ref, building_ref, LogFile, CorePerim, FloorZoning, ForPlots=True)
GrlFct.setEnvelopeLevel(idf_ref, building_ref)
FigCentroid = building_ref.RefCoord if PlotBuilding else (refx, refy)
    #we need to transform the previous relative coordinates into absolute ones in order to plot several buildings while keeping their locations
idf_ref, building_ref = GrlFct.MakeAbsoluteCoord(idf_ref,building_ref)
    # computing the window size for visualization
for poly in building_ref.footprint:
for vertex in poly:
WindSize = max(GrlFct.ComputeDistance(FigCentroid, vertex), WindSize)
surf = idf_ref.getsurfaces()
ok2plot = False
nbadiab = 0
adiabsurf = []
for s in surf:
if s.Outside_Boundary_Condition == 'adiabatic':
ok2plot = True
if s.Name[:s.Name.index('_')] not in adiabsurf:
adiabsurf.append(s.Name[:s.Name.index('_')])
nbadiab += 1
if ok2plot:
GrlFct.Write2LogFile('[Nb Adjacent_Walls] This building has '+str(nbadiab)+' walls with adiabatic surfaces\n', LogFile)
idf_ref.view_model(test=PlotBuilding, FigCenter=FigCentroid, WindSize=2 * WindSize)
GrlFct.Write2LogFile('##############################################################\n', LogFile)
# lets get back to the Main Folder we were at the very beginning
os.chdir(MainPath)
return (refx, refy), WindSize
if __name__ == '__main__':
######################################################################################################################
######## MAIN INPUT PART ##################################################################################
######################################################################################################################
    # This file is only used to make graphs of the building geometry given in the GeoJson file
    # BuildNum = [1,2,3,4]      #list of numbers : numbers of the buildings to be simulated (order respecting the
    # PathInputFile = 'String'  #Name of the path file containing the paths to the data and to the EnergyPlus application (see ReadMe)
    # CorePerim = False / True  #True = create an automatic core and perimeter zoning for each building. This option increases by quite
    #                           a large amount both the building process and the simulation process.
    #                           It can be used with either one zone per floor or one zone per heated or non-heated zone
    #                           building will be generated first, all results will be saved in one single folder
    # FloorZoning = False / True #True = thermal zoning will be realized for each floor of the building, if False, there will be 1 zone
    #                           for the heated volume and, if present, one zone for the basement (non-heated volume)
    ## PlotBuilding = False / True  #True = after each building, the building will be plotted for visual check of geometry and thermal zoning.
    #                           It includes the shadings, if False, all the buildings will be plotted without the shadings
    # ZoneOfInterest = 'String' #Text file with Building's IDs that are to be considered within the BuildNum list, if '' then all buildings in BuildNum will be considered
BuildNum = []
PathInputFile = 'Pathways_Template.txt'
CorePerim = False
FloorZoning = False
PlotBuilding = False
ZoneOfInterest = ''
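    # Hypothetical example of the settings above: to plot only the first four buildings with
    # one thermal zone per floor, one could set BuildNum = [0, 1, 2, 3] and FloorZoning = True,
    # keeping the other options at their defaults.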
######################################################################################################################
######## LAUNCHING MULTIPROCESS PROCESS PART #################################################################
######################################################################################################################
CaseName = 'ForTest'
# reading the pathfiles and the geojsonfile
GlobKey =[GrlFct.readPathfile(PathInputFile)]
# lets see if the input file is a dir with several geojson files
multipleFiles = False
BuildingFiles,WallFiles = GrlFct.ReadGeoJsonDir(GlobKey[0])
if BuildingFiles:
multipleFiles = True
MainRootPath = GlobKey[0]['Buildingsfile']
GlobKey[0]['Buildingsfile'] = os.path.join(MainRootPath,BuildingFiles[0])
GlobKey[0]['Shadingsfile'] = os.path.join(MainRootPath, WallFiles[0])
for nb,file in enumerate(BuildingFiles[1:]):
GlobKey.append(GlobKey[-1].copy())
GlobKey[-1]['Buildingsfile'] = os.path.join(MainRootPath, file)
GlobKey[-1]['Shadingsfile'] = os.path.join(MainRootPath, WallFiles[nb+1])
for nbfile, keyPath in enumerate(GlobKey):
# if nbfile not in [0]:
# continue
if multipleFiles:
nb = len(GlobKey)
print('File number : '+str(nbfile) + ' which correspond to Area Ref : '+BuildingFiles[nbfile][:-18])
DataBaseInput = GrlFct.ReadGeoJsonFile(keyPath)
BuildNum2Launch = [i for i in range(len(DataBaseInput['Build']))]
if BuildNum:
BuildNum2Launch = BuildNum
if os.path.isfile(os.path.join(os.getcwd(), ZoneOfInterest)):
NewBuildNum2Launch = []
Bld2Keep = GrlFct.ReadZoneOfInterest(os.path.join(os.getcwd(), ZoneOfInterest), keyWord='<KEY>')
for bldNum, Bld in enumerate(DataBaseInput['Build']):
if Bld.properties['50A_UUID'] in Bld2Keep and bldNum in BuildNum2Launch:
NewBuildNum2Launch.append(bldNum)
BuildNum2Launch = NewBuildNum2Launch
if not BuildNum2Launch:
print('Sorry, but no building matches with the requirements....Please, check your ZoneOfInterest')
else:
if not plt.fignum_exists(0):
FigCenter = []
LogFile = []
CurrentPath = os.getcwd()
WindSize = 50
SimDir = CurrentPath
LogFile = open(os.path.join(SimDir, CaseName+'_Logs.log'), 'w')
if multipleFiles:
msg = '[New AREA] A new goejson file is open (num '+str(nbfile)+'), Area Id : '+BuildingFiles[nbfile][:-18]+'\n'
print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
for idx, nbBuild in enumerate(BuildNum2Launch):
if idx < len(DataBaseInput['Build']):
# getting through the mainfunction above :LaunchProcess() each building sees its idf done in a row within this function
try:
NewCentroid, WindSize = LaunchProcess(SimDir, DataBaseInput, LogFile, idx, keyPath, nbBuild,
CorePerim, FloorZoning,
FigCenter, WindSize, PlotBuilding)
except:
msg = '[Error] There was an error on this building, process aborted\n'
print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
GrlFct.Write2LogFile('##############################################################\n', LogFile)
os.chdir(CurrentPath)
            # if this choice is made, once the building is finished, parallel computing is launched for this one
else:
print('All buildings in the input file have been treated.')
print('###################################################')
break
if not multipleFiles:
LogFile.close()
plt.show()
if multipleFiles:
LogFile.close()
sys.path.remove(path2addgeom) | # @Author : <NAME>
# @Email : <EMAIL>
import os
import sys
path2addgeom = os.path.join(os.path.dirname(os.path.dirname(os.getcwd())), 'geomeppy')
sys.path.append(path2addgeom)
sys.path.append("..")
import CoreFiles.GeneralFunctions as GrlFct
from BuildObject.DB_Building import BuildingList
import BuildObject.DB_Data as DB_Data
from BuildObject.DB_Filter4Simulations import checkBldFilter
import matplotlib.pyplot as plt
def LaunchProcess(SimDir, DataBaseInput, LogFile, bldidx, keyPath, nbcase, CorePerim=False, FloorZoning=False,
FigCenter=(0, 0), WindSize=50, PlotBuilding=False):
# process is launched for the considered building
    msg = 'Building ' + str(nbcase) + ' is starting\n'
print('#############################################')
print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
MainPath = os.getcwd()
epluspath = keyPath['epluspath']
os.chdir(SimDir)
StudiedCase = BuildingList()
# lets build the two main object we'll be playing with in the following'
idf_ref, building_ref = GrlFct.appendBuildCase(StudiedCase, epluspath, nbcase, DataBaseInput, MainPath, LogFile,
PlotOnly=True)
refName = 'Building_' + str(nbcase)
for key in building_ref.BuildID:
print(key + ' : ' + str(building_ref.BuildID[key]))
refName += '\n ' + key + str(building_ref.BuildID[key])
idf_ref.idfname = refName
# Rounds of check if we continue with this building or not, see DB_Filter4Simulation.py if other filter are to add
CaseOK = checkBldFilter(building_ref)
if not CaseOK:
msg = '[Error] This Building/bloc has either no height, height below 1, surface below 50m2 or no floors, process abort for this one\n'
print(msg[:-1])
os.chdir(MainPath)
GrlFct.Write2LogFile(msg, LogFile)
GrlFct.Write2LogFile('##############################################################\n', LogFile)
return FigCenter, WindSize
FigCenter.append(building_ref.RefCoord)
refx = sum([center[0] for center in FigCenter]) / len(FigCenter)
refy = sum([center[1] for center in FigCenter]) / len(FigCenter)
if not PlotBuilding:
a=1
#building_ref.MaxShadingDist = 0
# building_ref.shades = building_ref.getshade(DataBaseInput['Build'][nbcase], DataBaseInput['Shades'],
# DataBaseInput['Build'], DB_Data.GeomElement, [])#LogFile)
GrlFct.setBuildingLevel(idf_ref, building_ref, LogFile, CorePerim, FloorZoning, ForPlots=True)
GrlFct.setEnvelopeLevel(idf_ref, building_ref)
FigCentroid = building_ref.RefCoord if PlotBuilding else (refx, refy)
    #we need to transform the previous relative coordinates into absolute ones in order to plot several buildings while keeping their locations
idf_ref, building_ref = GrlFct.MakeAbsoluteCoord(idf_ref,building_ref)
    # computing the window size for visualization
for poly in building_ref.footprint:
for vertex in poly:
WindSize = max(GrlFct.ComputeDistance(FigCentroid, vertex), WindSize)
surf = idf_ref.getsurfaces()
ok2plot = False
nbadiab = 0
adiabsurf = []
for s in surf:
if s.Outside_Boundary_Condition == 'adiabatic':
ok2plot = True
if s.Name[:s.Name.index('_')] not in adiabsurf:
adiabsurf.append(s.Name[:s.Name.index('_')])
nbadiab += 1
if ok2plot:
GrlFct.Write2LogFile('[Nb Adjacent_Walls] This building has '+str(nbadiab)+' walls with adiabatic surfaces\n', LogFile)
idf_ref.view_model(test=PlotBuilding, FigCenter=FigCentroid, WindSize=2 * WindSize)
GrlFct.Write2LogFile('##############################################################\n', LogFile)
# lets get back to the Main Folder we were at the very beginning
os.chdir(MainPath)
return (refx, refy), WindSize
if __name__ == '__main__':
######################################################################################################################
######## MAIN INPUT PART ##################################################################################
######################################################################################################################
    # This file is only used to make graphs of the building geometry given in the GeoJson file
    # BuildNum = [1,2,3,4]      #list of numbers : numbers of the buildings to be simulated (order respecting the
    # PathInputFile = 'String'  #Name of the path file containing the paths to the data and to the EnergyPlus application (see ReadMe)
    # CorePerim = False / True  #True = create an automatic core and perimeter zoning for each building. This option increases by quite
    #                           a large amount both the building process and the simulation process.
    #                           It can be used with either one zone per floor or one zone per heated or non-heated zone
    #                           building will be generated first, all results will be saved in one single folder
    # FloorZoning = False / True #True = thermal zoning will be realized for each floor of the building, if False, there will be 1 zone
    #                           for the heated volume and, if present, one zone for the basement (non-heated volume)
    ## PlotBuilding = False / True  #True = after each building, the building will be plotted for visual check of geometry and thermal zoning.
    #                           It includes the shadings, if False, all the buildings will be plotted without the shadings
    # ZoneOfInterest = 'String' #Text file with Building's IDs that are to be considered within the BuildNum list, if '' then all buildings in BuildNum will be considered
BuildNum = []
PathInputFile = 'Pathways_Template.txt'
CorePerim = False
FloorZoning = False
PlotBuilding = False
ZoneOfInterest = ''
######################################################################################################################
######## LAUNCHING MULTIPROCESS PROCESS PART #################################################################
######################################################################################################################
CaseName = 'ForTest'
# reading the pathfiles and the geojsonfile
GlobKey =[GrlFct.readPathfile(PathInputFile)]
# lets see if the input file is a dir with several geojson files
multipleFiles = False
BuildingFiles,WallFiles = GrlFct.ReadGeoJsonDir(GlobKey[0])
if BuildingFiles:
multipleFiles = True
MainRootPath = GlobKey[0]['Buildingsfile']
GlobKey[0]['Buildingsfile'] = os.path.join(MainRootPath,BuildingFiles[0])
GlobKey[0]['Shadingsfile'] = os.path.join(MainRootPath, WallFiles[0])
for nb,file in enumerate(BuildingFiles[1:]):
GlobKey.append(GlobKey[-1].copy())
GlobKey[-1]['Buildingsfile'] = os.path.join(MainRootPath, file)
GlobKey[-1]['Shadingsfile'] = os.path.join(MainRootPath, WallFiles[nb+1])
for nbfile, keyPath in enumerate(GlobKey):
# if nbfile not in [0]:
# continue
if multipleFiles:
nb = len(GlobKey)
print('File number : '+str(nbfile) + ' which correspond to Area Ref : '+BuildingFiles[nbfile][:-18])
DataBaseInput = GrlFct.ReadGeoJsonFile(keyPath)
BuildNum2Launch = [i for i in range(len(DataBaseInput['Build']))]
if BuildNum:
BuildNum2Launch = BuildNum
if os.path.isfile(os.path.join(os.getcwd(), ZoneOfInterest)):
NewBuildNum2Launch = []
Bld2Keep = GrlFct.ReadZoneOfInterest(os.path.join(os.getcwd(), ZoneOfInterest), keyWord='<KEY>')
for bldNum, Bld in enumerate(DataBaseInput['Build']):
if Bld.properties['50A_UUID'] in Bld2Keep and bldNum in BuildNum2Launch:
NewBuildNum2Launch.append(bldNum)
BuildNum2Launch = NewBuildNum2Launch
if not BuildNum2Launch:
print('Sorry, but no building matches with the requirements....Please, check your ZoneOfInterest')
else:
if not plt.fignum_exists(0):
FigCenter = []
LogFile = []
CurrentPath = os.getcwd()
WindSize = 50
SimDir = CurrentPath
LogFile = open(os.path.join(SimDir, CaseName+'_Logs.log'), 'w')
if multipleFiles:
msg = '[New AREA] A new goejson file is open (num '+str(nbfile)+'), Area Id : '+BuildingFiles[nbfile][:-18]+'\n'
print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
for idx, nbBuild in enumerate(BuildNum2Launch):
if idx < len(DataBaseInput['Build']):
# getting through the mainfunction above :LaunchProcess() each building sees its idf done in a row within this function
try:
NewCentroid, WindSize = LaunchProcess(SimDir, DataBaseInput, LogFile, idx, keyPath, nbBuild,
CorePerim, FloorZoning,
FigCenter, WindSize, PlotBuilding)
except:
msg = '[Error] There was an error on this building, process aborted\n'
print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
GrlFct.Write2LogFile('##############################################################\n', LogFile)
os.chdir(CurrentPath)
            # if this choice is made, once the building is finished, parallel computing is launched for this one
else:
print('All buildings in the input file have been treated.')
print('###################################################')
break
if not multipleFiles:
LogFile.close()
plt.show()
if multipleFiles:
LogFile.close()
sys.path.remove(path2addgeom) | en | 0.526491 | # @Author : <NAME> # @Email : <EMAIL> # process is launched for the considered building ############################################') # lets build the two main object we'll be playing with in the following' # Rounds of check if we continue with this building or not, see DB_Filter4Simulation.py if other filter are to add #############################################################\n', LogFile) #building_ref.MaxShadingDist = 0 # building_ref.shades = building_ref.getshade(DataBaseInput['Build'][nbcase], DataBaseInput['Shades'], # DataBaseInput['Build'], DB_Data.GeomElement, [])#LogFile) #we need to transform the prvious relatve coordinates into absolute one in order to make plot of several building keeping their location # compåuting the window size for visualization #############################################################\n', LogFile) # lets get back to the Main Folder we were at the very beginning ###################################################################################################################### ######## MAIN INPUT PART ################################################################################## ###################################################################################################################### # This file is only to make graphs of the building geometry given in the GoeJsonF # BuildNum = [1,2,3,4] #list of numbers : number of the buildings to be simulated (order respecting the # PathInputFile = 'String' #Name of the PathFile containing the paths to the data and to energyplus application (see ReadMe) # CorePerim = False / True #True = create automatic core and perimeter zonning of each building. This options increases in a quite # large amount both building process and simulation process. # It can used with either one zone per floor or one zone per heated or none heated zone # building will be generated first, all results will be saved in one single folder # FloorZoning = False / True True = thermal zoning will be realized for each floor of the building, if false, there will be 1 zone # for the heated volume and, if present, one zone for the basement (non heated volume ## PlotBuilding = False / True #True = after each building the building will be plotted for visual check of geometry and thermal zoning. # It include the shadings, if False, all the building will be plotted wihtout the shadings # ZoneOfInterest = 'String' #Text file with Building's ID that are to be considered withoin the BuildNum list, if '' than all building in BuildNum will be considered ###################################################################################################################### ######## LAUNCHING MULTIPROCESS PROCESS PART ################################################################# ###################################################################################################################### # reading the pathfiles and the geojsonfile # lets see if the input file is a dir with several geojson files # if nbfile not in [0]: # continue # getting through the mainfunction above :LaunchProcess() each building sees its idf done in a row within this function #############################################################\n', LogFile) # if choicies is done, once the building is finished parallel computing is launched for this one ##################################################') | 2.026158 | 2 |
2_if.py | dev-gmmahs/python-example | 0 | 6631166 | # 2. How to use the if conditional statement
a = 10
b = 12
if a is b:
    print("a and b are the same")
else:
    print("a and b are different")
str1 = "hello"
str2 = "hello"
# is or ==
if str1 is str2:
    print("str1 and str2 are the same")
else:
    print("str1 and str2 are different")
user_id = "asd1234"
user_password = ""
if not (user_id and user_password):
    print("Please enter both your ID and password!")
new_id = "a123"
if len(new_id) < 6:
    print("Your ID must be at least 6 characters long!")
 | # 2. How to use the if conditional statement
a = 10
b = 12
if a is b:
    print("a and b are the same")
else:
    print("a and b are different")
str1 = "hello"
str2 = "hello"
# is or ==
if str1 is str2:
    print("str1 and str2 are the same")
else:
    print("str1 and str2 are different")
user_id = "asd1234"
user_password = ""
if not (user_id and user_password):
    print("Please enter both your ID and password!")
new_id = "a123"
if len(new_id) < 6:
    print("Your ID must be at least 6 characters long!")
| ko | 0.999503 | # 2. 조건문 if 사용법 # is 또는 == | 3.813934 | 4 |
tests/unittests/dumptools/test_var2mod.py | moschams/padl | 0 | 6631167 | import ast
from padl.dumptools import var2mod
class TestFindGlobals:
def test_find_same_name(self):
statement = 'a = run(a)'
tree = ast.parse(statement)
res = var2mod.find_globals(tree)
assert res == {('a', 1), ('run', 0)}
def test_find_in_assignment(self):
statement = 'a = run'
tree = ast.parse(statement)
res = var2mod.find_globals(tree)
assert res == {('run', 0)}
| import ast
from padl.dumptools import var2mod
class TestFindGlobals:
def test_find_same_name(self):
statement = 'a = run(a)'
tree = ast.parse(statement)
res = var2mod.find_globals(tree)
assert res == {('a', 1), ('run', 0)}
def test_find_in_assignment(self):
statement = 'a = run'
tree = ast.parse(statement)
res = var2mod.find_globals(tree)
assert res == {('run', 0)}
| none | 1 | 2.481836 | 2 |
|
DataScience/Matplotlib.py | AlPus108/Python_lessons | 0 | 6631168 | import numpy as np
import matplotlib.pyplot as plt
# pyplot is the key module of the matplotlib library
# We plot the function y = x**2 * e**(-x**2)
# Create a uniformly spaced set of points
X = np.linspace(0, 3, 1001, dtype=np.float32)
print(X) # [0. 0.003 0.006 ... 2.994 2.997 3. ]
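# np.linspace(0, 3, 1001) gives 1001 evenly spaced points, so the step is (3 - 0) / 1000 = 0.003,
# which matches the printed output above.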
# Square 'x'
print(X**2)
# [0.000000e+00 9.000000e-06 3.600000e-05 ... 8.964036e+00 8.982009e+00
# 9.000000e+00]
# The operation is applied to each element of the array separately
# All array operations in NumPy are performed on each element individually.
# If you need something else, there are dedicated functions for that
# Next we evaluate the function
Y = X**2 * np.exp( -(X**2) )
# We apply the formula directly to the whole array.
# The corresponding loops are hidden inside the numpy library
# Let's see what Y equals
print(Y)
# [0.0000000e+00 8.9999194e-06 3.5998706e-05 ... 1.1467591e-03 1.1285910e-03
# 1.1106882e-03]
# We can see that Y first grows and then decreases.
# Let's compute one more function, Z = sin(x) / e**x
Z = np.sin(X) / np.exp(X)
# Print Z
print(Z) # [0.0000000e+00 8.9999194e-06 3.5998706e-05 ... 1.1467591e-03 1.1285910e-03 1.1106882e-03]
# Next, let's try to plot it
# The simplest way to display a plot
plt.plot( X, Y ) # first the X array, then the Y array. They must have the same length.
# If needed, you can also specify here what color the lines should be drawn in.
# 'b-' - 'b' means blue, '-' means a solid line
# the plot function returns an object of type Line2D
print(plt.plot(X, Z, 'r-')) # use a solid red line
# [<matplotlib.lines.Line2D object at 0x0970F4C0>] - the type
# Display the plot on screen
print(plt.show()) # we get two curves on the same axes
# The plot looks strange.
# Let's just look at the plot of sin(X)
S = np.sin(X)
plt.plot(X,S, 'g-')
plt.show()
# Here we did not quite reach pi. Sine changes sign at x = pi, so there the plot looks fine.
# This is how plots are drawn in the simplest case.
# 1:22:58
| import numpy as np
import matplotlib.pyplot as plt
# pyplot is the key module of the matplotlib library
# We plot the function y = x**2 * e**(-x**2)
# Create a uniformly spaced set of points
X = np.linspace(0, 3, 1001, dtype=np.float32)
print(X) # [0. 0.003 0.006 ... 2.994 2.997 3. ]
# Square 'x'
print(X**2)
# [0.000000e+00 9.000000e-06 3.600000e-05 ... 8.964036e+00 8.982009e+00
# 9.000000e+00]
# The operation is applied to each element of the array separately
# All array operations in NumPy are performed on each element individually.
# If you need something else, there are dedicated functions for that
# Next we evaluate the function
Y = X**2 * np.exp( -(X**2) )
# We apply the formula directly to the whole array.
# The corresponding loops are hidden inside the numpy library
# Let's see what Y equals
print(Y)
# [0.0000000e+00 8.9999194e-06 3.5998706e-05 ... 1.1467591e-03 1.1285910e-03
# 1.1106882e-03]
# We can see that Y first grows and then decreases.
# Let's compute one more function, Z = sin(x) / e**x
Z = np.sin(X) / np.exp(X)
# Print Z
print(Z) # [0.0000000e+00 8.9999194e-06 3.5998706e-05 ... 1.1467591e-03 1.1285910e-03 1.1106882e-03]
# Next, let's try to plot it
# The simplest way to display a plot
plt.plot( X, Y ) # first the X array, then the Y array. They must have the same length.
# If needed, you can also specify here what color the lines should be drawn in.
# 'b-' - 'b' means blue, '-' means a solid line
# the plot function returns an object of type Line2D
print(plt.plot(X, Z, 'r-')) # use a solid red line
# [<matplotlib.lines.Line2D object at 0x0970F4C0>] - the type
# Display the plot on screen
print(plt.show()) # we get two curves on the same axes
# The plot looks strange.
# Let's just look at the plot of sin(X)
S = np.sin(X)
plt.plot(X,S, 'g-')
plt.show()
# Here we did not quite reach pi. Sine changes sign at x = pi, so there the plot looks fine.
# This is how plots are drawn in the simplest case.
# 1:22:58
| ru | 0.976292 | # pyplot - ключевой модуль библиотеки matplotlib # Рисуем ф-ю y = x**2 * e**(-)x**2 # Создаем равномерно распределенное множество # [0. 0.003 0.006 ... 2.994 2.997 3. ] # Возведем 'x' в квадрат # [0.000000e+00 9.000000e-06 3.600000e-05 ... 8.964036e+00 8.982009e+00 # 9.000000e+00] # Операция происходит над каждым элементом массива в отдельности # Все операции над массивами в NumPy совершаются с каждым членом отдельно. # Если нужно иное, то на это есть соответствующие ф-и # Дальше вычисляем ф-ю # Формулу применяем прямо над массивом целиком. # Соответствующие циклы будут запрятаны внутрь бибилотеку numpy # Посмотрим чему равен Y # [0.0000000e+00 8.9999194e-06 3.5998706e-05 ... 1.1467591e-03 1.1285910e-03 # 1.1106882e-03] # Видим, что сначала У растет, потом падает. # Выведем еще одну ф-ю Z = sin(x) / e**x # Выводим Z # [0.0000000e+00 8.9999194e-06 3.5998706e-05 ... 1.1467591e-03 1.1285910e-03 1.1106882e-03] # Дальше попробуем ее нарисовать # Простейший способ вывода графика # первый - массив Х, второй - массив Y. Они должны быть одинаковыми. # При необходимости, здесь можно указать, каким цветом нужно рисовать их линии. # 'b-' - 'b' - синяя, '-' - сплошная # ф-я plot возвращает нам объект типа line2d # используем красную сплошную линию # [<matplotlib.lines.Line2D object at 0x0970F4C0>] - тип # Выводим на экран график # получаем два графика на одних осях # На вид график странный. # Давайте посмотрим просто график sin(X) # Здесь мы до числа pi не доехали. Синус меняет знак, когда х = pi. Тогда график норм. # Вот так можно рисовать графики в простейшем случае. # 1:22:58 | 3.722407 | 4 |
examples/osrt_python/tvm_dlr/dlr_inference_example.py | LaudateCorpus1/edgeai-tidl-tools | 15 | 6631169 |
import time
import platform
import os
def load_labels():
with open('../../../test_data/labels.txt', 'r') as f:
return [line.strip() for line in f.readlines()]
if platform.machine() == 'aarch64':
numImages = 100
else :
numImages = 3
# preprocessing / postprocessing for tflite model
def preprocess_for_tflite_inceptionnetv3(image_path):
import cv2
import numpy as np
# read the image using openCV
img = cv2.imread(image_path)
# convert to RGB
img = img[:,:,::-1]
# This TFLite model is trained using 299x299 images.
# The general rule of thumb for classification models
# is to scale the input image while preserving
# the original aspect ratio, so we scale the short edge
# to 299 pixels, and then
# center-crop the scaled image to 224x224
orig_height, orig_width, _ = img.shape
short_edge = min(img.shape[:2])
new_height = (orig_height * 299) // short_edge
new_width = (orig_width * 299) // short_edge
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
startx = new_width//2 - (299//2)
starty = new_height//2 - (299//2)
img = img[starty:starty+299,startx:startx+299]
# apply scaling and mean subtraction.
# if your model is built with an input
# normalization layer, then you might
# need to skip this
img = img.astype('float32')
for mean, scale, ch in zip([128, 128, 128], [0.0078125, 0.0078125, 0.0078125], range(img.shape[2])):
img[:,:,ch] = ((img[:,:,ch] - mean) * scale)
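    # With mean 128 and scale 0.0078125 (= 1/128), 8-bit pixel values in [0, 255] are mapped
    # to roughly [-1, 1] before being fed to the model.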
# convert HWC to NHWC
img = np.expand_dims(img, axis=0)
return img
def postprocess_for_tflite_inceptionnetv3(res):
return res[0].flatten()[1:]
# preprocessing / postprocessing for onnx model
def preprocess_for_onnx_mobilenetv2(image_path):
import cv2
import numpy as np
# read the image using openCV
img = cv2.imread(image_path)
# convert to RGB
img = img[:,:,::-1]
# Most of the onnx models are trained using
# 224x224 images. The general rule of thumb
# is to scale the input image while preserving
# the original aspect ratio so that the
# short edge is 256 pixels, and then
# center-crop the scaled image to 224x224
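    # Worked example (hypothetical input): for a 480x640 image the short edge is 480, so the
    # image is resized to 256x341 (height x width) and the central 224x224 window, starting at
    # (startx, starty) = (58, 16), is cropped out.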
orig_height, orig_width, _ = img.shape
short_edge = min(img.shape[:2])
new_height = (orig_height * 256) // short_edge
new_width = (orig_width * 256) // short_edge
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
startx = new_width//2 - (224//2)
starty = new_height//2 - (224//2)
img = img[starty:starty+224,startx:startx+224]
# apply scaling and mean subtraction.
# if your model is built with an input
# normalization layer, then you might
# need to skip this
img = img.astype('float32')
for mean, scale, ch in zip([123.675, 116.28, 103.53], [0.017125, 0.017507, 0.017429], range(img.shape[2])):
img[:,:,ch] = ((img.astype('float32')[:,:,ch] - mean) * scale)
# convert HWC to NCHW
img = np.expand_dims(np.transpose(img, (2,0,1)),axis=0)
return img
def postprocess_for_onnx_mobilenetv2(res):
return res[0].flatten()
def model_create_and_run(model_dir,
model_input_name,
preprocess_func,
postprocess_func, mIdx):
from dlr import DLRModel
import numpy
print(f'\n\nRunning Inference on Model - {model_dir}\n')
model = DLRModel(model_dir, 'cpu')
test_files = ['../../../test_data/airshow.jpg']
proc_time = 0.0
for i in range(numImages):
img_path = test_files[i%len(test_files)]
img = preprocess_func(img_path)
start_time = time.time()
res = model.run({model_input_name : img})
stop_time = time.time()
proc_time += (stop_time - start_time)*1000
print(f'\n Processing time in ms : {proc_time/numImages:10.1f}\n')
res = postprocess_func(res)
numpy.savetxt(os.path.join(model_dir,"output.txt"), res)
#get TOP-5, TOP-1 results
classes = res.argsort()[-5:][::-1]
imagenet_class_names = load_labels()
names = [imagenet_class_names[x+1].replace(",", "/") for x in classes]
print(f'results for {img_path}:')
for idx, (id, name) in enumerate(zip(classes, names)):
print(f'[{idx}] {id:03d}, {name}')
log = f'\n \nCompleted_Model : {mIdx+1:5d}, Name : {os.path.basename(model_dir):50s}, Total time : {proc_time/numImages:10.2f}, Offload Time : {proc_time/numImages:10.2f} , DDR RW MBs : 0, Output File : output.txt\n \n ' #{classes} \n \n'
print(log)
model_output_directory = '../../../model-artifacts/dlr/tflite_inceptionnetv3'
if platform.machine() == 'aarch64':
model_output_directory = model_output_directory+'_device'
model_create_and_run(model_output_directory, 'input',
preprocess_for_tflite_inceptionnetv3,
postprocess_for_tflite_inceptionnetv3, 0)
model_output_directory = '../../../model-artifacts/dlr/onnx_mobilenetv2'
if platform.machine() == 'aarch64':
model_output_directory = model_output_directory+'_device'
model_create_and_run('../../../model-artifacts/dlr/onnx_mobilenetv2', 'input.1',
preprocess_for_onnx_mobilenetv2,
postprocess_for_onnx_mobilenetv2, 1)
| import time
import platform
import os
def load_labels():
with open('../../../test_data/labels.txt', 'r') as f:
return [line.strip() for line in f.readlines()]
if platform.machine() == 'aarch64':
numImages = 100
else :
numImages = 3
# preprocessing / postprocessing for tflite model
def preprocess_for_tflite_inceptionnetv3(image_path):
import cv2
import numpy as np
# read the image using openCV
img = cv2.imread(image_path)
# convert to RGB
img = img[:,:,::-1]
# This TFLite model is trained using 299x299 images.
# The general rule of thumb for classification models
# is to scale the input image while preserving
# the original aspect ratio, so we scale the short edge
# to 299 pixels, and then
# center-crop the scaled image to 224x224
orig_height, orig_width, _ = img.shape
short_edge = min(img.shape[:2])
new_height = (orig_height * 299) // short_edge
new_width = (orig_width * 299) // short_edge
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
startx = new_width//2 - (299//2)
starty = new_height//2 - (299//2)
img = img[starty:starty+299,startx:startx+299]
# apply scaling and mean subtraction.
# if your model is built with an input
# normalization layer, then you might
# need to skip this
img = img.astype('float32')
for mean, scale, ch in zip([128, 128, 128], [0.0078125, 0.0078125, 0.0078125], range(img.shape[2])):
img[:,:,ch] = ((img[:,:,ch] - mean) * scale)
# convert HWC to NHWC
img = np.expand_dims(img, axis=0)
return img
def postprocess_for_tflite_inceptionnetv3(res):
return res[0].flatten()[1:]
# preprocessing / postprocessing for onnx model
def preprocess_for_onnx_mobilenetv2(image_path):
import cv2
import numpy as np
# read the image using openCV
img = cv2.imread(image_path)
# convert to RGB
img = img[:,:,::-1]
# Most of the onnx models are trained using
# 224x224 images. The general rule of thumb
# is to scale the input image while preserving
# the original aspect ratio so that the
# short edge is 256 pixels, and then
# center-crop the scaled image to 224x224
orig_height, orig_width, _ = img.shape
short_edge = min(img.shape[:2])
new_height = (orig_height * 256) // short_edge
new_width = (orig_width * 256) // short_edge
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
startx = new_width//2 - (224//2)
starty = new_height//2 - (224//2)
img = img[starty:starty+224,startx:startx+224]
# apply scaling and mean subtraction.
# if your model is built with an input
# normalization layer, then you might
# need to skip this
img = img.astype('float32')
for mean, scale, ch in zip([123.675, 116.28, 103.53], [0.017125, 0.017507, 0.017429], range(img.shape[2])):
img[:,:,ch] = ((img.astype('float32')[:,:,ch] - mean) * scale)
# convert HWC to NCHW
img = np.expand_dims(np.transpose(img, (2,0,1)),axis=0)
return img
def postprocess_for_onnx_mobilenetv2(res):
return res[0].flatten()
def model_create_and_run(model_dir,
model_input_name,
preprocess_func,
postprocess_func, mIdx):
from dlr import DLRModel
import numpy
print(f'\n\nRunning Inference on Model - {model_dir}\n')
model = DLRModel(model_dir, 'cpu')
test_files = ['../../../test_data/airshow.jpg']
proc_time = 0.0
for i in range(numImages):
img_path = test_files[i%len(test_files)]
img = preprocess_func(img_path)
start_time = time.time()
res = model.run({model_input_name : img})
stop_time = time.time()
proc_time += (stop_time - start_time)*1000
print(f'\n Processing time in ms : {proc_time/numImages:10.1f}\n')
res = postprocess_func(res)
numpy.savetxt(os.path.join(model_dir,"output.txt"), res)
#get TOP-5, TOP-1 results
classes = res.argsort()[-5:][::-1]
imagenet_class_names = load_labels()
names = [imagenet_class_names[x+1].replace(",", "/") for x in classes]
print(f'results for {img_path}:')
for idx, (id, name) in enumerate(zip(classes, names)):
print(f'[{idx}] {id:03d}, {name}')
log = f'\n \nCompleted_Model : {mIdx+1:5d}, Name : {os.path.basename(model_dir):50s}, Total time : {proc_time/numImages:10.2f}, Offload Time : {proc_time/numImages:10.2f} , DDR RW MBs : 0, Output File : output.txt\n \n ' #{classes} \n \n'
print(log)
model_output_directory = '../../../model-artifacts/dlr/tflite_inceptionnetv3'
if platform.machine() == 'aarch64':
model_output_directory = model_output_directory+'_device'
model_create_and_run(model_output_directory, 'input',
preprocess_for_tflite_inceptionnetv3,
postprocess_for_tflite_inceptionnetv3, 0)
model_output_directory = '../../../model-artifacts/dlr/onnx_mobilenetv2'
if platform.machine() == 'aarch64':
model_output_directory = model_output_directory+'_device'
model_create_and_run('../../../model-artifacts/dlr/onnx_mobilenetv2', 'input.1',
preprocess_for_onnx_mobilenetv2,
postprocess_for_onnx_mobilenetv2, 1) | en | 0.797437 | # preprocessing / postprocessing for tflite model # read the image using openCV # convert to RGB # This TFLite model is trained using 299x299 images. # The general rule of thumb for classification models # is to scale the input image while preserving # the original aspect ratio, so we scale the short edge # to 299 pixels, and then # center-crop the scaled image to 224x224 # apply scaling and mean subtraction. # if your model is built with an input # normalization layer, then you might # need to skip this # convert HWC to NHWC # preprocessing / postprocessing for onnx model # read the image using openCV # convert to RGB # Most of the onnx models are trained using # 224x224 images. The general rule of thumb # is to scale the input image while preserving # the original aspect ratio so that the # short edge is 256 pixels, and then # center-crop the scaled image to 224x224 # apply scaling and mean subtraction. # if your model is built with an input # normalization layer, then you might # need to skip this # convert HWC to NCHW #get TOP-5, TOP-1 results #{classes} \n \n' | 2.940656 | 3 |
RemoteNAO-client-host/nao_remotenao/scripts/teleop_rn.py | anoxil/RemoteNAO | 0 | 6631170 |
#!/usr/bin/env python
import rospy, subprocess
from socketIO_client_nexus import SocketIO
from geometry_msgs.msg import Twist, Vector3
socketIO = SocketIO('https://remote-nao.herokuapp.com')
linear_x = 0
angular_z = 0
def changeMovement(*args):
"""Function which modifies the linear and angular velocity of the robot"""
movement = args[0]
global linear_x
global angular_z
if (movement == "stop"):
linear_x = 0
angular_z = 0
elif (movement == "forward"):
if (linear_x >= 1):
print("Impossible d'avancer plus.")
return
linear_x = linear_x + 0.2
elif (movement == "backward"):
if (linear_x <= -1):
print("Impossible de reculer plus.")
return
linear_x = linear_x - 0.2
elif (movement == "left"):
if (angular_z >= 1):
print("Impossible de gaucher plus.")
return
angular_z = angular_z + 0.2
elif (movement == "right"):
if (angular_z <= -1):
print("Impossible de droiter plus.")
return
angular_z = angular_z - 0.2
else:
print("Instruction has not been understood.")
linear = Vector3()
linear.x = linear_x
linear.y = 0
linear.z = 0
angular = Vector3()
angular.x = 0
angular.y = 0
angular.z = angular_z
instruction = Twist(linear, angular)
pub.publish(instruction)
def teleopRN():
global pub
pub = rospy.Publisher("cmd_vel", Twist, queue_size=(10))
rospy.init_node('teleopRN', anonymous=True)
print("Publishing teleoperation through node " + rospy.get_name() + " ...")
rate = rospy.Rate(10)
socketIO.on("movement_instruction", changeMovement)
socketIO.wait()
"""
while not rospy.is_shutdown():
print("xxxxxx")
rate.sleep()"""
if __name__ == '__main__':
try:
teleopRN()
except rospy.ROSInterruptException:
pass
| #!/usr/bin/env python
import rospy, subprocess
from socketIO_client_nexus import SocketIO
from geometry_msgs.msg import Twist, Vector3
socketIO = SocketIO('https://remote-nao.herokuapp.com')
linear_x = 0
angular_z = 0
def changeMovement(*args):
"""Function which modifies the linear and angular velocity of the robot"""
movement = args[0]
global linear_x
global angular_z
if (movement == "stop"):
linear_x = 0
angular_z = 0
elif (movement == "forward"):
if (linear_x >= 1):
print("Impossible d'avancer plus.")
return
linear_x = linear_x + 0.2
elif (movement == "backward"):
if (linear_x <= -1):
print("Impossible de reculer plus.")
return
linear_x = linear_x - 0.2
elif (movement == "left"):
if (angular_z >= 1):
print("Impossible de gaucher plus.")
return
angular_z = angular_z + 0.2
elif (movement == "right"):
if (angular_z <= -1):
print("Impossible de droiter plus.")
return
angular_z = angular_z - 0.2
else:
print("Instruction has not been understood.")
linear = Vector3()
linear.x = linear_x
linear.y = 0
linear.z = 0
angular = Vector3()
angular.x = 0
angular.y = 0
angular.z = angular_z
instruction = Twist(linear, angular)
pub.publish(instruction)
def teleopRN():
global pub
pub = rospy.Publisher("cmd_vel", Twist, queue_size=(10))
rospy.init_node('teleopRN', anonymous=True)
print("Publishing teleoperation through node " + rospy.get_name() + " ...")
rate = rospy.Rate(10)
socketIO.on("movement_instruction", changeMovement)
socketIO.wait()
"""
while not rospy.is_shutdown():
print("xxxxxx")
rate.sleep()"""
if __name__ == '__main__':
try:
teleopRN()
except rospy.ROSInterruptException:
pass | en | 0.631247 | #!/usr/bin/env python Function which modifies the linear and angular velocity of the robot while not rospy.is_shutdown(): print("xxxxxx") rate.sleep() | 2.518913 | 3 |
mycroft/api/__init__.py | sotirisspyrou/mycroft-core | 1 | 6631171 |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import copy
import requests
from requests import HTTPError
from mycroft.configuration import Configuration
from mycroft.configuration.config import DEFAULT_CONFIG, SYSTEM_CONFIG, \
USER_CONFIG
from mycroft.identity import IdentityManager
from mycroft.version import VersionManager
from mycroft.util import get_arch
# python 2/3 compatibility
from future.utils import iteritems
_paired_cache = False
class Api(object):
""" Generic object to wrap web APIs """
def __init__(self, path):
self.path = path
# Load the config, skipping the REMOTE_CONFIG since we are
# getting the info needed to get to it!
config = Configuration.get([DEFAULT_CONFIG,
SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
config_server = config.get("server")
self.url = config_server.get("url")
self.version = config_server.get("version")
self.identity = IdentityManager.get()
def request(self, params):
self.check_token()
self.build_path(params)
self.old_params = copy(params)
return self.send(params)
def check_token(self):
if self.identity.refresh and self.identity.is_expired():
self.identity = IdentityManager.load()
if self.identity.is_expired():
self.refresh_token()
def refresh_token(self):
data = self.send({
"path": "auth/token",
"headers": {
"Authorization": "Bearer " + self.identity.refresh
}
})
IdentityManager.save(data)
def send(self, params):
method = params.get("method", "GET")
headers = self.build_headers(params)
data = self.build_data(params)
json = self.build_json(params)
query = self.build_query(params)
url = self.build_url(params)
response = requests.request(method, url, headers=headers, params=query,
data=data, json=json, timeout=(3.05, 15))
return self.get_response(response)
def get_response(self, response):
data = self.get_data(response)
if 200 <= response.status_code < 300:
return data
elif response.status_code == 401 \
and not response.url.endswith("auth/token"):
self.refresh_token()
return self.send(self.old_params)
raise HTTPError(data, response=response)
def get_data(self, response):
try:
return response.json()
except:
return response.text
def build_headers(self, params):
headers = params.get("headers", {})
self.add_content_type(headers)
self.add_authorization(headers)
params["headers"] = headers
return headers
def add_content_type(self, headers):
if not headers.__contains__("Content-Type"):
headers["Content-Type"] = "application/json"
def add_authorization(self, headers):
if not headers.__contains__("Authorization"):
headers["Authorization"] = "Bearer " + self.identity.access
def build_data(self, params):
return params.get("data")
def build_json(self, params):
json = params.get("json")
if json and params["headers"]["Content-Type"] == "application/json":
for k, v in iteritems(json):
if v == "":
json[k] = None
params["json"] = json
return json
def build_query(self, params):
return params.get("query")
def build_path(self, params):
path = params.get("path", "")
params["path"] = self.path + path
return params["path"]
def build_url(self, params):
path = params.get("path", "")
version = params.get("version", self.version)
return self.url + "/" + version + "/" + path
class DeviceApi(Api):
""" Web API wrapper for obtaining device-level information """
def __init__(self):
super(DeviceApi, self).__init__("device")
def get_code(self, state):
IdentityManager.update()
return self.request({
"path": "/code?state=" + state
})
def activate(self, state, token):
version = VersionManager.get()
platform = "unknown"
platform_build = ""
# load just the local configs to get platform info
config = Configuration.get([SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
if "enclosure" in config:
platform = config.get("enclosure").get("platform", "unknown")
platform_build = config.get("enclosure").get("platform_build", "")
return self.request({
"method": "POST",
"path": "/activate",
"json": {"state": state,
"token": token,
"coreVersion": version.get("coreVersion"),
"platform": platform,
"platform_build": platform_build,
"enclosureVersion": version.get("enclosureVersion")}
})
def update_version(self):
version = VersionManager.get()
platform = "unknown"
platform_build = ""
# load just the local configs to get platform info
config = Configuration.get([SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
if "enclosure" in config:
platform = config.get("enclosure").get("platform", "unknown")
platform_build = config.get("enclosure").get("platform_build", "")
return self.request({
"method": "PATCH",
"path": "/" + self.identity.uuid,
"json": {"coreVersion": version.get("coreVersion"),
"platform": platform,
"platform_build": platform_build,
"enclosureVersion": version.get("enclosureVersion")}
})
def send_email(self, title, body, sender):
return self.request({
"method": "PUT",
"path": "/" + self.identity.uuid + "/message",
"json": {"title": title, "body": body, "sender": sender}
})
def report_metric(self, name, data):
return self.request({
"method": "POST",
"path": "/" + self.identity.uuid + "/metric/" + name,
"json": data
})
def get(self):
""" Retrieve all device information from the web backend """
return self.request({
"path": "/" + self.identity.uuid
})
def get_settings(self):
""" Retrieve device settings information from the web backend
Returns:
str: JSON string with user configuration information.
"""
return self.request({
"path": "/" + self.identity.uuid + "/setting"
})
def get_location(self):
""" Retrieve device location information from the web backend
Returns:
str: JSON string with user location.
"""
return self.request({
"path": "/" + self.identity.uuid + "/location"
})
def get_subscription(self):
"""
Get information about type of subscrition this unit is connected
to.
Returns: dictionary with subscription information
"""
return self.request({
'path': '/' + self.identity.uuid + '/subscription'})
@property
def is_subscriber(self):
"""
status of subscription. True if device is connected to a paying
subscriber.
"""
try:
return self.get_subscription().get('@type') != 'free'
except:
# If can't retrieve, assume not paired and not a subscriber yet
return False
def get_subscriber_voice_url(self, voice=None):
self.check_token()
archs = {'x86_64': 'x86_64', 'armv7l': 'arm', 'aarch64': 'arm'}
arch = archs.get(get_arch())
if arch:
path = '/' + self.identity.uuid + '/voice?arch=' + arch
return self.request({'path': path})['link']
def find(self):
""" Deprecated, see get_location() """
# TODO: Eliminate ASAP, for backwards compatibility only
return self.get()
def find_setting(self):
""" Deprecated, see get_settings() """
# TODO: Eliminate ASAP, for backwards compatibility only
return self.get_settings()
def find_location(self):
""" Deprecated, see get_location() """
# TODO: Eliminate ASAP, for backwards compatibility only
return self.get_location()
def get_oauth_token(self, dev_cred):
"""
Get Oauth token for dev_credential dev_cred.
Argument:
dev_cred: development credentials identifier
Returns:
json string containing token and additional information
"""
return self.request({
"method": "GET",
"path": "/" + self.identity.uuid + "/token/" + str(dev_cred)
})
class STTApi(Api):
""" Web API wrapper for performing Speech to Text (STT) """
def __init__(self, path):
super(STTApi, self).__init__(path)
def stt(self, audio, language, limit):
""" Web API wrapper for performing Speech to Text (STT)
Args:
audio (bytes): The recorded audio, as in a FLAC file
language (str): A BCP-47 language code, e.g. "en-US"
limit (int): Maximum minutes to transcribe(?)
Returns:
str: JSON structure with transcription results
"""
return self.request({
"method": "POST",
"headers": {"Content-Type": "audio/x-flac"},
"query": {"lang": language, "limit": limit},
"data": audio
})
def has_been_paired():
""" Determine if this device has ever been paired with a web backend
Returns:
bool: True if ever paired with backend (not factory reset)
"""
# This forces a load from the identity file in case the pairing state
# has recently changed
id = IdentityManager.load()
return id.uuid is not None and id.uuid != ""
def is_paired():
""" Determine if this device is actively paired with a web backend
Determines if the installation of Mycroft has been paired by the user
with the backend system, and if that pairing is still active.
Returns:
bool: True if paired with backend
"""
global _paired_cache
if _paired_cache:
# NOTE: This assumes once paired, the unit remains paired. So
# un-pairing must restart the system (or clear this value).
# The Mark 1 does perform a restart on RESET.
return True
try:
api = DeviceApi()
device = api.get()
_paired_cache = api.identity.uuid is not None and \
api.identity.uuid != ""
return _paired_cache
except:
return False
| # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import copy
import requests
from requests import HTTPError
from mycroft.configuration import Configuration
from mycroft.configuration.config import DEFAULT_CONFIG, SYSTEM_CONFIG, \
USER_CONFIG
from mycroft.identity import IdentityManager
from mycroft.version import VersionManager
from mycroft.util import get_arch
# python 2/3 compatibility
from future.utils import iteritems
_paired_cache = False
class Api(object):
""" Generic object to wrap web APIs """
def __init__(self, path):
self.path = path
# Load the config, skipping the REMOTE_CONFIG since we are
# getting the info needed to get to it!
config = Configuration.get([DEFAULT_CONFIG,
SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
config_server = config.get("server")
self.url = config_server.get("url")
self.version = config_server.get("version")
self.identity = IdentityManager.get()
def request(self, params):
self.check_token()
self.build_path(params)
self.old_params = copy(params)
return self.send(params)
def check_token(self):
if self.identity.refresh and self.identity.is_expired():
self.identity = IdentityManager.load()
if self.identity.is_expired():
self.refresh_token()
def refresh_token(self):
data = self.send({
"path": "auth/token",
"headers": {
"Authorization": "Bearer " + self.identity.refresh
}
})
IdentityManager.save(data)
def send(self, params):
method = params.get("method", "GET")
headers = self.build_headers(params)
data = self.build_data(params)
json = self.build_json(params)
query = self.build_query(params)
url = self.build_url(params)
response = requests.request(method, url, headers=headers, params=query,
data=data, json=json, timeout=(3.05, 15))
return self.get_response(response)
def get_response(self, response):
data = self.get_data(response)
if 200 <= response.status_code < 300:
return data
elif response.status_code == 401 \
and not response.url.endswith("auth/token"):
self.refresh_token()
return self.send(self.old_params)
raise HTTPError(data, response=response)
def get_data(self, response):
try:
return response.json()
except:
return response.text
def build_headers(self, params):
headers = params.get("headers", {})
self.add_content_type(headers)
self.add_authorization(headers)
params["headers"] = headers
return headers
def add_content_type(self, headers):
if not headers.__contains__("Content-Type"):
headers["Content-Type"] = "application/json"
def add_authorization(self, headers):
if not headers.__contains__("Authorization"):
headers["Authorization"] = "Bearer " + self.identity.access
def build_data(self, params):
return params.get("data")
def build_json(self, params):
json = params.get("json")
if json and params["headers"]["Content-Type"] == "application/json":
for k, v in iteritems(json):
if v == "":
json[k] = None
params["json"] = json
return json
def build_query(self, params):
return params.get("query")
def build_path(self, params):
path = params.get("path", "")
params["path"] = self.path + path
return params["path"]
def build_url(self, params):
path = params.get("path", "")
version = params.get("version", self.version)
return self.url + "/" + version + "/" + path
class DeviceApi(Api):
""" Web API wrapper for obtaining device-level information """
def __init__(self):
super(DeviceApi, self).__init__("device")
def get_code(self, state):
IdentityManager.update()
return self.request({
"path": "/code?state=" + state
})
def activate(self, state, token):
version = VersionManager.get()
platform = "unknown"
platform_build = ""
# load just the local configs to get platform info
config = Configuration.get([SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
if "enclosure" in config:
platform = config.get("enclosure").get("platform", "unknown")
platform_build = config.get("enclosure").get("platform_build", "")
return self.request({
"method": "POST",
"path": "/activate",
"json": {"state": state,
"token": token,
"coreVersion": version.get("coreVersion"),
"platform": platform,
"platform_build": platform_build,
"enclosureVersion": version.get("enclosureVersion")}
})
def update_version(self):
version = VersionManager.get()
platform = "unknown"
platform_build = ""
# load just the local configs to get platform info
config = Configuration.get([SYSTEM_CONFIG,
USER_CONFIG],
cache=False)
if "enclosure" in config:
platform = config.get("enclosure").get("platform", "unknown")
platform_build = config.get("enclosure").get("platform_build", "")
return self.request({
"method": "PATCH",
"path": "/" + self.identity.uuid,
"json": {"coreVersion": version.get("coreVersion"),
"platform": platform,
"platform_build": platform_build,
"enclosureVersion": version.get("enclosureVersion")}
})
def send_email(self, title, body, sender):
return self.request({
"method": "PUT",
"path": "/" + self.identity.uuid + "/message",
"json": {"title": title, "body": body, "sender": sender}
})
def report_metric(self, name, data):
return self.request({
"method": "POST",
"path": "/" + self.identity.uuid + "/metric/" + name,
"json": data
})
def get(self):
""" Retrieve all device information from the web backend """
return self.request({
"path": "/" + self.identity.uuid
})
def get_settings(self):
""" Retrieve device settings information from the web backend
Returns:
str: JSON string with user configuration information.
"""
return self.request({
"path": "/" + self.identity.uuid + "/setting"
})
def get_location(self):
""" Retrieve device location information from the web backend
Returns:
str: JSON string with user location.
"""
return self.request({
"path": "/" + self.identity.uuid + "/location"
})
def get_subscription(self):
"""
Get information about type of subscription this unit is connected
to.
Returns: dictionary with subscription information
"""
return self.request({
'path': '/' + self.identity.uuid + '/subscription'})
@property
def is_subscriber(self):
"""
status of subscription. True if device is connected to a paying
subscriber.
"""
try:
return self.get_subscription().get('@type') != 'free'
except:
# If can't retrieve, assume not paired and not a subscriber yet
return False
def get_subscriber_voice_url(self, voice=None):
self.check_token()
archs = {'x86_64': 'x86_64', 'armv7l': 'arm', 'aarch64': 'arm'}
arch = archs.get(get_arch())
if arch:
path = '/' + self.identity.uuid + '/voice?arch=' + arch
return self.request({'path': path})['link']
def find(self):
""" Deprecated, see get_location() """
# TODO: Eliminate ASAP, for backwards compatibility only
return self.get()
def find_setting(self):
""" Deprecated, see get_settings() """
# TODO: Eliminate ASAP, for backwards compatibility only
return self.get_settings()
def find_location(self):
""" Deprecated, see get_location() """
# TODO: Eliminate ASAP, for backwards compatibility only
return self.get_location()
def get_oauth_token(self, dev_cred):
"""
Get Oauth token for dev_credential dev_cred.
Argument:
dev_cred: development credentials identifier
Returns:
json string containing token and additional information
"""
return self.request({
"method": "GET",
"path": "/" + self.identity.uuid + "/token/" + str(dev_cred)
})
class STTApi(Api):
""" Web API wrapper for performing Speech to Text (STT) """
def __init__(self, path):
super(STTApi, self).__init__(path)
def stt(self, audio, language, limit):
""" Web API wrapper for performing Speech to Text (STT)
Args:
audio (bytes): The recorded audio, as in a FLAC file
language (str): A BCP-47 language code, e.g. "en-US"
limit (int): Maximum minutes to transcribe(?)
Returns:
str: JSON structure with transcription results
"""
return self.request({
"method": "POST",
"headers": {"Content-Type": "audio/x-flac"},
"query": {"lang": language, "limit": limit},
"data": audio
})
def has_been_paired():
""" Determine if this device has ever been paired with a web backend
Returns:
bool: True if ever paired with backend (not factory reset)
"""
# This forces a load from the identity file in case the pairing state
# has recently changed
id = IdentityManager.load()
return id.uuid is not None and id.uuid != ""
def is_paired():
""" Determine if this device is actively paired with a web backend
Determines if the installation of Mycroft has been paired by the user
with the backend system, and if that pairing is still active.
Returns:
bool: True if paired with backend
"""
global _paired_cache
if _paired_cache:
# NOTE: This assumes once paired, the unit remains paired. So
# un-pairing must restart the system (or clear this value).
# The Mark 1 does perform a restart on RESET.
return True
try:
api = DeviceApi()
device = api.get()
_paired_cache = api.identity.uuid is not None and \
api.identity.uuid != ""
return _paired_cache
except:
return False | en | 0.781634 | # Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # python 2/3 compatibility Generic object to wrap web APIs # Load the config, skipping the REMOTE_CONFIG since we are # getting the info needed to get to it! Web API wrapper for obtaining device-level information # load just the local configs to get platform info # load just the local configs to get platform info Retrieve all device information from the web backend Retrieve device settings information from the web backend Returns: str: JSON string with user configuration information. Retrieve device location information from the web backend Returns: str: JSON string with user location. Get information about type of subscrition this unit is connected to. Returns: dictionary with subscription information status of subscription. True if device is connected to a paying subscriber. # If can't retrieve, assume not paired and not a subscriber yet Deprecated, see get_location() # TODO: Eliminate ASAP, for backwards compatibility only Deprecated, see get_settings() # TODO: Eliminate ASAP, for backwards compatibility only Deprecated, see get_location() # TODO: Eliminate ASAP, for backwards compatibility only Get Oauth token for dev_credential dev_cred. Argument: dev_cred: development credentials identifier Returns: json string containing token and additional information Web API wrapper for performing Speech to Text (STT) Web API wrapper for performing Speech to Text (STT) Args: audio (bytes): The recorded audio, as in a FLAC file language (str): A BCP-47 language code, e.g. "en-US" limit (int): Maximum minutes to transcribe(?) Returns: str: JSON structure with transcription results Determine if this device has ever been paired with a web backend Returns: bool: True if ever paired with backend (not factory reset) # This forces a load from the identity file in case the pairing state # has recently changed Determine if this device is actively paired with a web backend Determines if the installation of Mycroft has been paired by the user with the backend system, and if that pairing is still active. Returns: bool: True if paired with backend # NOTE: This assumes once paired, the unit remains paired. So # un-pairing must restart the system (or clear this value). # The Mark 1 does perform a restart on RESET. | 1.896276 | 2 |
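Illustrative usage of the DeviceApi wrapper from the sample above. This is a sketch only: it assumes a device with a valid identity file, and it uses only methods defined in the sample (is_paired, get_settings, get_location, report_metric).
# Sketch: query the backend for this device, guarded by the pairing check above.
if is_paired():
    api = DeviceApi()
    settings = api.get_settings()            # GET device/<uuid>/setting
    location = api.get_location()            # GET device/<uuid>/location
    api.report_metric("boot", {"ok": True})  # POST device/<uuid>/metric/boot
else:
    print("Device has not been paired with the web backend yet")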
egs/yesno/ASR/transducer/test_transducer.py | TIFOSI528/icefall | 173 | 6631172 |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run this file, do:
cd icefall/egs/yesno/ASR
python ./transducer/test_transducer.py
"""
import k2
import torch
from transducer.decoder import Decoder
from transducer.encoder import Tdnn
from transducer.joiner import Joiner
from transducer.model import Transducer
def test_transducer():
# encoder params
input_dim = 10
output_dim = 20
# decoder params
vocab_size = 3
blank_id = 0
embedding_dim = 128
num_layers = 2
encoder = Tdnn(input_dim, output_dim)
decoder = Decoder(
vocab_size=vocab_size,
embedding_dim=embedding_dim,
blank_id=blank_id,
num_layers=num_layers,
hidden_dim=output_dim,
embedding_dropout=0.0,
rnn_dropout=0.0,
)
joiner = Joiner(output_dim, vocab_size)
transducer = Transducer(encoder=encoder, decoder=decoder, joiner=joiner)
y = k2.RaggedTensor([[1, 2, 1], [1, 1, 1, 2, 1]])
N = y.dim0
T = 50
x = torch.rand(N, T, input_dim)
x_lens = torch.randint(low=30, high=T, size=(N,), dtype=torch.int32)
x_lens[0] = T
loss = transducer(x, x_lens, y)
print(loss)
def main():
test_transducer()
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run this file, do:
cd icefall/egs/yesno/ASR
python ./transducer/test_transducer.py
"""
import k2
import torch
from transducer.decoder import Decoder
from transducer.encoder import Tdnn
from transducer.joiner import Joiner
from transducer.model import Transducer
def test_transducer():
# encoder params
input_dim = 10
output_dim = 20
# decoder params
vocab_size = 3
blank_id = 0
embedding_dim = 128
num_layers = 2
encoder = Tdnn(input_dim, output_dim)
decoder = Decoder(
vocab_size=vocab_size,
embedding_dim=embedding_dim,
blank_id=blank_id,
num_layers=num_layers,
hidden_dim=output_dim,
embedding_dropout=0.0,
rnn_dropout=0.0,
)
joiner = Joiner(output_dim, vocab_size)
transducer = Transducer(encoder=encoder, decoder=decoder, joiner=joiner)
y = k2.RaggedTensor([[1, 2, 1], [1, 1, 1, 2, 1]])
N = y.dim0
T = 50
x = torch.rand(N, T, input_dim)
x_lens = torch.randint(low=30, high=T, size=(N,), dtype=torch.int32)
x_lens[0] = T
loss = transducer(x, x_lens, y)
print(loss)
def main():
test_transducer()
if __name__ == "__main__":
main() | en | 0.770807 | #!/usr/bin/env python3 # Copyright 2021 Xiaomi Corp. (authors: <NAME>) # # See ../../../../LICENSE for clarification regarding multiple authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. To run this file, do: cd icefall/egs/yesno/ASR python ./transducer/test_transducer.py # encoder params # decoder params | 2.321594 | 2 |
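A hedged sketch of how the smoke test above could be reused with other batch sizes; run_smoke_test is not part of icefall, and it only repeats calls that already appear in test_transducer().
def run_smoke_test(model, batch_token_ids, input_dim=10, T=50):
    # batch_token_ids: list of token-id lists, e.g. [[1, 2, 1], [1, 1, 1, 2, 1]]
    y = k2.RaggedTensor(batch_token_ids)
    N = y.dim0                                # batch size from the ragged supervisions
    x = torch.rand(N, T, input_dim)           # random acoustic features
    x_lens = torch.randint(low=30, high=T, size=(N,), dtype=torch.int32)
    x_lens[0] = T                             # keep at least one full-length utterance
    return model(x, x_lens, y)
# e.g. inside test_transducer(): loss = run_smoke_test(transducer, [[1, 2], [2, 1, 1]])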
stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarShapeConstraintSagittaLength.py | hdm-dt-fb/ironpython-stubs | 1 | 6631173 |
class RebarShapeConstraintSagittaLength(RebarShapeConstraint,IDisposable):
"""
A constraint that can be applied to a RebarShapeDefinitionByArc
and drives the height of the arc.
RebarShapeConstraintSagittaLength(paramId: ElementId)
"""
def Dispose(self):
""" Dispose(self: RebarShapeConstraint,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: RebarShapeConstraint,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,paramId):
""" __new__(cls: type,paramId: ElementId) """
pass
| class RebarShapeConstraintSagittaLength(RebarShapeConstraint,IDisposable):
"""
A constraint that can be applied to a RebarShapeDefinitionByArc
and drives the height of the arc.
RebarShapeConstraintSagittaLength(paramId: ElementId)
"""
def Dispose(self):
""" Dispose(self: RebarShapeConstraint,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: RebarShapeConstraint,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,paramId):
""" __new__(cls: type,paramId: ElementId) """
pass | en | 0.490677 | A constraint that can be applied to a RebarShapeDefinitionByArc
and drives the height of the arc.
RebarShapeConstraintSagittaLength(paramId: ElementId) Dispose(self: RebarShapeConstraint,A_0: bool) ReleaseUnmanagedResources(self: RebarShapeConstraint,disposing: bool) __enter__(self: IDisposable) -> object __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature __new__(cls: type,paramId: ElementId) | 2.089741 | 2 |
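A minimal usage sketch for the stub above, assuming a Revit/IronPython context; param_id is a hypothetical Autodesk.Revit.DB.ElementId obtained elsewhere.
# Sketch only: relies on the __enter__/__exit__ members listed in the stub.
with RebarShapeConstraintSagittaLength(param_id) as constraint:
    pass  # attach the constraint to a RebarShapeDefinitionByArc here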
setup.py | ghl3/AlphaFour | 0 | 6631174 |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open('requirements.txt', 'r') as f:
install_requires = [
s for s in [
line.strip(' \n') for line in f
] if not s.startswith('#') and s != ''
]
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='AlphaFour',
version='1.0.0',
description='A python library for creating and using AIs to play ConnectFour',
url='https://github.com/ghl3/AlphaFour',
author='<NAME>',
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=['alphafour'],
install_requires = install_requires,
)
| """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open('requirements.txt', 'r') as f:
install_requires = [
s for s in [
line.strip(' \n') for line in f
] if not s.startswith('#') and s != ''
]
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='AlphaFour',
version='1.0.0',
description='A python library for creating and using AIs to play ConnectFour',
url='https://github.com/ghl3/AlphaFour',
author='<NAME>',
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=['alphafour'],
install_requires = install_requires,
) | en | 0.773672 | A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject # Always prefer setuptools over distutils # io.open is needed for projects that support Python 2.7 # It ensures open() defaults to text mode with universal newlines, # and accepts an argument to specify the text encoding # Python 3 only projects can skip this import # Get the long description from the README file # Arguments marked as "Required" below must be included for upload to PyPI. # Fields marked as "Optional" may be commented out. # You can just specify package directories manually here if your project is # simple. Or you can use find_packages(). # # Alternatively, if you just want to distribute a single Python file, use # the `py_modules` argument instead as follows, which will expect a file # called `my_module.py` to exist: # # py_modules=["my_module"], # | 1.840652 | 2 |
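A small self-contained illustration of the requirements-filtering expression used in the setup.py above; the sample lines are made up.
lines = ["# core deps\n", "numpy>=1.16\n", "\n", "torch\n"]
install_requires = [
    s for s in [line.strip(' \n') for line in lines]
    if not s.startswith('#') and s != ''
]
print(install_requires)  # ['numpy>=1.16', 'torch']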
examples/plotting/server/timeout.py | DuCorey/bokeh | 1 | 6631175 |
import sys
import numpy as np
from bokeh.client import push_session
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
N = 50
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location=None)
p.border_fill_color = 'black'
p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
p.rect(x=50, y=50, width=80, height=80,
line_alpha=0.5, line_color="darkgrey", fill_color=None)
r = p.text(x=[], y=[], text=[], text_color=[],
text_font_size="20pt", text_baseline="middle", text_align="center")
def make_callback(i):
ds = r.data_source
def func():
if i == N-1:
ds.data['x'].append(50)
ds.data['y'].append(95)
ds.data['text'].append("DONE")
ds.data['text_color'].append("white")
else:
ds.data['x'].append(np.random.random()*70 + 15)
ds.data['y'].append(np.random.random()*70 + 15)
ds.data['text_color'].append(RdYlBu3[i%3])
ds.data['text'].append(str(i))
ds.trigger('data', ds.data, ds.data)
func.interval = i * 100
return func
callbacks = [make_callback(i) for i in range(N)]
document = curdoc()
document.add_root(p)
for callback in callbacks:
document.add_timeout_callback(callback, callback.interval)
document.add_timeout_callback(sys.exit, (N+4)*100)
if __name__ == "__main__":
print("\npress ctrl-C to exit")
session = push_session(document)
session.show()
session.loop_until_closed()
|
import sys
import numpy as np
from bokeh.client import push_session
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc
N = 50
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location=None)
p.border_fill_color = 'black'
p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
p.rect(x=50, y=50, width=80, height=80,
line_alpha=0.5, line_color="darkgrey", fill_color=None)
r = p.text(x=[], y=[], text=[], text_color=[],
text_font_size="20pt", text_baseline="middle", text_align="center")
def make_callback(i):
ds = r.data_source
def func():
if i == N-1:
ds.data['x'].append(50)
ds.data['y'].append(95)
ds.data['text'].append("DONE")
ds.data['text_color'].append("white")
else:
ds.data['x'].append(np.random.random()*70 + 15)
ds.data['y'].append(np.random.random()*70 + 15)
ds.data['text_color'].append(RdYlBu3[i%3])
ds.data['text'].append(str(i))
ds.trigger('data', ds.data, ds.data)
func.interval = i * 100
return func
callbacks = [make_callback(i) for i in range(N)]
document = curdoc()
document.add_root(p)
for callback in callbacks:
document.add_timeout_callback(callback, callback.interval)
document.add_timeout_callback(sys.exit, (N+4)*100)
if __name__ == "__main__":
print("\npress ctrl-C to exit")
session = push_session(document)
session.show()
session.loop_until_closed()
| none | 1 | 2.656828 | 3 |
|
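The timeout example above builds one closure per frame and stores the delay on the function object; a stripped-down sketch of that pattern, independent of Bokeh.
def make_tick(i):
    def tick():
        print("tick", i)
    tick.interval = i * 100   # milliseconds, read back when scheduling
    return tick

ticks = [make_tick(i) for i in range(3)]
for t in ticks:
    print(t.interval)         # 0, 100, 200
    t()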
kpc_connector_utils/connector_mssql/config.py | praiwann/kpc-connector-utils | 0 | 6631176 |
from kpc_connector_utils.pusher_s3.config import BasePutS3Config
from kpc_connector_utils.common.base64 import base64_encode as encode
import json
class ConnectorMSSql(BasePutS3Config):
def __init__(self, mssql_connector_env):
super().__init__(mssql_connector_env)
self._hostname = None
self._username = None
self._password = None
self._database = None
self._port = None
self._query_string = None
def __str__(self):
data_dict = {
'hostname': self._hostname,
'username': self._username,
'password': self._password,
'database': self._database,
'port': self._port,
'query_string': self._query_string,
'puts3_config': self.get_data_dict()
}
value = {'MSSQLConnectorEvent': data_dict}
return json.dumps(value)
def set_hostname(self, value):
self._hostname = encode(value)
return self
def set_username(self, value):
self._username = encode(value)
return self
def set_password(self, value):
self._password = encode(value)
return self
def set_database(self, value):
self._database = encode(value)
return self
def set_port(self, value):
port = value
if not isinstance(port, int):
try:
port = int(port)
except Exception:
raise ValueError('Port value should be integer')
self._port = encode(port)
return self
def set_query_string(self, value):
self._query_string = encode(value)
return self
def set_by_dict(self, config: dict):
if config.get('hostname'):
self.set_hostname(config.get('hostname'))
if config.get('username'):
self.set_username(config.get('username'))
if config.get('password'):
self.set_password(config.get('password'))
if config.get('database'):
self.set_database(config.get('database'))
if config.get('port'):
self.set_port(config.get('port'))
if config.get('query_string'):
self.set_query_string(config.get('query_string'))
if config.get('puts3_config'):
super().set_by_dict(config.get('puts3_config'))
return self
|
from kpc_connector_utils.pusher_s3.config import BasePutS3Config
from kpc_connector_utils.common.base64 import base64_encode as encode
import json
class ConnectorMSSql(BasePutS3Config):
def __init__(self, mssql_connector_env):
super().__init__(mssql_connector_env)
self._hostname = None
self._username = None
self._password = None
self._database = None
self._port = None
self._query_string = None
def __str__(self):
data_dict = {
'hostname': self._hostname,
'username': self._username,
'password': self._password,
'database': self._database,
'port': self._port,
'query_string': self._query_string,
'puts3_config': self.get_data_dict()
}
value = {'MSSQLConnectorEvent': data_dict}
return json.dumps(value)
def set_hostname(self, value):
self._hostname = encode(value)
return self
def set_username(self, value):
self._username = encode(value)
return self
def set_password(self, value):
self._password = encode(value)
return self
def set_database(self, value):
self._database = encode(value)
return self
def set_port(self, value):
port = value
if not isinstance(port, int):
try:
port = int(port)
except Exception:
raise ValueError('Port value should be integer')
self._port = encode(port)
return self
def set_query_string(self, value):
self._query_string = encode(value)
return self
def set_by_dict(self, config: dict):
if config.get('hostname'):
self.set_hostname(config.get('hostname'))
if config.get('username'):
self.set_username(config.get('username'))
if config.get('password'):
self.set_password(config.get('password'))
if config.get('database'):
self.set_database(config.get('database'))
if config.get('port'):
self.set_port(config.get('port'))
if config.get('query_string'):
self.set_query_string(config.get('query_string'))
if config.get('puts3_config'):
super().set_by_dict(config.get('puts3_config'))
return self
| none | 1 | 2.114216 | 2 |
|
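A hedged usage sketch for the ConnectorMSSql builder above. The environment value and connection details are placeholders, and the behaviour of the inherited BasePutS3Config (constructor, get_data_dict) is assumed rather than shown here.
config = ConnectorMSSql(mssql_connector_env="dev")      # "dev" is a placeholder
config.set_hostname("db.example.local") \
      .set_username("reader") \
      .set_password("not-a-real-password") \
      .set_database("sales") \
      .set_port(1433) \
      .set_query_string("SELECT TOP 10 * FROM orders")
event_json = str(config)   # base64-encoded fields wrapped in an MSSQLConnectorEvent payload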
demo_pandas/dataframe_basic_7_selected_simple.py | caserwin/daily-learning-python | 1 | 6631177 | # -*- coding: utf-8 -*-
# @Time : 2018/10/3 2:36 PM
# @Author : yidxue
import pandas as pd
from common.util_function import *
data = [[1, 2, 3, 4],
[4, 5, 6, 8],
[2, 3, 5, 9]]
df = pd.DataFrame(data=data, index=['a', 'b', 'c'], columns=['A', 'B', 'C', 'D'])
print_line("[]使用示例:根据column name获取")
print_br(df['A']) # 取出A列
print_br(df[['A', 'B']]) # 取出A,B两列
print_br(df[0:2]) # 取出前2行
print_line("loc 使用示例:loc根据index name和 column name 定位元素")
print_br(df.loc[['a', 'c'], ['A', 'B']])
print_br(df.loc['a':'c', ['A', 'B']])
print_line("iloc 使用示例:iloc 根据行数和列数的下标(index)来定位元素")
print_br("选取第2行,第2列元素:\n" + str(df.iloc[1, 1]))
print_br("选取第3行:\n" + str(df.iloc[2:3]))
print_br("选取第1,2行,第1列:\n" + str(df.iloc[0:2, 0]))
print_br("选取第1,2行,第1,3列:\n" + str(df.iloc[[0, 1], [0, 2]]))
print_line("返回对应的行为True,且列为’B'的DataFrame")
mask1 = [False, True, True]
print_br(df.loc[mask1, 'B'])
mask1 = [False, True, False]
mask2 = [True, False, True, True]
print_br(df.iloc[mask1, mask2])
| # -*- coding: utf-8 -*-
# @Time : 2018/10/3 2:36 PM
# @Author : yidxue
import pandas as pd
from common.util_function import *
data = [[1, 2, 3, 4],
[4, 5, 6, 8],
[2, 3, 5, 9]]
df = pd.DataFrame(data=data, index=['a', 'b', 'c'], columns=['A', 'B', 'C', 'D'])
print_line("[]使用示例:根据column name获取")
print_br(df['A']) # 取出A列
print_br(df[['A', 'B']]) # 取出A,B两列
print_br(df[0:2]) # 取出前2行
print_line("loc 使用示例:loc根据index name和 column name 定位元素")
print_br(df.loc[['a', 'c'], ['A', 'B']])
print_br(df.loc['a':'c', ['A', 'B']])
print_line("iloc 使用示例:iloc 根据行数和列数的下标(index)来定位元素")
print_br("选取第2行,第2列元素:\n" + str(df.iloc[1, 1]))
print_br("选取第3行:\n" + str(df.iloc[2:3]))
print_br("选取第1,2行,第1列:\n" + str(df.iloc[0:2, 0]))
print_br("选取第1,2行,第1,3列:\n" + str(df.iloc[[0, 1], [0, 2]]))
print_line("返回对应的行为True,且列为’B'的DataFrame")
mask1 = [False, True, True]
print_br(df.loc[mask1, 'B'])
mask1 = [False, True, False]
mask2 = [True, False, True, True]
print_br(df.iloc[mask1, mask2])
| zh | 0.467831 | # -*- coding: utf-8 -*- # @Time : 2018/10/3 下午2:36 # @Author : yidxue # 取出A列 # 取出A,B两列 # 取出前2行 | 3.580591 | 4 |
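One more selection idiom in the same spirit as the demo above, shown for contrast with the explicit mask lists: boolean conditions built from a column (standard pandas, reusing df and print_br from the sample).
print_br(df.loc[df['A'] > 2, 'B'])            # rows where column A > 2, column B only
print_br(df[(df['A'] > 1) & (df['D'] < 9)])   # rows where A > 1 and D < 9, all columns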
qa/web_tests/tests/keypairs/test_import_keypair.py | robertstarmer/aurora | 23 | 6631178 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
import unittest
from qa.web_tests import config
class TestImportKeypairs(unittest.TestCase):
def setUp(self):
self.base_url = config.base_url
self.verificationErrors = []
self.accept_next_alert = True
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(config.implicitly_wait)
def test_import_keypairs(self):
driver = self.driver
driver.maximize_window()
driver.get(self.base_url + "/")
driver.find_element_by_name("username").send_keys(config.username)
driver.find_element_by_name("password").send_keys(<PASSWORD>)
driver.find_element_by_css_selector("input.loginSubmit").click()
Move = ActionChains(driver).move_to_element(driver.find_element_by_link_text("Security"))
Move.perform()
driver.find_element_by_link_text("Keypairs").click()
driver.find_element_by_link_text("Import Keypair").click()
keypair_name = "Test_keypair_%s" % str(randint(100, 10000))
driver.find_element_by_name("name").send_keys(keypair_name)
driver.find_element_by_name("publicKey").send_keys("ssh-rsa <KEY>")
driver.find_element_by_id("submit").click()
self.assertTrue(self.is_element_present(By.XPATH, '//*[@value="%s"]'
% keypair_name))
driver.find_element_by_xpath('//*[@value="%s"]' % keypair_name).click()
driver.find_element_by_xpath('//*[@id="delete"]/span/div').click()
driver.find_element_by_xpath('//*[@id="btn-confirm"]/span').click()
self.assertFalse(self.is_element_present(By.XPATH, '//*[@value="%s"]'
% keypair_name))
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def tearDown(self):
self.driver.save_screenshot(config.screen_path)
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
import unittest
from qa.web_tests import config
class TestImportKeypairs(unittest.TestCase):
def setUp(self):
self.base_url = config.base_url
self.verificationErrors = []
self.accept_next_alert = True
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(config.implicitly_wait)
def test_import_keypairs(self):
driver = self.driver
driver.maximize_window()
driver.get(self.base_url + "/")
driver.find_element_by_name("username").send_keys(config.username)
driver.find_element_by_name("password").send_keys(<PASSWORD>)
driver.find_element_by_css_selector("input.loginSubmit").click()
Move = ActionChains(driver).move_to_element(driver.find_element_by_link_text("Security"))
Move.perform()
driver.find_element_by_link_text("Keypairs").click()
driver.find_element_by_link_text("Import Keypair").click()
keypair_name = "Test_keypair_%s" % str(randint(100, 10000))
driver.find_element_by_name("name").send_keys(keypair_name)
driver.find_element_by_name("publicKey").send_keys("ssh-rsa <KEY>")
driver.find_element_by_id("submit").click()
self.assertTrue(self.is_element_present(By.XPATH, '//*[@value="%s"]'
% keypair_name))
driver.find_element_by_xpath('//*[@value="%s"]' % keypair_name).click()
driver.find_element_by_xpath('//*[@id="delete"]/span/div').click()
driver.find_element_by_xpath('//*[@id="btn-confirm"]/span').click()
self.assertFalse(self.is_element_present(By.XPATH, '//*[@value="%s"]'
% keypair_name))
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def tearDown(self):
self.driver.save_screenshot(config.screen_path)
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main() | none | 1 | 2.515914 | 3 |
|
frappe/website/doctype/blogger/test_blogger.py | pawaranand/phr_frappe | 1 | 6631179 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
test_records = frappe.get_test_records('Blogger') |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
test_records = frappe.get_test_records('Blogger') | en | 0.587297 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt | 1.10133 | 1 |
AdminServer/tests/test_service_manager.py | whoarethebritons/appscale | 0 | 6631180 |
from collections import namedtuple
from mock import MagicMock, mock_open, patch
from tornado.gen import Future
from tornado.httpclient import AsyncHTTPClient
from tornado.testing import AsyncTestCase, gen_test
from appscale.admin.service_manager import (
DatastoreServer, gen, options, psutil, ServerStates, ServiceManager,
ServiceTypes)
FakeHTTPResponse = namedtuple('Response', ['code'])
class FakeProcess(object):
pass
# Skip sleep calls.
patchers = []
def setUpModule():
patcher = patch.object(gen, 'sleep')
patchers.append(patcher)
sleep_response = Future()
sleep_response.set_result(None)
sleep_mock = patcher.start()
sleep_mock.return_value = sleep_response
def tearDownModule():
for patcher in patchers:
patcher.stop()
class TestDatastoreServer(AsyncTestCase):
@gen_test
def test_start(self):
client = AsyncHTTPClient()
response = Future()
response.set_result(FakeHTTPResponse(200))
client.fetch = MagicMock(return_value=response)
fake_process = FakeProcess()
fake_process.is_running = MagicMock(return_value=True)
fake_process.pid = 10000
server = DatastoreServer(4000, client, False)
# Test that a Datastore server process is started.
with patch('appscale.admin.service_manager.open', mock_open(),
create=True):
with patch.object(psutil, 'Popen',
return_value=fake_process) as mock_popen:
yield server.start()
cmd = ['appscale-datastore', '--type', 'cassandra', '--port', '4000']
self.assertEqual(mock_popen.call_count, 1)
self.assertEqual(mock_popen.call_args[0][0], cmd)
def test_from_pid(self):
client = AsyncHTTPClient()
fake_process = FakeProcess()
cmd = ['appscale-datastore', '--type', 'cassandra', '--port', '4000']
fake_process.cmdline = MagicMock(return_value=cmd)
# Test that the server attributes are parsed correctly.
with patch.object(psutil, 'Process', return_value=fake_process):
server = DatastoreServer.from_pid(10000, client)
self.assertEqual(server.port, 4000)
self.assertEqual(server.state, ServerStates.RUNNING)
self.assertEqual(server.type, ServiceTypes.DATASTORE)
class TestServiceManager(AsyncTestCase):
@gen_test
def test_get_state(self):
# Test that server objects are created with the correct PIDs.
with patch('appscale.admin.service_manager.pids_in_slice',
return_value=[10000, 10001]):
with patch.object(DatastoreServer, 'from_pid') as mock_from_pid:
ServiceManager.get_state()
self.assertEqual(mock_from_pid.call_count, 2)
for index, expected_pid in enumerate((10000, 10001)):
self.assertEqual(mock_from_pid.call_args_list[index][0][0], expected_pid)
@gen_test
def test_schedule_service(self):
zk_client = None
if not hasattr(options, 'private_ip'):
options.define('private_ip', '192.168.33.10')
manager = ServiceManager(zk_client)
# Test that servers are started when scheduled.
manager._schedule_service(ServiceTypes.DATASTORE,
{'count': 2, 'verbose': False})
self.assertEqual(len(manager.state), 2)
| from collections import namedtuple
from mock import MagicMock, mock_open, patch
from tornado.gen import Future
from tornado.httpclient import AsyncHTTPClient
from tornado.testing import AsyncTestCase, gen_test
from appscale.admin.service_manager import (
DatastoreServer, gen, options, psutil, ServerStates, ServiceManager,
ServiceTypes)
FakeHTTPResponse = namedtuple('Response', ['code'])
class FakeProcess(object):
pass
# Skip sleep calls.
patchers = []
def setUpModule():
patcher = patch.object(gen, 'sleep')
patchers.append(patcher)
sleep_response = Future()
sleep_response.set_result(None)
sleep_mock = patcher.start()
sleep_mock.return_value = sleep_response
def tearDownModule():
for patcher in patchers:
patcher.stop()
class TestDatastoreServer(AsyncTestCase):
@gen_test
def test_start(self):
client = AsyncHTTPClient()
response = Future()
response.set_result(FakeHTTPResponse(200))
client.fetch = MagicMock(return_value=response)
fake_process = FakeProcess()
fake_process.is_running = MagicMock(return_value=True)
fake_process.pid = 10000
server = DatastoreServer(4000, client, False)
# Test that a Datastore server process is started.
with patch('appscale.admin.service_manager.open', mock_open(),
create=True):
with patch.object(psutil, 'Popen',
return_value=fake_process) as mock_popen:
yield server.start()
cmd = ['appscale-datastore', '--type', 'cassandra', '--port', '4000']
self.assertEqual(mock_popen.call_count, 1)
self.assertEqual(mock_popen.call_args[0][0], cmd)
def test_from_pid(self):
client = AsyncHTTPClient()
fake_process = FakeProcess()
cmd = ['appscale-datastore', '--type', 'cassandra', '--port', '4000']
fake_process.cmdline = MagicMock(return_value=cmd)
# Test that the server attributes are parsed correctly.
with patch.object(psutil, 'Process', return_value=fake_process):
server = DatastoreServer.from_pid(10000, client)
self.assertEqual(server.port, 4000)
self.assertEqual(server.state, ServerStates.RUNNING)
self.assertEqual(server.type, ServiceTypes.DATASTORE)
class TestServiceManager(AsyncTestCase):
@gen_test
def test_get_state(self):
# Test that server objects are created with the correct PIDs.
with patch('appscale.admin.service_manager.pids_in_slice',
return_value=[10000, 10001]):
with patch.object(DatastoreServer, 'from_pid') as mock_from_pid:
ServiceManager.get_state()
self.assertEqual(mock_from_pid.call_count, 2)
for index, expected_pid in enumerate((10000, 10001)):
self.assertEqual(mock_from_pid.call_args_list[index][0][0], expected_pid)
@gen_test
def test_schedule_service(self):
zk_client = None
if not hasattr(options, 'private_ip'):
options.define('private_ip', '192.168.33.10')
manager = ServiceManager(zk_client)
# Test that servers are started when scheduled.
manager._schedule_service(ServiceTypes.DATASTORE,
{'count': 2, 'verbose': False})
self.assertEqual(len(manager.state), 2) | en | 0.922708 | # Skip sleep calls. # Test that a Datastore server process is started. # Test that the server attributes are parsed correctly. # Test that server objects are created with the correct PIDs. # Test that servers are started when scheduled. | 2.134691 | 2 |
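The setUpModule above shows the pattern this suite relies on for mocking Tornado coroutines: put the desired result in a Future and hand it to a MagicMock. A minimal standalone sketch of that pattern (the helper name is illustrative, FakeHTTPResponse is the namedtuple defined above).
from mock import MagicMock
from tornado.gen import Future

def make_async_mock(result):
    future = Future()
    future.set_result(result)
    return MagicMock(return_value=future)

fetch_mock = make_async_mock(FakeHTTPResponse(200))
# `yield fetch_mock(...)` inside a @gen_test coroutine now resolves to the fake response.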
skilltree/models.py | ulope/eve_skill_tree | 1 | 6631181 | from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Model, CharField, ForeignKey, BooleanField, TextField, PositiveSmallIntegerField, ManyToManyField
from django_extensions.db.models import TimeStampedModel
from zope.interface.exceptions import DoesNotImplement
class SkillGroup(TimeStampedModel):
name = CharField("Name", max_length=300)
class Meta(object):
verbose_name = "Skill Group"
verbose_name_plural = "Skill Groups"
ordering = ("name",)
def __unicode__(self):
return self.name
class Skill(TimeStampedModel):
name = CharField("Name", max_length=300)
description = TextField("Description")
rank = PositiveSmallIntegerField("Rank")
published = BooleanField("Published")
group = ForeignKey(SkillGroup, related_name="skills")
required_skills = ManyToManyField("SkillLevel", verbose_name="Required Skills", symmetrical=False, related_name="enables_skills")
class Meta(object):
verbose_name = "Skill"
verbose_name_plural = "Skills"
ordering = ("name", )
def __unicode__(self):
return self.name
def all_required_skills(self):
required_skills = set()
open_list = set((self,))
seen_list = set()
while open_list:
current_skill = open_list.pop()
for req in current_skill.required_skills.all():
required_skills.add(req)
if req.skill not in seen_list:
open_list.add(req.skill)
seen_list.add(current_skill)
return required_skills
class SkillLevel(TimeStampedModel):
skill = ForeignKey(Skill, related_name="levels")
level = PositiveSmallIntegerField("Level")
class Meta(object):
verbose_name = "Skill Level"
verbose_name_plural = "Skill Levels"
ordering = ("skill__name", "level", )
def __unicode__(self):
return u"%s Level %d" % (self.skill.name, self.level)
def previous(self):
if self.level > 1:
try:
return SkillLevel.objects.get(level=self.level - 1, skill=self.skill)
except ObjectDoesNotExist:
return None
| from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Model, CharField, ForeignKey, BooleanField, TextField, PositiveSmallIntegerField, ManyToManyField
from django_extensions.db.models import TimeStampedModel
from zope.interface.exceptions import DoesNotImplement
class SkillGroup(TimeStampedModel):
name = CharField("Name", max_length=300)
class Meta(object):
verbose_name = "Skill Group"
verbose_name_plural = "Skill Groups"
ordering = ("name",)
def __unicode__(self):
return self.name
class Skill(TimeStampedModel):
name = CharField("Name", max_length=300)
description = TextField("Description")
rank = PositiveSmallIntegerField("Rank")
published = BooleanField("Published")
group = ForeignKey(SkillGroup, related_name="skills")
required_skills = ManyToManyField("SkillLevel", verbose_name="Required Skills", symmetrical=False, related_name="enables_skills")
class Meta(object):
verbose_name = "Skill"
verbose_name_plural = "Skills"
ordering = ("name", )
def __unicode__(self):
return self.name
def all_required_skills(self):
required_skills = set()
open_list = set((self,))
seen_list = set()
while open_list:
current_skill = open_list.pop()
for req in current_skill.required_skills.all():
required_skills.add(req)
if req.skill not in seen_list:
open_list.add(req.skill)
seen_list.add(current_skill)
return required_skills
class SkillLevel(TimeStampedModel):
skill = ForeignKey(Skill, related_name="levels")
level = PositiveSmallIntegerField("Level")
class Meta(object):
verbose_name = "Skill Level"
verbose_name_plural = "Skill Levels"
ordering = ("skill__name", "level", )
def __unicode__(self):
return u"%s Level %d" % (self.skill.name, self.level)
def previous(self):
if self.level > 1:
try:
return SkillLevel.objects.get(level=self.level - 1, skill=self.skill)
except ObjectDoesNotExist:
return None
| none | 1 | 2.214718 | 2 |
|
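A hedged sketch of how the models above are meant to be used; it assumes the referenced Skill rows already exist, and the names are illustrative only.
# Build a small prerequisite chain and walk it with all_required_skills().
gunnery = Skill.objects.get(name="Gunnery")
surgical = Skill.objects.get(name="Surgical Strike")
gunnery_5 = SkillLevel.objects.get(skill=gunnery, level=5)
surgical.required_skills.add(gunnery_5)

for req in surgical.all_required_skills():
    print(req)   # prints something like "Gunnery Level 5" via __unicode__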
tests/test_mul.py | drLis/SolidityHomomorphicHiding | 0 | 6631182 |
import pytest
import brownie
def test_mul(test):
e1 = test.e(1)
k = 777
e2 = test.e(1 * 777)
prod = test.mul(e1[0], e1[1], k)
assert e2[0] == prod[0] and e2[1] == prod[1] | import pytest
import brownie
def test_mul(test):
e1 = test.e(1)
k = 777
e2 = test.e(1 * 777)
prod = test.mul(e1[0], e1[1], k)
assert e2[0] == prod[0] and e2[1] == prod[1] | none | 1 | 2.402582 | 2 |
|
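The scalar-multiplication check above generalises naturally with pytest parametrization; this sketch assumes the same `test` fixture (the deployed contract) and that e()/mul() agree for other small scalars, which the original test only demonstrates for (1, 777).
@pytest.mark.parametrize("base,k", [(1, 777), (2, 3), (5, 2)])
def test_mul_matches_encoding(test, base, k):
    e_base = test.e(base)
    e_expected = test.e(base * k)
    prod = test.mul(e_base[0], e_base[1], k)
    assert e_expected[0] == prod[0] and e_expected[1] == prod[1]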
examples/create_scripts/interface-e.py | bendichter/api-python | 32 | 6631183 |
#!/usr/bin/python
import sys
from nwb import nwb_file
from nwb import nwb_utils as utils
"""
Example extending the format: creating a new Interface.
This example uses two extensions defined in directory "extensions"
e-interface.py - defines Interface extension
e-timeseries.py - defines a new timeseries type (MyNewTimeSeries)
The convention of having "e-" in front of the extension (and "-e" at the
end of the create script name) is only used for these examples. Any name for the
create script and extension(s) can be used as long as the actual name of the
extension(s) are referenced by the create script and passed as parameters to
nwb_validate.py when validating NWB files created using one or more extensions.
"""
# create a new NWB file
OUTPUT_DIR = "../created_nwb_files/"
file_name = __file__[0:-3] + ".nwb"
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
settings["identifier"] = utils.create_identifier("MyNewInterface example")
settings["mode"] = "w"
settings["start_time"] = "2016-04-07T03:16:03.604121"
settings["description"] = "Test file demonstrating using a new Interface type using an extension"
# specify the extensions, two are used.
settings['extensions'] = ["extensions/e-timeseries.py", "extensions/e-interface.py"]
f = nwb_file.open(**settings)
########################################################################
# create a module for the interface
mod = f.make_group("<Module>", "my_module")
# create the interface inside the module
ig = mod.make_group("MyNewInterface", attrs={"source": "source of data for MyNewInterface"})
# set attribute and dataset in interface
ig.set_attr("foo", "MyNewInterface - foo attribute")
ig.set_dataset("bar", [1, 2, 3, 4, 5])
# Make some sample data for the MyNewTimeseries
data = [[1.2, 1.3, 1.4], [2.2, 2.3, 2.4], [3.2, 3.3, 3.4], [4.2, 4.3, 4.4], [5.2, 5.3, 5.4]]
times = [0.1, 0.2, 0.3, 0.4, 0.5]
# create the MyNewtimeseries inside the interface
nts = ig.make_group("<new_ts>", "my_new_ts", attrs={"source": "source of data for my_new_ts"})
nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, "unit": "--unit goes here--"})
nts.set_dataset("timestamps", times)
# specify metadata that is part of MyNewTimeSeries type
nts.set_attr("foo", "This added to attribute 'foo'")
nts.set_dataset("bar", [2, 4, 5, 6, 7])
# All done. Close the file
f.close()
| #!/usr/bin/python
import sys
from nwb import nwb_file
from nwb import nwb_utils as utils
"""
Example extending the format: creating a new Interface.
This example uses two extensions defined in directory "extensions"
e-interface.py - defines Interface extension
e-timeseries.py - defines a new timeseries type (MyNewTimeSeries)
The convention of having "e-" in front of the extension (and "-e" at the
end of the create script name) is only used for these examples. Any name for the
create script and extension(s) can be used as long as the actual name of the
extension(s) are referenced by the create script and passed as parameters to
nwb_validate.py when validating NWB files created using one or more extensions.
"""
# create a new NWB file
OUTPUT_DIR = "../created_nwb_files/"
file_name = __file__[0:-3] + ".nwb"
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
settings["identifier"] = utils.create_identifier("MyNewInterface example")
settings["mode"] = "w"
settings["start_time"] = "2016-04-07T03:16:03.604121"
settings["description"] = "Test file demonstrating using a new Interface type using an extension"
# specify the extensions, two are used.
settings['extensions'] = ["extensions/e-timeseries.py", "extensions/e-interface.py"]
f = nwb_file.open(**settings)
########################################################################
# create a module for the interface
mod = f.make_group("<Module>", "my_module")
# create the interface inside the module
ig = mod.make_group("MyNewInterface", attrs={"source": "source of data for MyNewInterface"})
# set attribute and dataset in interface
ig.set_attr("foo", "MyNewInterface - foo attribute")
ig.set_dataset("bar", [1, 2, 3, 4, 5])
# Make some sample data for the MyNewTimeseries
data = [[1.2, 1.3, 1.4], [2.2, 2.3, 2.4], [3.2, 3.3, 3.4], [4.2, 4.3, 4.4], [5.2, 5.3, 5.4]]
times = [0.1, 0.2, 0.3, 0.4, 0.5]
# create the MyNewtimeseries inside the interface
nts = ig.make_group("<new_ts>", "my_new_ts", attrs={"source": "source of data for my_new_ts"})
nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, "unit": "--unit goes here--"})
nts.set_dataset("timestamps", times)
# specify metadata that is part of MyNewTimeSeries type
nts.set_attr("foo", "This added to attribute 'foo'")
nts.set_dataset("bar", [2, 4, 5, 6, 7])
# All done. Close the file
f.close() | en | 0.661074 | #!/usr/bin/python Example extending the format: creating a new Interface. This example uses two extensions defined in director "extensions" e-interface.py - defines Interface extension e-timeseries.py - defines a new timeseries type (MyNewTimeSeries) The convention of having "e-" in front of the extension (and "-e" at the end of the create script name) is only used for these examples. Any name for the create script and extension(s) can be used as long as the actual name of the extension(s) are referenced by the create script and passed as parameters to nwb_validate.py when validating NWB files created using one or more extensions. # create a new NWB file # specify the extensions, two are used. ######################################################################## # create a module for the interface # create the interface inside the module # set attribute and dataset in interface # Make some sample data for the MyNewTimeseries # create the MyNewtimeseries inside the interface # specify metadata that is part of MyNewTimeSeries type # All done. Close the file | 2.403157 | 2 |
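If more than one instance of the extension-defined type is needed, the same calls can be repeated before f.close(); a sketch with illustrative names and values (not part of the original script):
nts2 = ig.make_group("<new_ts>", "my_new_ts_2", attrs={"source": "second example series"})
nts2.set_dataset("data", [[0.1, 0.2, 0.3]],
                 attrs={"conversion": 1.0, "resolution": 0.001, "unit": "--unit goes here--"})
nts2.set_dataset("timestamps", [0.6])
nts2.set_attr("foo", "foo attribute for the second series")
nts2.set_dataset("bar", [1, 3, 5])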
backend/parser/parser_listener.py | anglebinbin/Barista-tool | 1 | 6631184 |
class ParserListener:
def update(self, phase, row):
""" Called when the parser has parsed a new record.
"""
pass
def handle(self, event, message, groups):
""" Called when the parser has parsed a registered event.
"""
pass
def registerKey(self, phase, key):
""" Called when a new key was found in the log data.
"""
pass
def parsingFinished(self):
""" Called when the parser has processed all available streams.
"""
pass
|
class ParserListener:
def update(self, phase, row):
""" Called when the parser has parsed a new record.
"""
pass
def handle(self, event, message, groups):
""" Called when the parser has parsed a registered event.
"""
pass
def registerKey(self, phase, key):
""" Called when a new key was found in the log data.
"""
pass
def parsingFinished(self):
""" Called when the parser has processed all available streams.
"""
pass
| en | 0.932925 | Called when the parser has parsed a new record. Called when the parser has parsed a registered event. Called when a new key was found in the log data. Called when the parser has processed all available streams. | 2.407749 | 2 |
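A minimal concrete listener implementing the callback interface above; a sketch of how a consumer might collect parsed data in memory.
class CollectingListener(ParserListener):
    """Collects parsed records and keys for later inspection."""

    def __init__(self):
        self.rows = []        # (phase, row) tuples
        self.keys = {}        # phase -> set of keys
        self.finished = False

    def update(self, phase, row):
        self.rows.append((phase, row))

    def registerKey(self, phase, key):
        self.keys.setdefault(phase, set()).add(key)

    def handle(self, event, message, groups):
        pass                  # events ignored in this sketch

    def parsingFinished(self):
        self.finished = True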
utils/logger.py | sungyihsun/meta-transfer-learning | 250 | 6631185 |
import os, sys
import logging
class Logger(object):
def __init__(self, log_name):
self.terminal = sys.stdout
if not os.path.exists(os.path.dirname(log_name)):
os.makedirs(os.path.dirname(log_name))
self.log = open(log_name, "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass | import os, sys
import logging
class Logger(object):
def __init__(self, log_name):
self.terminal = sys.stdout
if not os.path.exists(os.path.dirname(log_name)):
os.makedirs(os.path.dirname(log_name))
self.log = open(log_name, "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass | en | 0.922697 | #this flush method is needed for python 3 compatibility. #this handles the flush command by doing nothing. #you might want to specify some extra behavior here. | 3.170983 | 3 |
data/train/python/db857ba8f6183651782147c38c1d8b7685958619roles.py | harshp8l/deep-learning-lang-detection | 84 | 6631186 | class ACObject(object): # Access Control Object
def __init__(self, name):
self.name = name
self.label = name.replace('_', ' ').capitalize()
self.description = self.label + ' ' + self.__class__.__name__.capitalize()
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.label)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.label)
class Permission(ACObject): pass
admin_application = Permission('admin')
access_business = Permission('access_business')
manage_own_profile = Permission('manage_own_profile')
manage_invoices = Permission('manage_invoices')
manage_biz_profile = Permission('manage_biz_profile')
apply_membership = Permission('apply_membership')
view_own_invoices = Permission('view_own_invoices')
search_biz = Permission('search_biz')
approve_membership = Permission('approve_membership')
invite_member = Permission('invite_member')
activate_member = Permission('activate_member')
manage_team = Permission('manage_team')
class Role(ACObject): pass
admin = Role('admin')
admin.permissions = [admin_application]
registered = Role("registered")
registered.permissions = [
apply_membership,
]
member = Role("member")
member.permissions = [
access_business,
manage_own_profile,
search_biz,
view_own_invoices,
#access_own_info,
]
host = Role("host")
host.permissions = [
approve_membership,
invite_member,
manage_biz_profile,
activate_member,
manage_invoices,
manage_team,
]
director = Role("director")
director.permissions = [
approve_membership,
invite_member,
manage_biz_profile,
activate_member,
manage_invoices,
manage_team,
]
ordered_roles = ("admin", "director", "host", "member")
all_roles = dict((v.name, v) for v in globals().values() if isinstance(v, Role))
all_permissions = dict((v.name, v) for v in globals().values() if isinstance(v, Permission))
#TODO : Add additional roles like accountant, event manager when they are
# defined above
team_roles = [host, director]
| class ACObject(object): # Access Control Object
def __init__(self, name):
self.name = name
self.label = name.replace('_', ' ').capitalize()
self.description = self.label + ' ' + self.__class__.__name__.capitalize()
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.label)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.label)
class Permission(ACObject): pass
admin_application = Permission('admin')
access_business = Permission('access_business')
manage_own_profile = Permission('manage_own_profile')
manage_invoices = Permission('manage_invoices')
manage_biz_profile = Permission('manage_biz_profile')
apply_membership = Permission('apply_membership')
view_own_invoices = Permission('view_own_invoices')
search_biz = Permission('search_biz')
approve_membership = Permission('approve_membership')
invite_member = Permission('invite_member')
activate_member = Permission('activate_member')
manage_team = Permission('manage_team')
class Role(ACObject): pass
admin = Role('admin')
admin.permissions = [admin_application]
registered = Role("registered")
registered.permissions = [
apply_membership,
]
member = Role("member")
member.permissions = [
access_business,
manage_own_profile,
search_biz,
view_own_invoices,
#access_own_info,
]
host = Role("host")
host.permissions = [
approve_membership,
invite_member,
manage_biz_profile,
activate_member,
manage_invoices,
manage_team,
]
director = Role("director")
director.permissions = [
approve_membership,
invite_member,
manage_biz_profile,
activate_member,
manage_invoices,
manage_team,
]
ordered_roles = ("admin", "director", "host", "member")
all_roles = dict((v.name, v) for v in globals().values() if isinstance(v, Role))
all_permissions = dict((v.name, v) for v in globals().values() if isinstance(v, Permission))
#TODO : Add additional roles like accountant, event manager when they are
# defined above
team_roles = [host, director]
| en | 0.809133 | # Access Control Object #access_own_info, #TODO : Add additional roles like accountant, event manager when they are # defined above | 2.700788 | 3 |
shop/views/completeorder.py | odrolliv13/Hex-Photos | 0 | 6631187 | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.db.models import Q
from manager import models as pmod
from . import templater
import decimal, datetime
from django.core.mail import send_mail
import requests
from base_app import payment as gateway
def process_request(request):
print("Below are the paramaters!")
print(request.urlparams[0])
if request.method == 'POST':
billing = request.POST.get('billing')
shipping = request.POST.get('shipping')
shippingoptions = request.POST.get('shippingoptions')
selleroption = request.POST.get('sellers')
redirectParams = ""
# This checks the options for the order. If not the page goes back to checkout and will display the errors
if billing is None:
redirectParams += "b"
if shipping is None:
redirectParams += "s"
if shippingoptions is None:
redirectParams += "o"
if len(redirectParams) > 0:
redirect = '/shop/checkout/' + redirectParams
return HttpResponseRedirect(redirect)
billing = billing.replace("}","")
shipping = shipping.replace("}","")
shippingoptions = shippingoptions.replace("}","")
user = pmod.User.objects.get(username = request.user.username)
userbilling = pmod.UserBilling.objects.get(user = user, id = billing)
usershipping = pmod.UserShipping.objects.get(user = user, id = shipping)
useroption = pmod.ShippingOption.objects.get(id = shippingoptions)
if selleroption is None:
seller = pmod.User.objects.get(username = "<EMAIL>")
sellercommission = False
else:
selleroption = selleroption.replace("}","")
seller = pmod.User.objects.get(id = selleroption)
sellercommission = True
transactiontype = pmod.TransactionType.objects.get(transactiontype = "OnlineSale")
# This gets the taxrate for the customer's state
try:
taxRate = pmod.TaxRates.objects.get(state = shipping.state)
except:
taxRate = pmod.TaxRates.objects.get(state = "default")
cart = request.session.get('cart', {})
Objects = {}
for key in cart:
object = pmod.CatalogProduct.objects.get(id=key)
Objects[object] = cart[key]
subtotal = 0
for key in Objects:
subtotal += key.price * Objects[key]
# Here the payment is checked
payment_passed = gateway.payment()
payment_passed.setVariables(userbilling, subtotal, taxRate.taxRate)
if payment_passed.check() == False:
redirectParams = "c"
redirect = '/shop/checkout/' + redirectParams
return HttpResponseRedirect(redirect)
transaction = pmod.Transaction()
transaction.buyer = user
transaction.seller = seller
transaction.transactiontype = transactiontype
transaction.shipping = usershipping
transaction.billing = userbilling
transaction.shippingoption = useroption
transaction.subtotal = subtotal + useroption.price
transaction.taxAmount = subtotal * taxRate.taxRate
transaction.date = datetime.datetime.now()
transaction.commissionNeeded = sellercommission
transaction.packed = False
transaction.save()
cost = 0
for key in Objects:
pack = pmod.TransactionToPack()
pack.transaction = transaction
pack.catalog_product = key
pack.quantity = Objects[key]
pack.packed = False
pack.save()
# The journal entry is created here
journalentry = pmod.JournalEntry()
journalentry.transaction = transaction
journalentry.note = "Online Sale to " + user.username + "for $" + str(transaction.subtotal + transaction.taxAmount)
journalentry.save()
cashledger = pmod.Subledger.objects.get(type = "Cash")
saleledger = pmod.Subledger.objects.get(type = "Sales")
cash = pmod.DebitCredit()
cash.journalentry = journalentry
cash.subledger = cashledger
cash.isDebit = True
cash.amount = transaction.subtotal + transaction.taxAmount
cash.save()
sale = pmod.DebitCredit()
sale.journalentry = journalentry
sale.subledger = saleledger
sale.isDebit = False
sale.amount = transaction.subtotal + transaction.taxAmount
sale.save()
cart = {}
request.session['cart'] = cart
totalcharged = cash.amount
items = ""
for key in Objects:
items += str(key.name) + ": Quantity " + str(Objects[key]) + "\r\n"
message = user.first_name + " " + user.last_name + ":\r\n" + "We have received a payment of $" + str(cash.amount) + " for the following items:\r\n" + items + "\r\nThank you!\r\n\r\nHexPhotos"
send_mail('HexPhotos Payment Received', message, '<EMAIL>', [user.email], fail_silently=False)
EndDate = datetime.date.today() + datetime.timedelta(days=useroption.daystoarrive)
tvars = {
'Objects': Objects,
'userbilling': userbilling,
'usershipping': usershipping,
'useroption': useroption,
'totalcharged': totalcharged,
'EndDate': EndDate,
}
return templater.render_to_response(request, 'completeorder.html', tvars)
| from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.db.models import Q
from manager import models as pmod
from . import templater
import decimal, datetime
from django.core.mail import send_mail
import requests
from base_app import payment as gateway
def process_request(request):
print("Below are the paramaters!")
print(request.urlparams[0])
if request.method == 'POST':
billing = request.POST.get('billing')
shipping = request.POST.get('shipping')
shippingoptions = request.POST.get('shippingoptions')
selleroption = request.POST.get('sellers')
redirectParams = ""
# This checks the options for the order. If not the page goes back to checkout and will display the errors
if billing is None:
redirectParams += "b"
if shipping is None:
redirectParams += "s"
if shippingoptions is None:
redirectParams += "o"
if len(redirectParams) > 0:
redirect = '/shop/checkout/' + redirectParams
return HttpResponseRedirect(redirect)
billing = billing.replace("}","")
shipping = shipping.replace("}","")
shippingoptions = shippingoptions.replace("}","")
user = pmod.User.objects.get(username = request.user.username)
userbilling = pmod.UserBilling.objects.get(user = user, id = billing)
usershipping = pmod.UserShipping.objects.get(user = user, id = shipping)
useroption = pmod.ShippingOption.objects.get(id = shippingoptions)
if selleroption is None:
seller = pmod.User.objects.get(username = "<EMAIL>")
sellercommission = False
else:
selleroption = selleroption.replace("}","")
seller = pmod.User.objects.get(id = selleroption)
sellercommission = True
transactiontype = pmod.TransactionType.objects.get(transactiontype = "OnlineSale")
# This gets the taxrate for the customer's state
try:
taxRate = pmod.TaxRates.objects.get(state = shipping.state)
except:
taxRate = pmod.TaxRates.objects.get(state = "default")
cart = request.session.get('cart', {})
Objects = {}
for key in cart:
object = pmod.CatalogProduct.objects.get(id=key)
Objects[object] = cart[key]
subtotal = 0
for key in Objects:
subtotal += key.price * Objects[key]
# Here the payment is checked
payment_passed = gateway.payment()
payment_passed.setVariables(userbilling, subtotal, taxRate.taxRate)
if payment_passed.check() == False:
redirectParams = "c"
redirect = '/shop/checkout/' + redirectParams
return HttpResponseRedirect(redirect)
transaction = pmod.Transaction()
transaction.buyer = user
transaction.seller = seller
transaction.transactiontype = transactiontype
transaction.shipping = usershipping
transaction.billing = userbilling
transaction.shippingoption = useroption
transaction.subtotal = subtotal + useroption.price
transaction.taxAmount = subtotal * taxRate.taxRate
transaction.date = datetime.datetime.now()
transaction.commissionNeeded = sellercommission
transaction.packed = False
transaction.save()
cost = 0
for key in Objects:
pack = pmod.TransactionToPack()
pack.transaction = transaction
pack.catalog_product = key
pack.quantity = Objects[key]
pack.packed = False
pack.save()
# The journal entry is created here
journalentry = pmod.JournalEntry()
journalentry.transaction = transaction
journalentry.note = "Online Sale to " + user.username + "for $" + str(transaction.subtotal + transaction.taxAmount)
journalentry.save()
cashledger = pmod.Subledger.objects.get(type = "Cash")
saleledger = pmod.Subledger.objects.get(type = "Sales")
cash = pmod.DebitCredit()
cash.journalentry = journalentry
cash.subledger = cashledger
cash.isDebit = True
cash.amount = transaction.subtotal + transaction.taxAmount
cash.save()
sale = pmod.DebitCredit()
sale.journalentry = journalentry
sale.subledger = saleledger
sale.isDebit = False
sale.amount = transaction.subtotal + transaction.taxAmount
sale.save()
cart = {}
request.session['cart'] = cart
totalcharged = cash.amount
items = ""
for key in Objects:
items += str(key.name) + ": Quantity " + str(Objects[key]) + "\r\n"
message = user.first_name + " " + user.last_name + ":\r\n" + "We have received a payment of $" + str(cash.amount) + " for the following items:\r\n" + items + "\r\nThank you!\r\n\r\nHexPhotos"
send_mail('HexPhotos Payment Received', message, '<EMAIL>', [user.email], fail_silently=False)
EndDate = datetime.date.today() + datetime.timedelta(days=useroption.daystoarrive)
tvars = {
'Objects': Objects,
'userbilling': userbilling,
'usershipping': usershipping,
'useroption': useroption,
'totalcharged': totalcharged,
'EndDate': EndDate,
}
return templater.render_to_response(request, 'completeorder.html', tvars)
| en | 0.818201 | # This checks the options for the order. If not the page goes back to checkout and will display the errors # This gets the taxrate for the customer's state # Here the payment is checked # The journal entry is created here | 2.137847 | 2 |
unidad5/c_extensions/setup.py | leliel12/diseno_sci_sfw | 23 | 6631188 | <reponame>leliel12/diseno_sci_sfw
from distutils.core import setup, Extension
def main():
setup(name="fputs",
version="1.0.0",
description="Python interface for the fputs C library function",
author="<your name>",
author_email="<EMAIL>",
ext_modules=[Extension("fputs", ["fputsmodule.c"])])
if __name__ == "__main__":
main()
| from distutils.core import setup, Extension
def main():
setup(name="fputs",
version="1.0.0",
description="Python interface for the fputs C library function",
author="<your name>",
author_email="<EMAIL>",
ext_modules=[Extension("fputs", ["fputsmodule.c"])])
if __name__ == "__main__":
main() | none | 1 | 1.410617 | 1 |
|
resolwe_bio/tests/processes/test_reads_filtering.py | HudoGriz/resolwe-bio | 0 | 6631189 | # pylint: disable=missing-docstring
from resolwe.flow.models import Data
from resolwe.test import tag_process
from resolwe_bio.utils.test import BioProcessTestCase
class ReadsFilteringProcessorTestCase(BioProcessTestCase):
@tag_process('trimmomatic-single')
def test_trimmomatic_single(self):
with self.preparation_stage():
reads = self.prepare_reads()
adapters = self.run_process('upload-fasta-nucl', {'src': 'bbduk_adapters.fasta'})
inputs = {
'reads': reads.pk,
'illuminaclip': {
'adapters': adapters.pk,
'seed_mismatches': 2,
'simple_clip_threshold': 10,
},
'maxinfo': {
'target_length': 10,
'strictness': 0.6,
},
'slidingwindow': {
'window_size': 4,
'required_quality': 15,
},
'trim_bases': {
'leading': 20,
'trailing': 20,
'crop': 40,
'headcrop': 3,
},
'reads_filtering': {
'minlen': 22,
'average_quality': 10,
}}
filtered_reads = self.run_processor('trimmomatic-single', inputs)
self.assertFiles(filtered_reads, 'fastq', ['filtered_reads_trimmomatic_single.fastq.gz'], compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
self.assertFields(filtered_reads, "fastqc_url", [{'file': 'fastqc/reads_fastqc/fastqc_report.html',
'refs': ['fastqc/reads_fastqc']}])
@tag_process('trimmomatic-paired')
def test_trimmomatic_paired(self):
with self.preparation_stage():
inputs = {
'src1': ['rRNA_forw.fastq.gz'],
'src2': ['rRNA_rew.fastq.gz']}
reads = self.run_processor('upload-fastq-paired', inputs)
inputs = {'reads': reads.pk,
'trim_bases': {'trailing': 3}}
filtered_reads = self.run_processor('trimmomatic-paired', inputs)
self.assertFiles(filtered_reads, 'fastq', ['filtered_reads_trimmomatic_paired_fw.fastq.gz'],
compression='gzip')
self.assertFiles(filtered_reads, 'fastq2', ['filtered_reads_trimmomatic_paired_rw.fastq.gz'],
compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
self.assertFields(filtered_reads, "fastqc_url", [{'file': 'fastqc/rRNA_forw_fastqc/fastqc_report.html',
'refs': ['fastqc/rRNA_forw_fastqc']}])
del filtered_reads.output['fastqc_url2'][0]['total_size'] # Non-deterministic output.
self.assertFields(filtered_reads, "fastqc_url2", [{'file': 'fastqc/rRNA_rew_fastqc/fastqc_report.html',
'refs': ['fastqc/rRNA_rew_fastqc']}])
@tag_process('cutadapt-single')
def test_cutadapt_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['cutadapt single.fastq.gz', 'cutadapt_single1.fastq.gz'])
primers_up = self.prepare_adapters('5_prime_adapter.fasta.gz')
primers_down = self.prepare_adapters('3_prime_adapter.fasta.gz')
inputs = {
'reads': reads.id,
'adapters': {
'polya_tail': 5,
'down_primers_seq': ['AGCACCT'],
'up_primers_seq': ['AGCTAAA'],
},
'modify_reads': {
'nextseq_trim': 5,
},
'filtering': {
'minlen': 10,
}
}
cutadapt_single = self.run_process('cutadapt-single', inputs)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_single_trimmed.fastq.gz'],
compression='gzip')
inputs = {
'reads': reads.id,
'adapters': {
'polya_tail': 5,
'down_primers_file': primers_down.id,
'up_primers_file': primers_up.id,
},
'filtering': {
'minlen': 10,
}
}
cutadapt_single = self.run_process('cutadapt-single', inputs)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_single_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-paired')
def test_cutadapt_paired(self):
with self.preparation_stage():
reads = self.prepare_paired_reads(mate1=['cutadapt mate1.fastq.gz'],
mate2=['cutadapt mate2.fastq.gz'])
primers_up = self.prepare_adapters('5_prime_adapter.fasta.gz')
primers_down = self.prepare_adapters('3_prime_adapter.fasta.gz')
inputs = {
'reads': reads.id,
'adapters': {
'mate1_3prime_seq': ['AGCACCT'],
'mate2_3prime_seq': ['AGCACCT'],
'mate1_5prime_seq': ['AGCTAAA'],
'mate2_5prime_seq': ['AGCTAAA'],
},
'filtering': {
'minlen': 10,
},
}
cutadapt_paired = self.run_process('cutadapt-paired', inputs)
self.assertFiles(cutadapt_paired, 'fastq', ['cutadapt_paired_forward_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['cutadapt_paired_reverse_trimmed.fastq.gz'],
compression='gzip')
inputs = {
'reads': reads.id,
'adapters': {
'mate1_3prime_file': primers_down.id,
'mate2_3prime_file': primers_down.id,
'mate1_5prime_file': primers_up.id,
'mate2_5prime_file': primers_up.id,
},
'filtering': {
'minlen': 10,
}
}
cutadapt_paired = self.run_process('cutadapt-paired', inputs)
self.assertFiles(cutadapt_paired, 'fastq', ['cutadapt_paired_forward_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['cutadapt_paired_reverse_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-custom-single', 'cutadapt-custom-paired')
def test_cutadapt_custom(self):
with self.preparation_stage():
reads_single = self.prepare_reads(['cutadapt single.fastq.gz', 'cutadapt_single1.fastq.gz'])
reads_paired = self.prepare_paired_reads(
mate1=['cutadapt mate1.fastq.gz'],
mate2=['cutadapt mate2.fastq.gz']
)
inputs_single = {'reads': reads_single.id}
inputs_paired = {'reads': reads_paired.id}
cutadapt_single = self.run_process('cutadapt-custom-single', inputs_single)
cutadapt_paired = self.run_process('cutadapt-custom-paired', inputs_paired)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_custom_single_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq', ['cutadapt_custom_paired_forward_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['cutadapt_custom_paired_reverse_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-3prime-single')
def test_cutadapt_3prime_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['cutadapt single.fastq.gz', 'cutadapt_single1.fastq.gz'])
inputs = {
'reads': reads.id,
'options': {
'nextseq_trim': 5,
'min_len': 20,
'min_overlap': 20,
'times': 2,
},
}
cutadapt_single = self.run_process('cutadapt-3prime-single', inputs)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_3prime_single_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-corall-single')
def test_cutadapt_corall_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['./corall/input/corall_single.fastq.gz'])
cutadapt_single = self.run_process('cutadapt-corall-single', {'reads': reads.id})
self.assertFiles(cutadapt_single, 'fastq', ['./corall/output/single_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-corall-paired')
def test_cutadapt_corall_paired(self):
with self.preparation_stage():
reads_paired = self.prepare_paired_reads(
mate1=['./corall/input/corall_mate1.fastq.gz'],
mate2=['./corall/input/corall_mate2.fastq.gz']
)
cutadapt_paired = self.run_process('cutadapt-corall-paired', {'reads': reads_paired.id})
self.assertFiles(cutadapt_paired, 'fastq', ['./corall/output/mate1_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['./corall/output/mate2_trimmed.fastq.gz'],
compression='gzip')
@tag_process('bbduk-single')
def test_bbduk_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['bbduk test reads.fastq.gz', 'rRNA forw.fastq.gz'])
inputs = {
'reads': reads.id,
}
filtered_reads = self.run_process('bbduk-single', inputs)
self.assertFiles(filtered_reads, 'fastq', ['bbduk_reads.fastq.gz'], compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
report = {
'file': 'fastqc/bbduk test reads_preprocessed_fastqc/fastqc_report.html',
'refs': [
'fastqc/bbduk test reads_preprocessed_fastqc',
],
}
self.assertFields(filtered_reads, "fastqc_url", [report])
@tag_process('bbduk-paired')
def test_bbduk_paired(self):
with self.preparation_stage():
reads_paired = self.prepare_paired_reads(['rRNA forw.fastq.gz'], ['rRNA_rew.fastq.gz'])
inputs = {
'reads': reads_paired.id,
}
filtered_reads = self.run_process('bbduk-paired', inputs)
self.assertFiles(filtered_reads, 'fastq', ['bbduk_fw_reads.fastq.gz'], compression='gzip')
self.assertFiles(filtered_reads, 'fastq2', ['bbduk_rv_reads.fastq.gz'], compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
report = {
'file': 'fastqc/rRNA forw_preprocessed_fastqc/fastqc_report.html',
'refs': [
'fastqc/rRNA forw_preprocessed_fastqc',
],
}
self.assertFields(filtered_reads, "fastqc_url", [report])
del filtered_reads.output['fastqc_url2'][0]['total_size'] # Non-deterministic output.
report2 = {
'file': 'fastqc/rRNA_rew_preprocessed_fastqc/fastqc_report.html',
'refs': [
'fastqc/rRNA_rew_preprocessed_fastqc',
],
}
self.assertFields(filtered_reads, "fastqc_url2", [report2])
@tag_process('bamclipper')
def test_bamclipper(self):
species = 'Homo sapiens'
build = 'fake_genome_RSEM'
align_input = './bamclipper/input/TP53.bam'
with self.preparation_stage():
bam = self.prepare_bam(
fn=align_input,
species=species,
build=build
)
inputs_bedpe = {'src': './bamclipper/input/TP53.bedpe',
'species': species, 'build': build}
bedpe = self.run_process('upload-bedpe', inputs_bedpe)
# Test if bamclipper has been skipped.
bc_skip_inputs = {'alignment': bam.id, 'skip': True}
skipped_bc = self.run_process('bamclipper', bc_skip_inputs)
self.assertFile(skipped_bc, 'bam', align_input)
bc_data = Data.objects.last()
self.assertEqual(bc_data.process_info, ['Skipping bamclipper step.'])
# Test bamclipper.
inputs_bamclipper = {'alignment': bam.id, 'bedpe': bedpe.id}
clipped = self.run_process('bamclipper', inputs_bamclipper)
self.assertFile(clipped, 'stats', './bamclipper/output/TP53.primerclipped.bam_stats.txt')
self.assertFile(clipped, 'bigwig', './bamclipper/output/TP53.primerclipped.bw')
self.assertFields(clipped, 'species', species)
self.assertFields(clipped, 'build', build)
@tag_process('markduplicates')
def test_markduplicates(self):
species = 'Homo sapiens'
build = 'custombuild'
primerclipped = './bamclipper/output/TP53.primerclipped.bam'
with self.preparation_stage():
bam = self.prepare_bam(
fn=primerclipped,
species=species,
build=build)
# Test if skipped. Input bam should always equal output bam.
md_inputs = {'bam': bam.id, 'skip': True}
skipped_md = self.run_process('markduplicates', md_inputs)
self.assertFile(skipped_md, 'bam', primerclipped)
# Test that removal of duplicates works.
md_inputs = {'bam': bam.id, 'remove_duplicates': True}
removed_md = self.run_process('markduplicates', md_inputs)
def filter_startedon(line):
return line.startswith(b'# Started on:') or line.startswith(b'# MarkDuplicates')
self.assertFileExists(removed_md, 'bam')
self.assertFileExists(removed_md, 'bai')
self.assertFile(removed_md, 'stats', './markduplicate/output/TP53.primerclipped.markduplicates.bam_stats.txt')
self.assertFile(removed_md, 'bigwig', './markduplicate/output/TP53.primerclipped.markduplicates.bw')
self.assertFile(removed_md, 'metrics_file', './markduplicate/output/TP53.primerclipped_metrics.txt',
file_filter=filter_startedon)
self.assertFields(removed_md, 'species', species)
self.assertFields(removed_md, 'build', build)
@tag_process('bqsr')
def test_bqsr(self):
species = 'Homo sapiens'
build = 'custom_build'
with self.preparation_stage():
input_genome = {
# Based on b37 genome, chromosome 19 has been cut from beginning up to position 1207173.
# This includes an exon of STK11. Cutting from the start of the chromosome was done so that
# there is no need to shift any subsequent bed and vcf files.
'src': './bqsr/input/hs_b37_chr17_upto_TP53.fasta.gz',
'species': species,
'build': build
}
input_bam = {
'src': './markduplicate/output/TP53.primerclipped.markduplicates.bam',
'species': species,
'build': build
}
ks_dbsnp = []
for i in ['./bqsr/input/dbsnp_TP53.vcf.gz']: # add more files if needed
ks_dbsnp.append(
self.run_process('upload-variants-vcf', {'src': i, 'species': species, 'build': build})
)
intervals = self.run_process('upload-bed', {
'src': './bqsr/input/TP53.bed',
'species': species,
'build': build})
bam = self.run_process('upload-bam', input_bam)
reference = self.run_process('upload-genome', input_genome)
bqsr_inputs = {
'bam': bam.id,
'reference': reference.id,
'known_sites': [i.id for i in ks_dbsnp],
'intervals': intervals.id
}
bqsr = self.run_process('bqsr', bqsr_inputs)
self.assertFileExists(bqsr, 'bam')
self.assertFileExists(bqsr, 'bai')
self.assertFile(bqsr, 'stats', './bqsr/output/TP53.primerclipped.markduplicates.bam_stats.txt')
self.assertFile(bqsr, 'bigwig', './bqsr/output/TP53.primerclipped.markduplicates.bw')
self.assertFile(bqsr, 'recal_table',
'./bqsr/output/TP53.primerclipped.markduplicates_recalibration.table')
self.assertFields(bqsr, 'species', species)
self.assertFields(bqsr, 'build', build)
# Check if read groups has successfully been added.
bqsr_inputs['read_group'] = '-LB=DAB;-PL=Illumina;-PU=barcode;-SM=sample1'
bqsr_rg = self.run_process('bqsr', bqsr_inputs)
self.assertFileExists(bqsr_rg, 'bam')
self.assertFileExists(bqsr_rg, 'bai')
bqsr_inputs['read_group'] = '-LB=DAB;-PL=Illumina;-PU=barcode;-SM=sample1;-SM=sample2'
bqsr_dbltag = self.run_process('bqsr', bqsr_inputs, Data.STATUS_ERROR)
self.assertEqual(bqsr_dbltag.process_error[0], 'You have duplicate tags in read_group argument.')
| # pylint: disable=missing-docstring
from resolwe.flow.models import Data
from resolwe.test import tag_process
from resolwe_bio.utils.test import BioProcessTestCase
class ReadsFilteringProcessorTestCase(BioProcessTestCase):
@tag_process('trimmomatic-single')
def test_trimmomatic_single(self):
with self.preparation_stage():
reads = self.prepare_reads()
adapters = self.run_process('upload-fasta-nucl', {'src': 'bbduk_adapters.fasta'})
inputs = {
'reads': reads.pk,
'illuminaclip': {
'adapters': adapters.pk,
'seed_mismatches': 2,
'simple_clip_threshold': 10,
},
'maxinfo': {
'target_length': 10,
'strictness': 0.6,
},
'slidingwindow': {
'window_size': 4,
'required_quality': 15,
},
'trim_bases': {
'leading': 20,
'trailing': 20,
'crop': 40,
'headcrop': 3,
},
'reads_filtering': {
'minlen': 22,
'average_quality': 10,
}}
filtered_reads = self.run_processor('trimmomatic-single', inputs)
self.assertFiles(filtered_reads, 'fastq', ['filtered_reads_trimmomatic_single.fastq.gz'], compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
self.assertFields(filtered_reads, "fastqc_url", [{'file': 'fastqc/reads_fastqc/fastqc_report.html',
'refs': ['fastqc/reads_fastqc']}])
@tag_process('trimmomatic-paired')
def test_trimmomatic_paired(self):
with self.preparation_stage():
inputs = {
'src1': ['rRNA_forw.fastq.gz'],
'src2': ['rRNA_rew.fastq.gz']}
reads = self.run_processor('upload-fastq-paired', inputs)
inputs = {'reads': reads.pk,
'trim_bases': {'trailing': 3}}
filtered_reads = self.run_processor('trimmomatic-paired', inputs)
self.assertFiles(filtered_reads, 'fastq', ['filtered_reads_trimmomatic_paired_fw.fastq.gz'],
compression='gzip')
self.assertFiles(filtered_reads, 'fastq2', ['filtered_reads_trimmomatic_paired_rw.fastq.gz'],
compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
self.assertFields(filtered_reads, "fastqc_url", [{'file': 'fastqc/rRNA_forw_fastqc/fastqc_report.html',
'refs': ['fastqc/rRNA_forw_fastqc']}])
del filtered_reads.output['fastqc_url2'][0]['total_size'] # Non-deterministic output.
self.assertFields(filtered_reads, "fastqc_url2", [{'file': 'fastqc/rRNA_rew_fastqc/fastqc_report.html',
'refs': ['fastqc/rRNA_rew_fastqc']}])
@tag_process('cutadapt-single')
def test_cutadapt_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['cutadapt single.fastq.gz', 'cutadapt_single1.fastq.gz'])
primers_up = self.prepare_adapters('5_prime_adapter.fasta.gz')
primers_down = self.prepare_adapters('3_prime_adapter.fasta.gz')
inputs = {
'reads': reads.id,
'adapters': {
'polya_tail': 5,
'down_primers_seq': ['AGCACCT'],
'up_primers_seq': ['AGCTAAA'],
},
'modify_reads': {
'nextseq_trim': 5,
},
'filtering': {
'minlen': 10,
}
}
cutadapt_single = self.run_process('cutadapt-single', inputs)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_single_trimmed.fastq.gz'],
compression='gzip')
inputs = {
'reads': reads.id,
'adapters': {
'polya_tail': 5,
'down_primers_file': primers_down.id,
'up_primers_file': primers_up.id,
},
'filtering': {
'minlen': 10,
}
}
cutadapt_single = self.run_process('cutadapt-single', inputs)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_single_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-paired')
def test_cutadapt_paired(self):
with self.preparation_stage():
reads = self.prepare_paired_reads(mate1=['cutadapt mate1.fastq.gz'],
mate2=['cutadapt mate2.fastq.gz'])
primers_up = self.prepare_adapters('5_prime_adapter.fasta.gz')
primers_down = self.prepare_adapters('3_prime_adapter.fasta.gz')
inputs = {
'reads': reads.id,
'adapters': {
'mate1_3prime_seq': ['AGCACCT'],
'mate2_3prime_seq': ['AGCACCT'],
'mate1_5prime_seq': ['AGCTAAA'],
'mate2_5prime_seq': ['AGCTAAA'],
},
'filtering': {
'minlen': 10,
},
}
cutadapt_paired = self.run_process('cutadapt-paired', inputs)
self.assertFiles(cutadapt_paired, 'fastq', ['cutadapt_paired_forward_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['cutadapt_paired_reverse_trimmed.fastq.gz'],
compression='gzip')
inputs = {
'reads': reads.id,
'adapters': {
'mate1_3prime_file': primers_down.id,
'mate2_3prime_file': primers_down.id,
'mate1_5prime_file': primers_up.id,
'mate2_5prime_file': primers_up.id,
},
'filtering': {
'minlen': 10,
}
}
cutadapt_paired = self.run_process('cutadapt-paired', inputs)
self.assertFiles(cutadapt_paired, 'fastq', ['cutadapt_paired_forward_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['cutadapt_paired_reverse_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-custom-single', 'cutadapt-custom-paired')
def test_cutadapt_custom(self):
with self.preparation_stage():
reads_single = self.prepare_reads(['cutadapt single.fastq.gz', 'cutadapt_single1.fastq.gz'])
reads_paired = self.prepare_paired_reads(
mate1=['cutadapt mate1.fastq.gz'],
mate2=['cutadapt mate2.fastq.gz']
)
inputs_single = {'reads': reads_single.id}
inputs_paired = {'reads': reads_paired.id}
cutadapt_single = self.run_process('cutadapt-custom-single', inputs_single)
cutadapt_paired = self.run_process('cutadapt-custom-paired', inputs_paired)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_custom_single_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq', ['cutadapt_custom_paired_forward_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['cutadapt_custom_paired_reverse_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-3prime-single')
def test_cutadapt_3prime_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['cutadapt single.fastq.gz', 'cutadapt_single1.fastq.gz'])
inputs = {
'reads': reads.id,
'options': {
'nextseq_trim': 5,
'min_len': 20,
'min_overlap': 20,
'times': 2,
},
}
cutadapt_single = self.run_process('cutadapt-3prime-single', inputs)
self.assertFiles(cutadapt_single, 'fastq', ['cutadapt_3prime_single_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-corall-single')
def test_cutadapt_corall_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['./corall/input/corall_single.fastq.gz'])
cutadapt_single = self.run_process('cutadapt-corall-single', {'reads': reads.id})
self.assertFiles(cutadapt_single, 'fastq', ['./corall/output/single_trimmed.fastq.gz'],
compression='gzip')
@tag_process('cutadapt-corall-paired')
def test_cutadapt_corall_paired(self):
with self.preparation_stage():
reads_paired = self.prepare_paired_reads(
mate1=['./corall/input/corall_mate1.fastq.gz'],
mate2=['./corall/input/corall_mate2.fastq.gz']
)
cutadapt_paired = self.run_process('cutadapt-corall-paired', {'reads': reads_paired.id})
self.assertFiles(cutadapt_paired, 'fastq', ['./corall/output/mate1_trimmed.fastq.gz'],
compression='gzip')
self.assertFiles(cutadapt_paired, 'fastq2', ['./corall/output/mate2_trimmed.fastq.gz'],
compression='gzip')
@tag_process('bbduk-single')
def test_bbduk_single(self):
with self.preparation_stage():
reads = self.prepare_reads(['bbduk test reads.fastq.gz', 'rRNA forw.fastq.gz'])
inputs = {
'reads': reads.id,
}
filtered_reads = self.run_process('bbduk-single', inputs)
self.assertFiles(filtered_reads, 'fastq', ['bbduk_reads.fastq.gz'], compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
report = {
'file': 'fastqc/bbduk test reads_preprocessed_fastqc/fastqc_report.html',
'refs': [
'fastqc/bbduk test reads_preprocessed_fastqc',
],
}
self.assertFields(filtered_reads, "fastqc_url", [report])
@tag_process('bbduk-paired')
def test_bbduk_paired(self):
with self.preparation_stage():
reads_paired = self.prepare_paired_reads(['rRNA forw.fastq.gz'], ['rRNA_rew.fastq.gz'])
inputs = {
'reads': reads_paired.id,
}
filtered_reads = self.run_process('bbduk-paired', inputs)
self.assertFiles(filtered_reads, 'fastq', ['bbduk_fw_reads.fastq.gz'], compression='gzip')
self.assertFiles(filtered_reads, 'fastq2', ['bbduk_rv_reads.fastq.gz'], compression='gzip')
del filtered_reads.output['fastqc_url'][0]['total_size'] # Non-deterministic output.
report = {
'file': 'fastqc/rRNA forw_preprocessed_fastqc/fastqc_report.html',
'refs': [
'fastqc/rRNA forw_preprocessed_fastqc',
],
}
self.assertFields(filtered_reads, "fastqc_url", [report])
del filtered_reads.output['fastqc_url2'][0]['total_size'] # Non-deterministic output.
report2 = {
'file': 'fastqc/rRNA_rew_preprocessed_fastqc/fastqc_report.html',
'refs': [
'fastqc/rRNA_rew_preprocessed_fastqc',
],
}
self.assertFields(filtered_reads, "fastqc_url2", [report2])
@tag_process('bamclipper')
def test_bamclipper(self):
species = 'Homo sapiens'
build = 'fake_genome_RSEM'
align_input = './bamclipper/input/TP53.bam'
with self.preparation_stage():
bam = self.prepare_bam(
fn=align_input,
species=species,
build=build
)
inputs_bedpe = {'src': './bamclipper/input/TP53.bedpe',
'species': species, 'build': build}
bedpe = self.run_process('upload-bedpe', inputs_bedpe)
# Test if bamclipper has been skipped.
bc_skip_inputs = {'alignment': bam.id, 'skip': True}
skipped_bc = self.run_process('bamclipper', bc_skip_inputs)
self.assertFile(skipped_bc, 'bam', align_input)
bc_data = Data.objects.last()
self.assertEqual(bc_data.process_info, ['Skipping bamclipper step.'])
# Test bamclipper.
inputs_bamclipper = {'alignment': bam.id, 'bedpe': bedpe.id}
clipped = self.run_process('bamclipper', inputs_bamclipper)
self.assertFile(clipped, 'stats', './bamclipper/output/TP53.primerclipped.bam_stats.txt')
self.assertFile(clipped, 'bigwig', './bamclipper/output/TP53.primerclipped.bw')
self.assertFields(clipped, 'species', species)
self.assertFields(clipped, 'build', build)
@tag_process('markduplicates')
def test_markduplicates(self):
species = 'Homo sapiens'
build = 'custombuild'
primerclipped = './bamclipper/output/TP53.primerclipped.bam'
with self.preparation_stage():
bam = self.prepare_bam(
fn=primerclipped,
species=species,
build=build)
# Test if skipped. Input bam should always equal output bam.
md_inputs = {'bam': bam.id, 'skip': True}
skipped_md = self.run_process('markduplicates', md_inputs)
self.assertFile(skipped_md, 'bam', primerclipped)
# Test that removal of duplicates works.
md_inputs = {'bam': bam.id, 'remove_duplicates': True}
removed_md = self.run_process('markduplicates', md_inputs)
def filter_startedon(line):
return line.startswith(b'# Started on:') or line.startswith(b'# MarkDuplicates')
self.assertFileExists(removed_md, 'bam')
self.assertFileExists(removed_md, 'bai')
self.assertFile(removed_md, 'stats', './markduplicate/output/TP53.primerclipped.markduplicates.bam_stats.txt')
self.assertFile(removed_md, 'bigwig', './markduplicate/output/TP53.primerclipped.markduplicates.bw')
self.assertFile(removed_md, 'metrics_file', './markduplicate/output/TP53.primerclipped_metrics.txt',
file_filter=filter_startedon)
self.assertFields(removed_md, 'species', species)
self.assertFields(removed_md, 'build', build)
@tag_process('bqsr')
def test_bqsr(self):
species = 'Homo sapiens'
build = 'custom_build'
with self.preparation_stage():
input_genome = {
# Based on b37 genome, chromosome 19 has been cut from beginning up to position 1207173.
# This includes an exon of STK11. Cutting from the start of the chromosome was done so that
# there is no need to shift any subsequent bed and vcf files.
'src': './bqsr/input/hs_b37_chr17_upto_TP53.fasta.gz',
'species': species,
'build': build
}
input_bam = {
'src': './markduplicate/output/TP53.primerclipped.markduplicates.bam',
'species': species,
'build': build
}
ks_dbsnp = []
for i in ['./bqsr/input/dbsnp_TP53.vcf.gz']: # add more files if needed
ks_dbsnp.append(
self.run_process('upload-variants-vcf', {'src': i, 'species': species, 'build': build})
)
intervals = self.run_process('upload-bed', {
'src': './bqsr/input/TP53.bed',
'species': species,
'build': build})
bam = self.run_process('upload-bam', input_bam)
reference = self.run_process('upload-genome', input_genome)
bqsr_inputs = {
'bam': bam.id,
'reference': reference.id,
'known_sites': [i.id for i in ks_dbsnp],
'intervals': intervals.id
}
bqsr = self.run_process('bqsr', bqsr_inputs)
self.assertFileExists(bqsr, 'bam')
self.assertFileExists(bqsr, 'bai')
self.assertFile(bqsr, 'stats', './bqsr/output/TP53.primerclipped.markduplicates.bam_stats.txt')
self.assertFile(bqsr, 'bigwig', './bqsr/output/TP53.primerclipped.markduplicates.bw')
self.assertFile(bqsr, 'recal_table',
'./bqsr/output/TP53.primerclipped.markduplicates_recalibration.table')
self.assertFields(bqsr, 'species', species)
self.assertFields(bqsr, 'build', build)
# Check if read groups has successfully been added.
bqsr_inputs['read_group'] = '-LB=DAB;-PL=Illumina;-PU=barcode;-SM=sample1'
bqsr_rg = self.run_process('bqsr', bqsr_inputs)
self.assertFileExists(bqsr_rg, 'bam')
self.assertFileExists(bqsr_rg, 'bai')
bqsr_inputs['read_group'] = '-LB=DAB;-PL=Illumina;-PU=barcode;-SM=sample1;-SM=sample2'
bqsr_dbltag = self.run_process('bqsr', bqsr_inputs, Data.STATUS_ERROR)
self.assertEqual(bqsr_dbltag.process_error[0], 'You have duplicate tags in read_group argument.')
| en | 0.885042 | # pylint: disable=missing-docstring # Non-deterministic output. # Non-deterministic output. # Non-deterministic output. # Non-deterministic output. # Non-deterministic output. # Non-deterministic output. # Test if bamclipper has been skipped. # Test bamclipper. # Test if skipped. Input bam should always equal output bam. # Test that removal of duplicates works. # Based on b37 genome, chromosome 19 has been cut from beginning up to position 1207173. # This includes an exon of STK11. Cutting from the start of the chromosome was done so that # there is no need to shift any subsequent bed and vcf files. # add more files if needed # Check if read groups has successfully been added. | 1.971467 | 2 |
tests/nightly/tools/benchmarking/test_benchmarking.py | alexriedel1/anomalib | 689 | 6631190 | <filename>tests/nightly/tools/benchmarking/test_benchmarking.py
"""Test benchmarking script on a subset of models and categories."""
# Copyright (C) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import sys
# Since tools is not part of the anomalib package, accessing benchmarking requires importlib
sys.path.append("tools/benchmarking")
from importlib.util import find_spec
if find_spec("benchmark") is not None:
from benchmark import distribute
else:
raise Exception("Unable to import benchmarking script for testing")
from pathlib import Path
from omegaconf import OmegaConf
from tests.helpers.dataset import get_dataset_path
def check_tb_logs(model: str):
"""check if TensorBoard logs are generated."""
for device in ["gpu", "cpu"]:
assert (
len(list(Path("runs", f"{model}_{device}").glob("events.out.tfevents.*"))) > 0
), f"Benchmarking script didn't generate tensorboard logs for {model}"
def check_csv(model: str):
"""Check if csv files are generated"""
for device in ["gpu", "cpu"]:
assert Path(
"runs", f"{model}_{device}.csv"
).exists(), f"Benchmarking script didn't generate csv logs for {model}"
def test_benchmarking():
"""Test if benchmarking script produces the required artifacts."""
config_path = "tests/nightly/tools/benchmarking/benchmark_params.yaml"
test_config = OmegaConf.load(config_path)
test_config.grid_search.dataset["path"] = [get_dataset_path()]
distribute(test_config)
check_tb_logs("padim")
check_csv("padim")
| <filename>tests/nightly/tools/benchmarking/test_benchmarking.py
"""Test benchmarking script on a subset of models and categories."""
# Copyright (C) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import sys
# Since tools is not part of the anomalib package, accessing benchmarking requires importlib
sys.path.append("tools/benchmarking")
from importlib.util import find_spec
if find_spec("benchmark") is not None:
from benchmark import distribute
else:
raise Exception("Unable to import benchmarking script for testing")
from pathlib import Path
from omegaconf import OmegaConf
from tests.helpers.dataset import get_dataset_path
def check_tb_logs(model: str):
"""check if TensorBoard logs are generated."""
for device in ["gpu", "cpu"]:
assert (
len(list(Path("runs", f"{model}_{device}").glob("events.out.tfevents.*"))) > 0
), f"Benchmarking script didn't generate tensorboard logs for {model}"
def check_csv(model: str):
"""Check if csv files are generated"""
for device in ["gpu", "cpu"]:
assert Path(
"runs", f"{model}_{device}.csv"
).exists(), f"Benchmarking script didn't generate csv logs for {model}"
def test_benchmarking():
"""Test if benchmarking script produces the required artifacts."""
config_path = "tests/nightly/tools/benchmarking/benchmark_params.yaml"
test_config = OmegaConf.load(config_path)
test_config.grid_search.dataset["path"] = [get_dataset_path()]
distribute(test_config)
check_tb_logs("padim")
check_csv("padim")
| en | 0.809317 | Test benchmarking script on a subset of models and categories. # Copyright (C) 2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions # and limitations under the License. # Since tools is not part of the anomalib package, accessing benchmarking requires importlib check if TensorBoard logs are generated. Check if csv files are generated Test if benchmarking script produces the required artifacts. | 2.183144 | 2 |
cortstim/base/utils/data_structures_utils.py | ncsl/virtual_cortical_stim_epilepsy | 1 | 6631191 | # Data structure manipulations and conversions
import re
import numpy as np
import json
from collections import OrderedDict
from copy import deepcopy
from cortstim.base.utils.log_error import raise_value_error, raise_import_error, initialize_logger
from datetime import date, datetime
logger = initialize_logger(__name__)
try:
    long  # Python 2: `long` exists and is referenced in numeric type checks below
except NameError:  # Python 3: fall back to `int` so those checks still work
    long = int
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32,
np.float64)):
return float(obj)
        elif isinstance(obj, (np.ndarray,)):  # convert numpy arrays to plain lists for JSON
return obj.tolist()
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
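# Illustrative usage sketch (added for clarity, not part of the original module): NumpyEncoder
# is meant to be handed to json.dumps via the `cls` keyword so that numpy scalars/arrays and
# datetime objects serialize without a TypeError. The payload below is hypothetical.
def _example_numpy_encoder_usage():
    payload = {"weights": np.arange(3, dtype=np.float32), "timestamp": datetime.now()}
    return json.dumps(payload, cls=NumpyEncoder)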
def vector2scalar(x):
if not (isinstance(x, np.ndarray)):
return x
else:
y = np.squeeze(x)
if all(y.squeeze() == y[0]):
return y[0]
else:
return reg_dict(x)
def list_of_strings_to_string(lstr, sep=","):
result_str = lstr[0]
for s in lstr[1:]:
result_str += sep + s
return result_str
def dict_str(d):
s = "{"
for key, value in d.items():
s += ("\n" + key + ": " + str(value))
s += "}"
return s
def isequal_string(a, b, case_sensitive=False):
if case_sensitive:
return a == b
else:
try:
return a.lower() == b.lower()
except AttributeError:
logger.warning("Case sensitive comparison!")
return a == b
def split_string_text_numbers(ls):
items = []
for s in ensure_list(ls):
match = re.findall('(\d+|\D+)', s)
if match:
items.append(tuple(match[:2]))
return items
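# Illustrative behavior (hypothetical sensor labels, added for clarity):
#   split_string_text_numbers(["A1", "GPH10"]) -> [("A", "1"), ("GPH", "10")]
# i.e. each label is split into its leading text part and its trailing digits.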
def construct_import_path(path, package="tvb_epilepsy"):
path = path.split(".py")[0]
start = path.find(package)
return path[start:].replace("/", ".")
def formal_repr(instance, attr_dict, sort_dict_flag=False):
""" A formal string representation for an object.
:param attr_dict: dictionary attribute_name: attribute_value
    :param instance: the instance whose class name is used in the representation
"""
class_name = instance.__class__.__name__
formal = class_name + "{"
if sort_dict_flag:
attr_dict = sort_dict(attr_dict)
for key, val in attr_dict.items():
if isinstance(val, dict):
formal += "\n" + key + "=["
for key2, val2 in val.items():
formal += "\n" + str(key2) + " = " + str(val2)
formal += "]"
else:
formal += "\n" + str(key) + " = " + str(val)
return formal + "}"
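# Minimal usage sketch (hypothetical class and attributes, added for illustration): formal_repr
# builds a "ClassName{...}" string from any attribute dictionary, expanding nested dicts.
def _example_formal_repr():
    class _Dummy(object):
        pass
    attrs = {"name": "demo", "params": {"a": 1, "b": 2}}
    return formal_repr(_Dummy(), attrs, sort_dict_flag=True)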
def obj_to_dict(obj):
"""
:param obj: Python object to introspect
:return: dictionary after recursively taking obj fields and their values
"""
if obj is None:
return obj
if isinstance(obj, (str, int, float)):
return obj
if isinstance(obj, (np.float32,)):
return float(obj)
if isinstance(obj, (np.ndarray,)):
return obj.tolist()
if isinstance(obj, list):
ret = []
for val in obj:
ret.append(obj_to_dict(val))
return ret
ret = {}
for key in obj.__dict__:
val = getattr(obj, key, None)
ret[key] = obj_to_dict(val)
return ret
def reg_dict(x, lbl=None, sort=None):
"""
    :param x: a list or numpy vector of values
    :param lbl: a list or numpy vector of labels
    :param sort: None, "ascend" or "descend" to optionally sort entries by value
    :return: OrderedDict mapping "index.label" keys to values
"""
if not (isinstance(x, (str, int, float, list, np.ndarray))):
return x
else:
if not (isinstance(x, list)):
x = np.squeeze(x)
x_no = len(x)
if not (isinstance(lbl, (list, np.ndarray))):
lbl = np.repeat('', x_no)
else:
lbl = np.squeeze(lbl)
labels_no = len(lbl)
total_no = min(labels_no, x_no)
if x_no <= labels_no:
if sort == 'ascend':
ind = np.argsort(x).tolist()
elif sort == 'descend':
ind = np.argsort(x)
ind = ind[::-1].tolist()
else:
ind = range(x_no)
else:
ind = range(total_no)
d = OrderedDict()
for i in ind:
d[str(i) + '.' + str(lbl[i])] = x[i]
if labels_no > total_no:
ind_lbl = np.delete(np.array(range(labels_no)), ind).tolist()
for i in ind_lbl:
d[str(i) + '.' + str(lbl[i])] = None
if x_no > total_no:
ind_x = np.delete(np.array(range(x_no)), ind).tolist()
for i in ind_x:
d[str(i) + '.'] = x[i]
return d
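# Usage sketch (hypothetical region values and labels, added for illustration): reg_dict pairs a
# vector with labels into an OrderedDict keyed by "index.label", optionally sorted by value.
def _example_reg_dict():
    values = np.array([0.2, 0.5, 0.1])
    labels = ["ctx-lh-insula", "ctx-rh-insula", "Left-Amygdala"]
    return reg_dict(values, labels, sort="descend")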
def sort_dict(d):
return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
def dicts_of_lists(dictionary, n=1):
for key, value in dictionary.items():
dictionary[key] = ensure_list(dictionary[key])
if len(dictionary[key]) == 1 and n > 1:
dictionary[key] = dictionary[key] * n
return dictionary
def iterable_to_dict(obj):
d = OrderedDict()
for ind, value in enumerate(obj):
d["%02d" % ind] = value
return d
def dict_to_list_or_tuple(dictionary, output_obj="list"):
dictionary = sort_dict(dictionary)
output = dictionary.values()
if output_obj == "tuple":
output = tuple(output)
return output
def list_of_dicts_to_dicts_of_ndarrays(lst, shape=None):
d = dict(zip(lst[0], zip(*list([d.values() for d in lst]))))
if isinstance(shape, tuple):
for key, val in d.items():
d[key] = np.reshape(np.stack(d[key]), shape)
else:
for key, val in d.items():
d[key] = np.squeeze(np.stack(d[key]))
return d
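# Usage sketch (hypothetical parameter samples, added for illustration): a list of per-sample
# dictionaries is transposed into a single dictionary of stacked numpy arrays.
def _example_list_of_dicts_to_dicts_of_ndarrays():
    samples = [{"x0": 0.1, "K": 1.0}, {"x0": 0.2, "K": 1.5}, {"x0": 0.3, "K": 2.0}]
    return list_of_dicts_to_dicts_of_ndarrays(samples)  # -> {"x0": array of 3 values, "K": array of 3 values}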
def arrays_of_dicts_to_dicts_of_ndarrays(arr):
lst = arr.flatten().tolist()
d = list_of_dicts_to_dicts_of_ndarrays(lst)
for key, val in d.items():
d[key] = np.reshape(d[key], arr.shape)
return d
def dicts_of_lists_to_lists_of_dicts(dictionary):
return [dict(zip(dictionary, t)) for t in zip(*dictionary.values())]
def ensure_string(arg):
if not (isinstance(arg, str)):
if arg is None:
return ""
else:
return ensure_list(arg)[0]
else:
return arg
def ensure_list(arg):
if not (isinstance(arg, list)):
try: # if iterable
if isinstance(arg, (str, dict)):
arg = [arg]
else:
arg = list(arg)
except BaseException: # if not iterable
arg = [arg]
return arg
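# Illustrative behavior (added for clarity; inputs are hypothetical):
#   ensure_list("A1")          -> ["A1"]
#   ensure_list((1, 2, 3))     -> [1, 2, 3]
#   ensure_list(np.array([4])) -> [4]
#   ensure_list(None)          -> [None]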
def set_list_item_by_reference_safely(ind, item, lst):
while ind >= len(lst):
lst.append(None)
lst.__setitem__(ind, item)
def get_list_or_tuple_item_safely(obj, key):
try:
return obj[int(key)]
except BaseException:
return None
def linear_index_to_coordinate_tuples(linear_index, shape):
if len(linear_index) > 0:
coordinates_tuple = np.unravel_index(linear_index, shape)
return zip(*[ca.flatten().tolist() for ca in coordinates_tuple])
else:
return []
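# Usage sketch (added for illustration): flat indices into a 2x3 array map back to coordinates.
def _example_linear_index_to_coordinate_tuples():
    return list(linear_index_to_coordinate_tuples(np.array([1, 4]), (2, 3)))  # [(0, 1), (1, 1)]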
def labels_to_inds(labels, lbls):
idx = []
lbls = ensure_list(lbls)
for i, label in enumerate(labels):
if label in lbls:
idx.append(i)
return np.unique(idx)
def generate_region_labels(n_regions, labels=[], str=". ", numbering=True):
if len(labels) == n_regions:
if numbering:
return np.array([str.join(["%d", "%s"]) % tuple(l)
for l in zip(range(n_regions), labels)])
else:
return labels
else:
return np.array(["%d" % l for l in range(n_regions)])
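# Illustrative behavior (hypothetical region names, added for clarity): when the number of labels
# matches n_regions, numbered labels such as "0. rh-A", "1. rh-B", "2. lh-C" are built with the
# given separator; otherwise plain index strings "0", "1", ... are returned.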
def monopolar_to_bipolar(labels, indices=None, data=None):
if indices is None:
indices = range(len(labels))
bipolar_lbls = []
bipolar_inds = [[], []]
for ind in range(len(indices) - 1):
iS1 = indices[ind]
iS2 = indices[ind + 1]
if (labels[iS1][0] == labels[iS2][0]) and \
int(re.findall(r'\d+', labels[iS1])[0]) == \
int(re.findall(r'\d+', labels[iS2])[0]) - 1:
bipolar_lbls.append(labels[iS1] + "-" + labels[iS2])
bipolar_inds[0].append(iS1)
bipolar_inds[1].append(iS2)
if isinstance(data, np.ndarray):
data = data[bipolar_inds[0]] - data[bipolar_inds[1]]
return bipolar_lbls, bipolar_inds, data
else:
return bipolar_lbls, bipolar_inds
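# Usage sketch (hypothetical contacts and data, added for illustration): consecutive contacts on
# the same electrode ("A1", "A2", "A3") form bipolar channels "A1-A2" and "A2-A3", and the
# corresponding data rows are differenced.
def _example_monopolar_to_bipolar():
    labels = ["A1", "A2", "A3", "B1"]
    data = np.array([[1.0, 1.0], [2.0, 2.0], [4.0, 4.0], [8.0, 8.0]])
    return monopolar_to_bipolar(labels, data=data)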
# This function is meant to confirm that two objects, presumably of the
# same type, are equal, i.e., identical
def assert_equal_objects(obj1, obj2, attributes_dict=None, logger=None):
def print_not_equal_message(attr, field1, field2, logger):
# logger.error("\n\nValueError: Original and read object field "+ attr + " not equal!")
# raise_value_error("\n\nOriginal and read object field " + attr + " not equal!")
logger.warning("Original and read object field " + attr + " not equal!" +
"\nOriginal field:\n" + str(field1) +
"\nRead object field:\n" + str(field2), logger)
if isinstance(obj1, dict):
def get_field1(obj, key):
return obj[key]
if not (isinstance(attributes_dict, dict)):
attributes_dict = dict()
for key in obj1.keys():
attributes_dict.update({key: key})
elif isinstance(obj1, (list, tuple)):
        def get_field1(obj, key):
            return get_list_or_tuple_item_safely(obj, key)
indices = range(len(obj1))
attributes_dict = dict(zip([str(ind) for ind in indices], indices))
else:
def get_field1(obj, attribute):
return getattr(obj, attribute)
if not (isinstance(attributes_dict, dict)):
attributes_dict = dict()
for key in obj1.__dict__.keys():
attributes_dict.update({key: key})
if isinstance(obj2, dict):
def get_field2(obj, key):
return obj.get(key, None)
elif isinstance(obj2, (list, tuple)):
        def get_field2(obj, key):
            return get_list_or_tuple_item_safely(obj, key)
else:
def get_field2(obj, attribute):
return getattr(obj, attribute, None)
equal = True
for attribute in attributes_dict:
# print attributes_dict[attribute]
field1 = get_field1(obj1, attributes_dict[attribute])
field2 = get_field2(obj2, attributes_dict[attribute])
try:
            # TODO: handle more robustly the case of an ndarray of strings, such as model.zmode or pmode
# For non numeric types
if isinstance(field1, str) or isinstance(field1, list) or isinstance(field1, dict) \
or (isinstance(field1, np.ndarray) and field1.dtype.kind in 'OSU'):
if np.any(field1 != field2):
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
# For numeric numpy arrays:
elif isinstance(field1, np.ndarray) and not field1.dtype.kind in 'OSU':
# TODO: handle better accuracy differences, empty matrices and
# complex numbers...
if field1.shape != field2.shape:
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
elif np.any(np.float32(field1) - np.float32(field2) > 0):
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
# For numeric scalar types
elif isinstance(field1, (int, float, long, complex, np.number)):
if np.float32(field1) - np.float32(field2) > 0:
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
else:
equal = assert_equal_objects(field1, field2, logger=logger)
except BaseException:
try:
logger.warning("Comparing str(objects) for field "
+ attributes_dict[attribute] + " because there was an error!", logger)
if np.any(str(field1) != str(field2)):
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
except BaseException:
raise_value_error("ValueError: Something went wrong when trying to compare "
+ attributes_dict[attribute] + " !", logger)
if equal:
return True
else:
return False
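# Small shape helpers: the total number of elements implied by a shape tuple, and its
# dimensionality (optionally ignoring axes of length 0 or 1).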
def shape_to_size(shape):
shape = np.array(shape)
shape = shape[shape > 0]
return np.int(np.max([shape.prod(), 1]))
def shape_to_ndim(shape, squeeze=False):
if squeeze:
shape = filter(lambda x: not (np.any(np.in1d(x, [0, 1]))), list(shape))
return len(shape)
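# Broadcast a num_steps-point linear interpolation between start and stop, which may
# themselves be arrays; extra axes are appended to the step vector until numpy
# broadcasting succeeds (giving up after maxdims attempts). Illustrative example:
#   linspace_broadcast(np.zeros(3), np.ones(3), 5)  ->  array of shape (5, 3)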
def linspace_broadcast(start, stop, num_steps, maxdims=3):
x_star = np.linspace(0, 1, num_steps)
dims = 0
x = None
while x is None and dims < maxdims:
try:
x = (x_star[:, None] * (stop - start) + start)
except BaseException:
x_star = x_star[:, np.newaxis]
dims = dims + 1
return x
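# Collapse an array whose entries are all equal down to a single element;
# arrays holding more than one distinct value are returned unchanged.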
def squeeze_array_to_scalar(arr):
arr = np.array(arr)
if arr.size == 1:
return arr
elif np.all(arr == arr[0]):
return arr[0]
else:
return arr
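# Coerce a collection of heterogeneous inputs (scalars, lists/tuples, numpy arrays or
# sympy symbols) to numpy arrays sharing a common shape: either the shape passed in or
# the most frequent compatible shape found among the inputs, tiling/reshaping as needed.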
def assert_arrays(params, shape=None, transpose=False):
# type: (object, object) -> object
if shape is None or \
not (isinstance(shape, tuple)
and len(shape) in range(3) and np.all([isinstance(s, (int, np.int)) for s in shape])):
shape = None
shapes = [] # list of all unique shapes
n_shapes = [] # list of all unique shapes' frequencies
size = 0 # initial shape
else:
size = shape_to_size(shape)
for ip in range(len(params)):
# Convert all accepted types to np arrays:
if isinstance(params[ip], np.ndarray):
pass
elif isinstance(params[ip], (list, tuple)):
# assuming a list or tuple of symbols...
params[ip] = np.array(params[ip]).astype(type(params[ip][0]))
elif isinstance(params[ip], (float, int, long, complex, np.number)):
params[ip] = np.array(params[ip])
else:
try:
import sympy
except BaseException:
raise_import_error("sympy import failed")
if isinstance(params[ip], tuple(sympy.core.all_classes)):
params[ip] = np.array(params[ip])
else:
raise_value_error("Input " + str(params[ip]) + " of type " + str(type(params[ip])) + " is not numeric, "
"of type np.ndarray, nor Symbol")
if shape is None:
# Only one size > 1 is acceptable
if params[ip].size != size:
if size > 1 and params[ip].size > 1:
raise_value_error(
"Inputs are of at least two distinct sizes > 1")
elif params[ip].size > size:
size = params[ip].size
# Construct a kind of histogram of all different shapes of the
# inputs:
ind = np.array([(x == params[ip].shape) for x in shapes])
if np.any(ind):
ind = np.where(ind)[0]
# TODO: handle this properly
n_shapes[int(ind)] += 1
else:
shapes.append(params[ip].shape)
n_shapes.append(1)
else:
if params[ip].size > size:
raise_value_error(
"At least one input is of a greater size than the one given!")
if shape is None:
# Keep only shapes of the correct size
ind = np.array([shape_to_size(s) == size for s in shapes])
shapes = np.array(shapes)[ind]
n_shapes = np.array(n_shapes)[ind]
# Find the most frequent shape
ind = np.argmax(n_shapes)
shape = tuple(shapes[ind])
if transpose and len(shape) > 1:
        if (transpose in ("horizontal", "row") and shape[0] > shape[1]) or \
                (transpose in ("vertical", "column") and shape[0] < shape[1]):
shape = list(shape)
temp = shape[1]
shape[1] = shape[0]
shape[0] = temp
shape = tuple(shape)
# Now reshape or tile when necessary
for ip in range(len(params)):
try:
if params[ip].shape != shape:
if params[ip].size in [0, 1]:
params[ip] = np.tile(params[ip], shape)
else:
params[ip] = np.reshape(params[ip], shape)
except BaseException:
            logger.warning("Could not reshape or tile input " + str(ip) +
                           " to the target shape " + str(shape) + "!")
if len(params) == 1:
return params[0]
else:
return tuple(params)
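# Precision-aware casting helpers: convert scalars or arrays to float/int of the
# requested bit width ("64" or "32", falling back to the numpy default otherwise).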
def make_float(x, precision="64"):
if isinstance(x, np.ndarray):
if isequal_string(precision, "64"):
return x.astype(np.float64)
elif isequal_string(precision, "32"):
return x.astype(np.float32)
else:
return x.astype(np.float)
else:
if isequal_string(precision, "64"):
return np.float64(x)
elif isequal_string(precision, "32"):
            return np.float32(x)
else:
return np.float(x)
def make_int(x, precision="64"):
if isinstance(x, np.ndarray):
if isequal_string(precision, "64"):
return x.astype(np.int64)
elif isequal_string(precision, "32"):
return x.astype(np.int32)
else:
return x.astype(np.int)
else:
if isequal_string(precision, "64"):
return np.int64(x)
elif isequal_string(precision, "32"):
            return np.int32(x)
else:
return np.int(x)
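# Copy selected attributes from obj1 onto obj2 (optionally under different names via
# attr2, optionally deep-copied, and optionally only where obj2's attribute is None).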
def copy_object_attributes(
obj1, obj2, attr1, attr2=None, deep_copy=False, check_none=False):
attr1 = ensure_list(attr1)
if attr2 is None:
attr2 = attr1
else:
attr2 = ensure_list(attr2)
if deep_copy:
        def fcopy(a1, a2):
            return setattr(obj2, a2, deepcopy(getattr(obj1, a1)))
else:
def fcopy(a1, a2):
return setattr(obj2, a2, getattr(obj1, a1))
if check_none:
for a1, a2 in zip(attr1, attr2):
if getattr(obj2, a2) is None:
fcopy(a1, a2)
else:
for a1, a2 in zip(attr1, attr2):
fcopy(a1, a2)
return obj2
| # Data structure manipulations and conversions
import re
import numpy as np
import json
from collections import OrderedDict
from copy import deepcopy
from cortstim.base.utils.log_error import raise_value_error, raise_import_error, initialize_logger
from datetime import date, datetime
logger = initialize_logger(__name__)
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32,
np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)): # This is the fix
return obj.tolist()
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def vector2scalar(x):
if not (isinstance(x, np.ndarray)):
return x
else:
y = np.squeeze(x)
if all(y.squeeze() == y[0]):
return y[0]
else:
return reg_dict(x)
def list_of_strings_to_string(lstr, sep=","):
result_str = lstr[0]
for s in lstr[1:]:
result_str += sep + s
return result_str
def dict_str(d):
s = "{"
for key, value in d.items():
s += ("\n" + key + ": " + str(value))
s += "}"
return s
def isequal_string(a, b, case_sensitive=False):
if case_sensitive:
return a == b
else:
try:
return a.lower() == b.lower()
except AttributeError:
logger.warning("Case sensitive comparison!")
return a == b
def split_string_text_numbers(ls):
items = []
for s in ensure_list(ls):
match = re.findall('(\d+|\D+)', s)
if match:
items.append(tuple(match[:2]))
return items
def construct_import_path(path, package="tvb_epilepsy"):
path = path.split(".py")[0]
start = path.find(package)
return path[start:].replace("/", ".")
def formal_repr(instance, attr_dict, sort_dict_flag=False):
""" A formal string representation for an object.
:param attr_dict: dictionary attribute_name: attribute_value
:param instance: Instance to read class name from it
"""
class_name = instance.__class__.__name__
formal = class_name + "{"
if sort_dict_flag:
attr_dict = sort_dict(attr_dict)
for key, val in attr_dict.items():
if isinstance(val, dict):
formal += "\n" + key + "=["
for key2, val2 in val.items():
formal += "\n" + str(key2) + " = " + str(val2)
formal += "]"
else:
formal += "\n" + str(key) + " = " + str(val)
return formal + "}"
def obj_to_dict(obj):
"""
:param obj: Python object to introspect
:return: dictionary after recursively taking obj fields and their values
"""
if obj is None:
return obj
if isinstance(obj, (str, int, float)):
return obj
if isinstance(obj, (np.float32,)):
return float(obj)
if isinstance(obj, (np.ndarray,)):
return obj.tolist()
if isinstance(obj, list):
ret = []
for val in obj:
ret.append(obj_to_dict(val))
return ret
ret = {}
for key in obj.__dict__:
val = getattr(obj, key, None)
ret[key] = obj_to_dict(val)
return ret
def reg_dict(x, lbl=None, sort=None):
"""
:x: a list or np vector
:lbl: a list or np vector of labels
:return: dictionary
"""
if not (isinstance(x, (str, int, float, list, np.ndarray))):
return x
else:
if not (isinstance(x, list)):
x = np.squeeze(x)
x_no = len(x)
if not (isinstance(lbl, (list, np.ndarray))):
lbl = np.repeat('', x_no)
else:
lbl = np.squeeze(lbl)
labels_no = len(lbl)
total_no = min(labels_no, x_no)
if x_no <= labels_no:
if sort == 'ascend':
ind = np.argsort(x).tolist()
elif sort == 'descend':
ind = np.argsort(x)
ind = ind[::-1].tolist()
else:
ind = range(x_no)
else:
ind = range(total_no)
d = OrderedDict()
for i in ind:
d[str(i) + '.' + str(lbl[i])] = x[i]
if labels_no > total_no:
ind_lbl = np.delete(np.array(range(labels_no)), ind).tolist()
for i in ind_lbl:
d[str(i) + '.' + str(lbl[i])] = None
if x_no > total_no:
ind_x = np.delete(np.array(range(x_no)), ind).tolist()
for i in ind_x:
d[str(i) + '.'] = x[i]
return d
def sort_dict(d):
return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
def dicts_of_lists(dictionary, n=1):
for key, value in dictionary.items():
dictionary[key] = ensure_list(dictionary[key])
if len(dictionary[key]) == 1 and n > 1:
dictionary[key] = dictionary[key] * n
return dictionary
def iterable_to_dict(obj):
d = OrderedDict()
for ind, value in enumerate(obj):
d["%02d" % ind] = value
return d
def dict_to_list_or_tuple(dictionary, output_obj="list"):
dictionary = sort_dict(dictionary)
output = dictionary.values()
if output_obj == "tuple":
output = tuple(output)
return output
def list_of_dicts_to_dicts_of_ndarrays(lst, shape=None):
d = dict(zip(lst[0], zip(*list([d.values() for d in lst]))))
if isinstance(shape, tuple):
for key, val in d.items():
d[key] = np.reshape(np.stack(d[key]), shape)
else:
for key, val in d.items():
d[key] = np.squeeze(np.stack(d[key]))
return d
def arrays_of_dicts_to_dicts_of_ndarrays(arr):
lst = arr.flatten().tolist()
d = list_of_dicts_to_dicts_of_ndarrays(lst)
for key, val in d.items():
d[key] = np.reshape(d[key], arr.shape)
return d
def dicts_of_lists_to_lists_of_dicts(dictionary):
return [dict(zip(dictionary, t)) for t in zip(*dictionary.values())]
def ensure_string(arg):
if not (isinstance(arg, str)):
if arg is None:
return ""
else:
return ensure_list(arg)[0]
else:
return arg
def ensure_list(arg):
if not (isinstance(arg, list)):
try: # if iterable
if isinstance(arg, (str, dict)):
arg = [arg]
else:
arg = list(arg)
except BaseException: # if not iterable
arg = [arg]
return arg
def ensure_string(arg):
if not (isinstance(arg, str)):
if arg is None:
return ""
else:
return ensure_list(arg)[0]
else:
return arg
def set_list_item_by_reference_safely(ind, item, lst):
while ind >= len(lst):
lst.append(None)
lst.__setitem__(ind, item)
def get_list_or_tuple_item_safely(obj, key):
try:
return obj[int(key)]
except BaseException:
return None
def linear_index_to_coordinate_tuples(linear_index, shape):
if len(linear_index) > 0:
coordinates_tuple = np.unravel_index(linear_index, shape)
return zip(*[ca.flatten().tolist() for ca in coordinates_tuple])
else:
return []
def labels_to_inds(labels, lbls):
idx = []
lbls = ensure_list(lbls)
for i, label in enumerate(labels):
if label in lbls:
idx.append(i)
return np.unique(idx)
def generate_region_labels(n_regions, labels=[], str=". ", numbering=True):
if len(labels) == n_regions:
if numbering:
return np.array([str.join(["%d", "%s"]) % tuple(l)
for l in zip(range(n_regions), labels)])
else:
return labels
else:
return np.array(["%d" % l for l in range(n_regions)])
def monopolar_to_bipolar(labels, indices=None, data=None):
if indices is None:
indices = range(len(labels))
bipolar_lbls = []
bipolar_inds = [[], []]
for ind in range(len(indices) - 1):
iS1 = indices[ind]
iS2 = indices[ind + 1]
if (labels[iS1][0] == labels[iS2][0]) and \
int(re.findall(r'\d+', labels[iS1])[0]) == \
int(re.findall(r'\d+', labels[iS2])[0]) - 1:
bipolar_lbls.append(labels[iS1] + "-" + labels[iS2])
bipolar_inds[0].append(iS1)
bipolar_inds[1].append(iS2)
if isinstance(data, np.ndarray):
data = data[bipolar_inds[0]] - data[bipolar_inds[1]]
return bipolar_lbls, bipolar_inds, data
else:
return bipolar_lbls, bipolar_inds
# This function is meant to confirm that two objects, presumed to be of the
# same type, are equal, i.e., identical
def assert_equal_objects(obj1, obj2, attributes_dict=None, logger=None):
def print_not_equal_message(attr, field1, field2, logger):
# logger.error("\n\nValueError: Original and read object field "+ attr + " not equal!")
# raise_value_error("\n\nOriginal and read object field " + attr + " not equal!")
logger.warning("Original and read object field " + attr + " not equal!" +
"\nOriginal field:\n" + str(field1) +
"\nRead object field:\n" + str(field2), logger)
if isinstance(obj1, dict):
def get_field1(obj, key):
return obj[key]
if not (isinstance(attributes_dict, dict)):
attributes_dict = dict()
for key in obj1.keys():
attributes_dict.update({key: key})
elif isinstance(obj1, (list, tuple)):
        def get_field1(obj, key):
            return get_list_or_tuple_item_safely(obj, key)
indices = range(len(obj1))
attributes_dict = dict(zip([str(ind) for ind in indices], indices))
else:
def get_field1(obj, attribute):
return getattr(obj, attribute)
if not (isinstance(attributes_dict, dict)):
attributes_dict = dict()
for key in obj1.__dict__.keys():
attributes_dict.update({key: key})
if isinstance(obj2, dict):
def get_field2(obj, key):
return obj.get(key, None)
elif isinstance(obj2, (list, tuple)):
        def get_field2(obj, key):
            return get_list_or_tuple_item_safely(obj, key)
else:
def get_field2(obj, attribute):
return getattr(obj, attribute, None)
equal = True
for attribute in attributes_dict:
# print attributes_dict[attribute]
field1 = get_field1(obj1, attributes_dict[attribute])
field2 = get_field2(obj2, attributes_dict[attribute])
try:
# TODO: a better hack for the stupid case of an ndarray of a string, such as model.zmode or pmode
# For non numeric types
if isinstance(field1, str) or isinstance(field1, list) or isinstance(field1, dict) \
or (isinstance(field1, np.ndarray) and field1.dtype.kind in 'OSU'):
if np.any(field1 != field2):
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
# For numeric numpy arrays:
elif isinstance(field1, np.ndarray) and not field1.dtype.kind in 'OSU':
# TODO: handle better accuracy differences, empty matrices and
# complex numbers...
if field1.shape != field2.shape:
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
elif np.any(np.float32(field1) - np.float32(field2) > 0):
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
# For numeric scalar types
elif isinstance(field1, (int, float, long, complex, np.number)):
if np.float32(field1) - np.float32(field2) > 0:
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
else:
equal = assert_equal_objects(field1, field2, logger=logger)
except BaseException:
try:
logger.warning("Comparing str(objects) for field "
+ attributes_dict[attribute] + " because there was an error!", logger)
if np.any(str(field1) != str(field2)):
print_not_equal_message(
attributes_dict[attribute], field1, field2, logger)
equal = False
except BaseException:
raise_value_error("ValueError: Something went wrong when trying to compare "
+ attributes_dict[attribute] + " !", logger)
if equal:
return True
else:
return False
def shape_to_size(shape):
shape = np.array(shape)
shape = shape[shape > 0]
return np.int(np.max([shape.prod(), 1]))
def shape_to_ndim(shape, squeeze=False):
if squeeze:
shape = filter(lambda x: not (np.any(np.in1d(x, [0, 1]))), list(shape))
return len(shape)
def linspace_broadcast(start, stop, num_steps, maxdims=3):
x_star = np.linspace(0, 1, num_steps)
dims = 0
x = None
while x is None and dims < maxdims:
try:
x = (x_star[:, None] * (stop - start) + start)
except BaseException:
x_star = x_star[:, np.newaxis]
dims = dims + 1
return x
def squeeze_array_to_scalar(arr):
arr = np.array(arr)
if arr.size == 1:
return arr
elif np.all(arr == arr[0]):
return arr[0]
else:
return arr
def assert_arrays(params, shape=None, transpose=False):
# type: (object, object) -> object
if shape is None or \
not (isinstance(shape, tuple)
and len(shape) in range(3) and np.all([isinstance(s, (int, np.int)) for s in shape])):
shape = None
shapes = [] # list of all unique shapes
n_shapes = [] # list of all unique shapes' frequencies
size = 0 # initial shape
else:
size = shape_to_size(shape)
for ip in range(len(params)):
# Convert all accepted types to np arrays:
if isinstance(params[ip], np.ndarray):
pass
elif isinstance(params[ip], (list, tuple)):
# assuming a list or tuple of symbols...
params[ip] = np.array(params[ip]).astype(type(params[ip][0]))
elif isinstance(params[ip], (float, int, long, complex, np.number)):
params[ip] = np.array(params[ip])
else:
try:
import sympy
except BaseException:
raise_import_error("sympy import failed")
if isinstance(params[ip], tuple(sympy.core.all_classes)):
params[ip] = np.array(params[ip])
else:
raise_value_error("Input " + str(params[ip]) + " of type " + str(type(params[ip])) + " is not numeric, "
"of type np.ndarray, nor Symbol")
if shape is None:
# Only one size > 1 is acceptable
if params[ip].size != size:
if size > 1 and params[ip].size > 1:
raise_value_error(
"Inputs are of at least two distinct sizes > 1")
elif params[ip].size > size:
size = params[ip].size
# Construct a kind of histogram of all different shapes of the
# inputs:
ind = np.array([(x == params[ip].shape) for x in shapes])
if np.any(ind):
ind = np.where(ind)[0]
# TODO: handle this properly
n_shapes[int(ind)] += 1
else:
shapes.append(params[ip].shape)
n_shapes.append(1)
else:
if params[ip].size > size:
raise_value_error(
"At least one input is of a greater size than the one given!")
if shape is None:
# Keep only shapes of the correct size
ind = np.array([shape_to_size(s) == size for s in shapes])
shapes = np.array(shapes)[ind]
n_shapes = np.array(n_shapes)[ind]
# Find the most frequent shape
ind = np.argmax(n_shapes)
shape = tuple(shapes[ind])
if transpose and len(shape) > 1:
        if (transpose in ("horizontal", "row") and shape[0] > shape[1]) or \
                (transpose in ("vertical", "column") and shape[0] < shape[1]):
shape = list(shape)
temp = shape[1]
shape[1] = shape[0]
shape[0] = temp
shape = tuple(shape)
# Now reshape or tile when necessary
for ip in range(len(params)):
try:
if params[ip].shape != shape:
if params[ip].size in [0, 1]:
params[ip] = np.tile(params[ip], shape)
else:
params[ip] = np.reshape(params[ip], shape)
except BaseException:
            logger.warning("Could not reshape or tile input " + str(ip) +
                           " to the target shape " + str(shape) + "!")
if len(params) == 1:
return params[0]
else:
return tuple(params)
def make_float(x, precision="64"):
if isinstance(x, np.ndarray):
if isequal_string(precision, "64"):
return x.astype(np.float64)
elif isequal_string(precision, "32"):
return x.astype(np.float32)
else:
return x.astype(np.float)
else:
if isequal_string(precision, "64"):
return np.float64(x)
elif isequal_string(precision, "32"):
            return np.float32(x)
else:
return np.float(x)
def make_int(x, precision="64"):
if isinstance(x, np.ndarray):
if isequal_string(precision, "64"):
return x.astype(np.int64)
elif isequal_string(precision, "32"):
return x.astype(np.int32)
else:
return x.astype(np.int)
else:
if isequal_string(precision, "64"):
return np.int64(x)
elif isequal_string(precision, "32"):
            return np.int32(x)
else:
return np.int(x)
def copy_object_attributes(
obj1, obj2, attr1, attr2=None, deep_copy=False, check_none=False):
attr1 = ensure_list(attr1)
if attr2 is None:
attr2 = attr1
else:
attr2 = ensure_list(attr2)
if deep_copy:
        def fcopy(a1, a2):
            return setattr(obj2, a2, deepcopy(getattr(obj1, a1)))
else:
def fcopy(a1, a2):
return setattr(obj2, a2, getattr(obj1, a1))
if check_none:
for a1, a2 in zip(attr1, attr2):
if getattr(obj2, a2) is None:
fcopy(a1, a2)
else:
for a1, a2 in zip(attr1, attr2):
fcopy(a1, a2)
return obj2
| en | 0.692384 | # Data structure manipulations and conversions Special json encoder for numpy types # This is the fix A formal string representation for an object. :param attr_dict: dictionary attribute_name: attribute_value :param instance: Instance to read class name from it :param obj: Python object to introspect :return: dictionary after recursively taking obj fields and their values :x: a list or np vector :lbl: a list or np vector of labels :return: dictionary # if iterable # if not iterable # This function is meant to confirm that two objects assumingly of the # same type are equal, i.e., identical # logger.error("\n\nValueError: Original and read object field "+ attr + " not equal!") # raise_value_error("\n\nOriginal and read object field " + attr + " not equal!") # print attributes_dict[attribute] # TODO: a better hack for the stupid case of an ndarray of a string, such as model.zmode or pmode # For non numeric types # For numeric numpy arrays: # TODO: handle better accuracy differences, empty matrices and # complex numbers... # For numeric scalar types # type: (object, object) -> object # list of all unique shapes # list of all unique shapes' frequencies # initial shape # Convert all accepted types to np arrays: # assuming a list or tuple of symbols... # Only one size > 1 is acceptable # Construct a kind of histogram of all different shapes of the # inputs: # TODO: handle this properly # Keep only shapes of the correct size # Find the most frequent shape # Now reshape or tile when necessary # TODO: maybe make this an explicit message | 2.573698 | 3 |
aula2.py | Vitoraugustoliveira/python-tutorial | 0 | 6631192 | # Lists
list1 = []
print(type(list1))
lista = [1, 2, 3, 4]
lista2 = [5, 6, 7, 8]
matrix = [lista, lista2]
print(matrix)
print(matrix[0][1])
soma_lista = lista + lista2
print(soma_lista)
lista.append(85)
print(lista)
print(lista[-5])
print(lista[0])
lista.append("XABLAU")
print(lista)
lista.append(lista2)
print(lista)
print(lista.index("XABLAU"))
del lista[lista.index("XABLAU")]
del lista[0]
lista.pop()
print(lista)
#lista = [1, 2, 3, 4]
# pos - -> value
# 0 - -> 1
# 1 - -> 2
# 2 - -> 3
# 3 - -> 4
# -1 - -> 4
# -2 - -> 3
# -3 - -> 2
# -4 - -> 1
# Dict
# Tuples
# Sets
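# Quick illustrative previews (not part of the original lesson) for the topics above:
# dicionario = {"nome": "Ana", "idade": 30}   # Dict: key -> value mapping
# tupla = (1, 2, 3)                           # Tuple: immutable sequence
# conjunto = {1, 2, 3}                        # Set: unordered, unique items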
| # Lists
list1 = []
print(type(list1))
lista = [1, 2, 3, 4]
lista2 = [5, 6, 7, 8]
matrix = [lista, lista2]
print(matrix)
print(matrix[0][1])
soma_lista = lista + lista2
print(soma_lista)
lista.append(85)
print(lista)
print(lista[-5])
print(lista[0])
lista.append("XABLAU")
print(lista)
lista.append(lista2)
print(lista)
print(lista.index("XABLAU"))
del lista[lista.index("XABLAU")]
del lista[0]
lista.pop()
print(lista)
#lista = [1, 2, 3, 4]
# pos - -> value
# 0 - -> 1
# 1 - -> 2
# 2 - -> 3
# 3 - -> 4
# -1 - -> 4
# -2 - -> 3
# -3 - -> 2
# -4 - -> 1
# Dict
# Tuples
# Sets
| en | 0.480117 | # Lists #lista = [1, 2, 3, 4] # pos - -> value # 0 - -> 1 # 1 - -> 2 # 2 - -> 3 # 3 - -> 4 # -1 - -> 4 # -2 - -> 3 # -3 - -> 2 # -4 - -> 1 # Dict # Tuples # Sets | 3.833946 | 4 |
tests/common/test_case.py | tjaffri/paraphrase-id-tensorflow | 354 | 6631193 | # pylint: disable=invalid-name,protected-access
from unittest import TestCase
import codecs
import logging
import os
import shutil
import tensorflow as tf
class DuplicateTestCase(TestCase):
TEST_DIR = './TMP_TEST/'
TRAIN_FILE = TEST_DIR + 'train_file'
VALIDATION_FILE = TEST_DIR + 'validation_file'
TEST_FILE = TEST_DIR + 'test_file'
VECTORS_FILE = TEST_DIR + 'vectors_file'
def setUp(self):
logging.basicConfig(format=('%(asctime)s - %(levelname)s - '
'%(name)s - %(message)s'),
level=logging.INFO)
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
tf.reset_default_graph()
shutil.rmtree(self.TEST_DIR)
def write_duplicate_questions_train_file(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as dupe_train_file:
dupe_train_file.write("\"1\",\"1\",\"2\",\"question1\","
"\"question2 question3pad\",\"0\"\n")
dupe_train_file.write("\"2\",\"3\",\"4\",\"question4\","
"\"question5\",\"1\"\n")
dupe_train_file.write("\"3\",\"5\",\"6\",\"question6\","
"\"question7\",\"0\"\n")
def write_duplicate_questions_validation_file(self):
with codecs.open(self.VALIDATION_FILE, 'w',
'utf-8') as dupe_val_file:
dupe_val_file.write("\"1\",\"7\",\"8\",\"question1\","
"\"question2 question8\",\"0\"\n")
dupe_val_file.write("\"2\",\"9\",\"10\",\"question9\","
"\"question10\",\"1\"\n")
dupe_val_file.write("\"3\",\"11\",\"12\",\"question6\","
"\"question7 question11 question12\","
"\"0\"\n")
def write_duplicate_questions_test_file(self):
with codecs.open(self.TEST_FILE, 'w', 'utf-8') as dupe_test_file:
dupe_test_file.write("\"1\",\"question1 questionunk1 question1\","
"\"questionunk2\"\n")
dupe_test_file.write("\"2\",\"question3pad\","
"\"question4 questionunk3\"\n")
dupe_test_file.write("\"3\",\"question5\",\"question6\"\n")
def write_vector_file(self):
with codecs.open(self.VECTORS_FILE, 'w', 'utf-8') as vectors_file:
vectors_file.write("word1 0.0 1.1 0.2\n")
vectors_file.write("word2 0.1 0.4 -4.0\n")
| # pylint: disable=invalid-name,protected-access
from unittest import TestCase
import codecs
import logging
import os
import shutil
import tensorflow as tf
class DuplicateTestCase(TestCase):
TEST_DIR = './TMP_TEST/'
TRAIN_FILE = TEST_DIR + 'train_file'
VALIDATION_FILE = TEST_DIR + 'validation_file'
TEST_FILE = TEST_DIR + 'test_file'
VECTORS_FILE = TEST_DIR + 'vectors_file'
def setUp(self):
logging.basicConfig(format=('%(asctime)s - %(levelname)s - '
'%(name)s - %(message)s'),
level=logging.INFO)
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
tf.reset_default_graph()
shutil.rmtree(self.TEST_DIR)
def write_duplicate_questions_train_file(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as dupe_train_file:
dupe_train_file.write("\"1\",\"1\",\"2\",\"question1\","
"\"question2 question3pad\",\"0\"\n")
dupe_train_file.write("\"2\",\"3\",\"4\",\"question4\","
"\"question5\",\"1\"\n")
dupe_train_file.write("\"3\",\"5\",\"6\",\"question6\","
"\"question7\",\"0\"\n")
def write_duplicate_questions_validation_file(self):
with codecs.open(self.VALIDATION_FILE, 'w',
'utf-8') as dupe_val_file:
dupe_val_file.write("\"1\",\"7\",\"8\",\"question1\","
"\"question2 question8\",\"0\"\n")
dupe_val_file.write("\"2\",\"9\",\"10\",\"question9\","
"\"question10\",\"1\"\n")
dupe_val_file.write("\"3\",\"11\",\"12\",\"question6\","
"\"question7 question11 question12\","
"\"0\"\n")
def write_duplicate_questions_test_file(self):
with codecs.open(self.TEST_FILE, 'w', 'utf-8') as dupe_test_file:
dupe_test_file.write("\"1\",\"question1 questionunk1 question1\","
"\"questionunk2\"\n")
dupe_test_file.write("\"2\",\"question3pad\","
"\"question4 questionunk3\"\n")
dupe_test_file.write("\"3\",\"question5\",\"question6\"\n")
def write_vector_file(self):
with codecs.open(self.VECTORS_FILE, 'w', 'utf-8') as vectors_file:
vectors_file.write("word1 0.0 1.1 0.2\n")
vectors_file.write("word2 0.1 0.4 -4.0\n")
| en | 0.261104 | # pylint: disable=invalid-name,protected-access | 2.669614 | 3 |
convert to gray/convertToGray.py | Jerry0424/NDHU_ImageProcessing | 0 | 6631194 |
'''
Transform a color image into gray image using the conversion formula.
Show the pictures using matplotlib.
'''
# use matplotlib to help get the image and show the images
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# get the color image
img = mpimg.imread('lena.png')
# put the color image into a subplot
plt.subplot(2,1,1)
plt.imshow(img)
# Convert the color image into grayscale using the standard luma formula (ITU-R BT.601 weights for R, G and B)
R, G, B = img[:,:,0], img[:,:,1], img[:,:,2]
imgGray = 0.299 * R + 0.587 * G + 0.114 * B
# put the changed image which now is grayscale into the other subplot
plt.subplot(2,1,2)
plt.imshow(imgGray, cmap='gray')
# show the images
plt.show()
|
'''
Transform a color image into gray image using the conversion formula.
Show the pictures using matplotlib.
'''
# use matplotlib to help get the image and show the images
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# get the color image
img = mpimg.imread('lena.png')
# put the color image into a subplot
plt.subplot(2,1,1)
plt.imshow(img)
# Convert the color image into grayscale using the standard luma formula (ITU-R BT.601 weights for R, G and B)
R, G, B = img[:,:,0], img[:,:,1], img[:,:,2]
imgGray = 0.299 * R + 0.587 * G + 0.114 * B
# put the changed image which now is grayscale into the other subplot
plt.subplot(2,1,2)
plt.imshow(imgGray, cmap='gray')
# show the images
plt.show()
| en | 0.782724 | Transform a color image into gray image using the conversion formula. Show the pictures using matplotlib. # use matplotlib to help get the image and show the images # get the color image # put the color image into a subplot # Convert the color image into grayscale using the formula which adjusts the values of RGB # put the changed image which now is grayscale into the other subplot # show the images | 4.134772 | 4 |
moncli/column_value/base.py | harryrobbins/moncli | 40 | 6631195 | import json, copy
from schematics.transforms import blacklist
from schematics.types import StringType
from .. import entities as en, ColumnValueError
from .constants import SIMPLE_NULL_VALUE, COMPLEX_NULL_VALUE
class _ColumnValue(en.BaseColumn):
"""Base column value model"""
def __init__(self, **kwargs):
self.text = kwargs.pop('text', None)
self.additional_info = kwargs.pop('additional_info', None)
super().__init__(**kwargs)
class ColumnValue(_ColumnValue):
"""The value of an items column.
Properties
additional_info : `json`
The column value's additional information.
id : `str`
The column's unique identifier.
text : `str`
The column's textual value in string form.
title : `str`
The columns title.
value : `any`
The column's value in a Python native format.
settings_str: `str`
The column's unique settings.
Methods
format : `dict`
Format for column value update.
set_value : `void`
Sets the value of the column.
"""
null_value = None
read_only = False
allow_casts = ()
native_type = None
native_default = None
def __init__(self, **kwargs):
value = kwargs.pop('value', None)
super().__init__(**kwargs)
# Set serialized configured null value if no value.
if value and value != self.null_value:
value = json.loads(value)
self._value = self._convert(value)
else:
self._value = copy.deepcopy(self.native_default)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self.read_only:
raise ColumnValueError('readonly_column_set', self.id, 'Cannot update value of read-only column "{}".'.format(self.title))
if isinstance(value, self.allow_casts):
self._value = self._cast(value)
elif value == self.native_default or isinstance(value, self.native_type):
self._value = value
elif not value:
self._value = copy.deepcopy(self.native_default)
else:
raise ColumnValueError('invalid_column_value', self.id,
'Unable to set value "{}" to column "{}".'.format(value, self.title))
@property
def settings(self):
return json.loads(self.settings_str)
@property
def additional_info_map(self):
return json.dumps(self.additional_info)
def format(self):
if self.read_only:
raise ColumnValueError(
'readonly_column_format',
self.id,
'Cannot format value for read-only column "{}".'.format(self.title))
if self.value == self.native_default:
return self.null_value
return self._format()
def to_primitive(self):
return dict(
id=self.id,
title=self.title,
text=self.text,
additional_info=self.additional_info,
value=self.value)
def __repr__(self):
return str({
'id': self.id,
'title': self.title,
'value': self.value
})
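    # Internal hooks that concrete column value subclasses can override: _convert parses
    # the raw (JSON-decoded) API value, _cast coerces values of the allowed cast types,
    # and _format serializes the native value back for a column value update.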
def _convert(self, value):
return value
def _cast(self, value):
return self.native_type(value)
def _format(self):
return str(self.value)
class SimpleNullValue(ColumnValue):
null_value = SIMPLE_NULL_VALUE
class ComplexNullValue(ColumnValue):
null_value = COMPLEX_NULL_VALUE
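# Illustrative sketch (not part of moncli itself) of how a concrete column value could
# wire up the hooks above; the names and defaults here are assumptions for illustration only:
#
#   class NumberValue(SimpleNullValue):
#       native_type = (int, float)      # accepted native python types
#       allow_casts = (str,)            # strings are coerced via _cast
#       def _cast(self, value):
#           return float(value)         # e.g. "42.5" -> 42.5
#       def _format(self):
#           return str(self.value)      # serialized form sent in the update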
| import json, copy
from schematics.transforms import blacklist
from schematics.types import StringType
from .. import entities as en, ColumnValueError
from .constants import SIMPLE_NULL_VALUE, COMPLEX_NULL_VALUE
class _ColumnValue(en.BaseColumn):
"""Base column value model"""
def __init__(self, **kwargs):
self.text = kwargs.pop('text', None)
self.additional_info = kwargs.pop('additional_info', None)
super().__init__(**kwargs)
class ColumnValue(_ColumnValue):
"""The value of an items column.
Properties
additional_info : `json`
The column value's additional information.
id : `str`
The column's unique identifier.
text : `str`
The column's textual value in string form.
title : `str`
The columns title.
value : `any`
The column's value in a Python native format.
settings_str: `str`
The column's unique settings.
Methods
format : `dict`
Format for column value update.
set_value : `void`
Sets the value of the column.
"""
null_value = None
read_only = False
allow_casts = ()
native_type = None
native_default = None
def __init__(self, **kwargs):
value = kwargs.pop('value', None)
super().__init__(**kwargs)
# Set serialized configured null value if no value.
if value and value != self.null_value:
value = json.loads(value)
self._value = self._convert(value)
else:
self._value = copy.deepcopy(self.native_default)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self.read_only:
raise ColumnValueError('readonly_column_set', self.id, 'Cannot update value of read-only column "{}".'.format(self.title))
if isinstance(value, self.allow_casts):
self._value = self._cast(value)
elif value == self.native_default or isinstance(value, self.native_type):
self._value = value
elif not value:
self._value = copy.deepcopy(self.native_default)
else:
raise ColumnValueError('invalid_column_value', self.id,
'Unable to set value "{}" to column "{}".'.format(value, self.title))
@property
def settings(self):
return json.loads(self.settings_str)
@property
def additional_info_map(self):
return json.dumps(self.additional_info)
def format(self):
if self.read_only:
raise ColumnValueError(
'readonly_column_format',
self.id,
'Cannot format value for read-only column "{}".'.format(self.title))
if self.value == self.native_default:
return self.null_value
return self._format()
def to_primitive(self):
return dict(
id=self.id,
title=self.title,
text=self.text,
additional_info=self.additional_info,
value=self.value)
def __repr__(self):
return str({
'id': self.id,
'title': self.title,
'value': self.value
})
def _convert(self, value):
return value
def _cast(self, value):
return self.native_type(value)
def _format(self):
return str(self.value)
class SimpleNullValue(ColumnValue):
null_value = SIMPLE_NULL_VALUE
class ComplexNullValue(ColumnValue):
null_value = COMPLEX_NULL_VALUE
| en | 0.3477 | Base column value model The value of an items column. Properties additional_info : `json` The column value's additional information. id : `str` The column's unique identifier. text : `str` The column's textual value in string form. title : `str` The columns title. value : `any` The column's value in a Python native format. settings_str: `str` The column's unique settings. Methods format : `dict` Format for column value update. set_value : `void` Sets the value of the column. # Set serialized configured null value if no value. | 2.318736 | 2 |
flask_filer/api.py | BbsonLin/flask-filer | 1 | 6631196 |
import os
import logging
from flask import json, jsonify, request, send_file, current_app
from flask.views import MethodView
from werkzeug.utils import secure_filename
from .utils import get_dirlist, get_info, open_file
from .exceptions import InvalidPathError
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
class BrowseAPI(MethodView):
def get(self, path='/'):
filer_list = get_dirlist(path)
LOG.debug(filer_list)
return json.dumps(get_info(filer_list))
class DownloadAPI(MethodView):
def get(self, path=None):
fp = open_file(path)
LOG.debug(fp)
return send_file(fp, as_attachment=True, attachment_filename=os.path.basename(path))
class UploadAPI(MethodView):
def post(self, path=''):
target = os.path.join(current_app.config['FILER_ROOT_PATH'], path)
LOG.debug(target)
if not os.path.isdir(target):
raise InvalidPathError(path=target)
else:
for uploaded_file in request.files.getlist('file'):
file_path = os.path.join(target, secure_filename(uploaded_file.filename))
uploaded_file.save(file_path)
return jsonify(msg='upload successed')
| import os
import logging
from flask import json, jsonify, request, send_file, current_app
from flask.views import MethodView
from werkzeug.utils import secure_filename
from .utils import get_dirlist, get_info, open_file
from .exceptions import InvalidPathError
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
class BrowseAPI(MethodView):
def get(self, path='/'):
filer_list = get_dirlist(path)
LOG.debug(filer_list)
return json.dumps(get_info(filer_list))
class DownloadAPI(MethodView):
def get(self, path=None):
fp = open_file(path)
LOG.debug(fp)
return send_file(fp, as_attachment=True, attachment_filename=os.path.basename(path))
class UploadAPI(MethodView):
def post(self, path=''):
target = os.path.join(current_app.config['FILER_ROOT_PATH'], path)
LOG.debug(target)
if not os.path.isdir(target):
raise InvalidPathError(path=target)
else:
for uploaded_file in request.files.getlist('file'):
file_path = os.path.join(target, secure_filename(uploaded_file.filename))
uploaded_file.save(file_path)
return jsonify(msg='upload successed') | none | 1 | 2.232646 | 2 |
|
src/lgr/migrations/0001_initial.py | b4ckspace/lgr | 0 | 6631197 | # Generated by Django 3.0 on 2019-12-07 21:36
from django.db import migrations, models
import django.db.models.deletion
import lgr.mixin
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Barcode",
fields=[
(
"code",
models.CharField(
max_length=64, primary_key=True, serialize=False, unique=True
),
),
("description", models.TextField(blank=True, default="")),
],
bases=(lgr.mixin.BarcodeHistoryMixin, models.Model),
),
migrations.CreateModel(
name="Person",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("nickname", models.CharField(max_length=64)),
("firstname", models.CharField(blank=True, default="", max_length=64)),
("lastname", models.CharField(blank=True, default="", max_length=64)),
("email", models.EmailField(blank=True, default="", max_length=254)),
],
),
migrations.CreateModel(
name="Tag",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name="Loan",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("description", models.TextField(blank=True)),
(
"status",
models.CharField(
choices=[("taken", "taken"), ("returned", "returned")],
default="taken",
max_length=10,
),
),
("taken_date", models.DateTimeField()),
("return_date", models.DateTimeField(blank=True, null=True)),
("returned_date", models.DateTimeField(blank=True, null=True)),
(
"barcodes",
models.ManyToManyField(related_name="loans", to="lgr.Barcode"),
),
(
"person",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="loans",
to="lgr.Person",
),
),
],
),
migrations.CreateModel(
name="Item",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=128, unique=True)),
("description", models.TextField(blank=True)),
("tags", models.ManyToManyField(blank=True, to="lgr.Tag")),
],
),
migrations.CreateModel(
name="History",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("message", models.CharField(max_length=1024)),
(
"affected",
models.ManyToManyField(related_name="history", to="lgr.Barcode"),
),
(
"person",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="changes",
to="lgr.Person",
),
),
],
options={"verbose_name_plural": "Histories",},
),
migrations.AddField(
model_name="barcode",
name="item",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="barcodes",
to="lgr.Item",
),
),
migrations.AddField(
model_name="barcode",
name="owner",
field=models.ForeignKey(
default=None,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="barcodes",
to="lgr.Person",
),
),
migrations.AddField(
model_name="barcode",
name="parent",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="children",
to="lgr.Barcode",
),
),
]
| # Generated by Django 3.0 on 2019-12-07 21:36
from django.db import migrations, models
import django.db.models.deletion
import lgr.mixin
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Barcode",
fields=[
(
"code",
models.CharField(
max_length=64, primary_key=True, serialize=False, unique=True
),
),
("description", models.TextField(blank=True, default="")),
],
bases=(lgr.mixin.BarcodeHistoryMixin, models.Model),
),
migrations.CreateModel(
name="Person",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("nickname", models.CharField(max_length=64)),
("firstname", models.CharField(blank=True, default="", max_length=64)),
("lastname", models.CharField(blank=True, default="", max_length=64)),
("email", models.EmailField(blank=True, default="", max_length=254)),
],
),
migrations.CreateModel(
name="Tag",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name="Loan",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("description", models.TextField(blank=True)),
(
"status",
models.CharField(
choices=[("taken", "taken"), ("returned", "returned")],
default="taken",
max_length=10,
),
),
("taken_date", models.DateTimeField()),
("return_date", models.DateTimeField(blank=True, null=True)),
("returned_date", models.DateTimeField(blank=True, null=True)),
(
"barcodes",
models.ManyToManyField(related_name="loans", to="lgr.Barcode"),
),
(
"person",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="loans",
to="lgr.Person",
),
),
],
),
migrations.CreateModel(
name="Item",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=128, unique=True)),
("description", models.TextField(blank=True)),
("tags", models.ManyToManyField(blank=True, to="lgr.Tag")),
],
),
migrations.CreateModel(
name="History",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("message", models.CharField(max_length=1024)),
(
"affected",
models.ManyToManyField(related_name="history", to="lgr.Barcode"),
),
(
"person",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="changes",
to="lgr.Person",
),
),
],
options={"verbose_name_plural": "Histories",},
),
migrations.AddField(
model_name="barcode",
name="item",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="barcodes",
to="lgr.Item",
),
),
migrations.AddField(
model_name="barcode",
name="owner",
field=models.ForeignKey(
default=None,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="barcodes",
to="lgr.Person",
),
),
migrations.AddField(
model_name="barcode",
name="parent",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="children",
to="lgr.Barcode",
),
),
]
| en | 0.872726 | # Generated by Django 3.0 on 2019-12-07 21:36 | 1.714183 | 2 |
robotics_project/scripts/demo/manipulation_client.py | hect1995/Robotics_intro | 0 | 6631198 |
#!/usr/bin/env python
# Copyright (c) 2016 PAL Robotics SL. All Rights Reserved
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Author:
# * <NAME>
# * <NAME>
# * <NAME>
import rospy
import time
from robotics_project.msg import PickUpPoseAction, PickUpPoseGoal
from geometry_msgs.msg import PoseStamped, Pose
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal
from actionlib import SimpleActionClient
from robotics_project.srv import MoveHead, MoveHeadRequest, MoveHeadResponse
import tf2_ros
from tf2_geometry_msgs import do_transform_pose
import numpy as np
from std_srvs.srv import Empty, SetBool, SetBoolResponse
import cv2
from cv_bridge import CvBridge
from moveit_msgs.msg import MoveItErrorCodes
moveit_error_dict = {}
for name in MoveItErrorCodes.__dict__.keys():
if not name[:1] == '_':
code = MoveItErrorCodes.__dict__[name]
moveit_error_dict[code] = name
class SphericalService(object):
def __init__(self):
rospy.loginfo("Starting Spherical Grab Service")
self.pick_srv_nm = rospy.get_param(rospy.get_name() + '/pick_srv')
self.place_srv_nm = rospy.get_param(rospy.get_name() + '/place_srv')
self.mv_head_srv_nm = rospy.get_param(rospy.get_name() + '/move_head_srv')
self.place_gui = rospy.Service(self.place_srv_nm, SetBool, self.start_aruco_place)
self.pick_gui = rospy.Service(self.pick_srv_nm, SetBool, self.start_aruco_pick)
self.move_head_srv = rospy.Service(self.mv_head_srv_nm, MoveHead, self.move_head)
self.head_cmd = rospy.Publisher('/head_controller/command', JointTrajectory, queue_size=1)
rospy.loginfo("Launching SphericalService constructor")
self.pick_type = ManipulateAruco()
def start_aruco_pick(self, req):
success = self.pick_type.pick_and_place_aruco("pick")
reply = SetBoolResponse()
reply.success = success
reply.message = ""
return reply
def start_aruco_place(self, req):
success = self.pick_type.pick_and_place_aruco("place")
reply = SetBoolResponse()
reply.success = success
reply.message = ""
return reply
def move_head(self, req):
jt = JointTrajectory()
jt.joint_names = ['head_1_joint', 'head_2_joint']
jtp = JointTrajectoryPoint()
response = MoveHeadResponse()
if req.motion == "down":
jtp.positions = [0.0, -0.75]
response.success = True
elif req.motion == "up":
jtp.positions = [0.0, 0.0]
response.success = True
else:
response.success = False
jtp.time_from_start = rospy.Duration(2.0)
jt.points.append(jtp)
rospy.loginfo("Moving head " + req.motion)
self.head_cmd.publish(jt)
rospy.loginfo("Done.")
return response
class ManipulateAruco(object):
def __init__(self):
rospy.loginfo("Initalizing ManipulateAruco...")
self.aruco_pose_top = rospy.get_param(rospy.get_name() + '/marker_pose_topic')
self.pickup_pose_top = rospy.get_param(rospy.get_name() + '/pickup_marker_pose')
self.place_pose_top = rospy.get_param(rospy.get_name() + '/place_marker_pose')
self.bridge = CvBridge()
self.tfBuffer = tf2_ros.Buffer()
self.tf_l = tf2_ros.TransformListener(self.tfBuffer)
rospy.loginfo("Waiting for /pickup_pose AS...")
self.pick_as = SimpleActionClient(self.pickup_pose_top, PickUpPoseAction)
self.pick_as.wait_for_server()
rospy.loginfo("Waiting for /place_pose AS...")
self.place_as = SimpleActionClient(self.place_pose_top, PickUpPoseAction)
self.place_as.wait_for_server()
rospy.loginfo("Waiting for '/play_motion' AS...")
self.play_m_as = SimpleActionClient('/play_motion', PlayMotionAction)
if not self.play_m_as.wait_for_server(rospy.Duration(300)):
rospy.logerr("Could not connect to /play_motion AS")
exit()
rospy.loginfo("Connected!")
rospy.sleep(1.0)
rospy.loginfo("Setting publishers to torso and head controller...")
self.torso_cmd = rospy.Publisher(
'/torso_controller/command', JointTrajectory, queue_size=1)
self.detected_pose_pub = rospy.Publisher('/detected_aruco_pose',
PoseStamped,
queue_size=1,
latch=True)
self.aruco_pose_rcv = False
self.aruco_pose_subs = rospy.Subscriber(self.aruco_pose_top, PoseStamped, self.aruco_pose_cb)
self.pick_g = PickUpPoseGoal()
rospy.loginfo("Done initializing ManipulateAruco.")
def strip_leading_slash(self, s):
return s[1:] if s.startswith("/") else s
def pick_and_place_aruco(self, string_operation):
success = False
if string_operation == "pick":
self.prepare_robot_pandp()
rospy.sleep(2.0)
while not rospy.is_shutdown() and self.aruco_pose_rcv == False:
rospy.loginfo("spherical_grasp_gui: Waiting for an aruco detection...")
rospy.sleep(1.0)
aruco_pose = self.aruco_pose
aruco_pose.header.frame_id = self.strip_leading_slash(aruco_pose.header.frame_id)
rospy.loginfo("Got: " + str(aruco_pose))
rospy.loginfo("spherical_grasp_gui: Transforming from frame: " +
aruco_pose.header.frame_id + " to 'base_footprint'")
ps = PoseStamped()
ps.pose.position = aruco_pose.pose.position
ps.header.stamp = self.tfBuffer.get_latest_common_time("base_footprint", aruco_pose.header.frame_id)
ps.header.frame_id = aruco_pose.header.frame_id
transform_ok = False
while not transform_ok and not rospy.is_shutdown():
try:
transform = self.tfBuffer.lookup_transform("base_footprint",
ps.header.frame_id,
rospy.Time(0))
aruco_ps = do_transform_pose(ps, transform)
transform_ok = True
except tf2_ros.ExtrapolationException as e:
rospy.logwarn(
"Exception on transforming point... trying again \n(" +
str(e) + ")")
rospy.sleep(0.01)
ps.header.stamp = self.tfBuffer.get_latest_common_time("base_footprint", aruco_pose.header.frame_id)
rospy.loginfo("Setting cube pose based on Aruco detection")
self.pick_g.object_pose.pose.position = aruco_ps.pose.position
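            # Lower the grasp target by half the marker cube's side (assumed 0.1 m here),
            # presumably so the goal points at the cube centre rather than the detected
            # marker on its top face.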
self.pick_g.object_pose.pose.position.z -= 0.1*(1.0/2.0)
rospy.loginfo("aruco pose in base_footprint:" + str(self.pick_g))
self.pick_g.object_pose.header.frame_id = 'base_footprint'
self.pick_g.object_pose.pose.orientation.w = 1.0
self.detected_pose_pub.publish(self.pick_g.object_pose)
rospy.loginfo("Gonna pick:" + str(self.pick_g))
self.pick_as.send_goal_and_wait(self.pick_g)
rospy.loginfo("Done!")
result = self.pick_as.get_result()
if str(moveit_error_dict[result.error_code]) != "SUCCESS":
rospy.logerr("Failed to pick, not trying further")
success = False
else:
success = True
self.prepare_robot_nav()
return success
if string_operation == "place":
# Place the object on table in front
rospy.loginfo("Placing aruco marker")
self.place_as.send_goal_and_wait(self.pick_g)
rospy.loginfo("Done!")
result = self.place_as.get_result()
if str(moveit_error_dict[result.error_code]) != "SUCCESS":
rospy.logerr("Failed to place, not trying further")
success = False
else:
success = True
return success
def move_torso(self, string_operation):
jt = JointTrajectory()
jt.joint_names = ['torso_lift_joint']
jtp = JointTrajectoryPoint()
if string_operation == "lift":
jtp.positions = [0.34]
elif string_operation == "lower":
jtp.positions = [0.15]
else:
return
jtp.time_from_start = rospy.Duration(2.5)
jt.points.append(jtp)
rospy.loginfo("Moving torso " + string_operation)
self.torso_cmd.publish(jt)
def prepare_robot_pandp(self):
rospy.loginfo("Unfold arm safely")
pmg = PlayMotionGoal()
pmg.motion_name = 'pregrasp'
pmg.skip_planning = False
self.play_m_as.send_goal_and_wait(pmg)
rospy.loginfo("Done.")
rospy.loginfo("Robot prepared.")
def prepare_robot_nav(self):
# Move torso to its maximum height
self.move_torso("lift")
# Raise arm
rospy.loginfo("Moving arm to a safe pose")
pmg = PlayMotionGoal()
pmg.motion_name = 'pick_final_pose'
pmg.skip_planning = False
self.play_m_as.send_goal_and_wait(pmg)
rospy.loginfo("Raise object done.")
def aruco_pose_cb(self, aruco_pose_msg):
self.aruco_pose = aruco_pose_msg
self.aruco_pose_rcv = True
if __name__ == '__main__':
rospy.init_node('manipulation_client')
sphere = SphericalService()
rospy.spin()
|
#!/usr/bin/env python
# Copyright (c) 2016 PAL Robotics SL. All Rights Reserved
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Author:
# * <NAME>
# * <NAME>
# * <NAME>
import rospy
import time
from robotics_project.msg import PickUpPoseAction, PickUpPoseGoal
from geometry_msgs.msg import PoseStamped, Pose
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal
from actionlib import SimpleActionClient
from robotics_project.srv import MoveHead, MoveHeadRequest, MoveHeadResponse
import tf2_ros
from tf2_geometry_msgs import do_transform_pose
import numpy as np
from std_srvs.srv import Empty, SetBool, SetBoolResponse
import cv2
from cv_bridge import CvBridge
from moveit_msgs.msg import MoveItErrorCodes
moveit_error_dict = {}
for name in MoveItErrorCodes.__dict__.keys():
if not name[:1] == '_':
code = MoveItErrorCodes.__dict__[name]
moveit_error_dict[code] = name
class SphericalService(object):
def __init__(self):
rospy.loginfo("Starting Spherical Grab Service")
self.pick_srv_nm = rospy.get_param(rospy.get_name() + '/pick_srv')
self.place_srv_nm = rospy.get_param(rospy.get_name() + '/place_srv')
self.mv_head_srv_nm = rospy.get_param(rospy.get_name() + '/move_head_srv')
self.place_gui = rospy.Service(self.place_srv_nm, SetBool, self.start_aruco_place)
self.pick_gui = rospy.Service(self.pick_srv_nm, SetBool, self.start_aruco_pick)
self.move_head_srv = rospy.Service(self.mv_head_srv_nm, MoveHead, self.move_head)
self.head_cmd = rospy.Publisher('/head_controller/command', JointTrajectory, queue_size=1)
rospy.loginfo("Launching SphericalService constructor")
self.pick_type = ManipulateAruco()
def start_aruco_pick(self, req):
success = self.pick_type.pick_and_place_aruco("pick")
reply = SetBoolResponse()
reply.success = success
reply.message = ""
return reply
def start_aruco_place(self, req):
success = self.pick_type.pick_and_place_aruco("place")
reply = SetBoolResponse()
reply.success = success
reply.message = ""
return reply
def move_head(self, req):
jt = JointTrajectory()
jt.joint_names = ['head_1_joint', 'head_2_joint']
jtp = JointTrajectoryPoint()
response = MoveHeadResponse()
if req.motion == "down":
jtp.positions = [0.0, -0.75]
response.success = True
elif req.motion == "up":
jtp.positions = [0.0, 0.0]
response.success = True
else:
response.success = False
jtp.time_from_start = rospy.Duration(2.0)
jt.points.append(jtp)
rospy.loginfo("Moving head " + req.motion)
self.head_cmd.publish(jt)
rospy.loginfo("Done.")
return response
class ManipulateAruco(object):
def __init__(self):
rospy.loginfo("Initalizing ManipulateAruco...")
self.aruco_pose_top = rospy.get_param(rospy.get_name() + '/marker_pose_topic')
self.pickup_pose_top = rospy.get_param(rospy.get_name() + '/pickup_marker_pose')
self.place_pose_top = rospy.get_param(rospy.get_name() + '/place_marker_pose')
self.bridge = CvBridge()
self.tfBuffer = tf2_ros.Buffer()
self.tf_l = tf2_ros.TransformListener(self.tfBuffer)
rospy.loginfo("Waiting for /pickup_pose AS...")
self.pick_as = SimpleActionClient(self.pickup_pose_top, PickUpPoseAction)
self.pick_as.wait_for_server()
rospy.loginfo("Waiting for /place_pose AS...")
self.place_as = SimpleActionClient(self.place_pose_top, PickUpPoseAction)
self.place_as.wait_for_server()
rospy.loginfo("Waiting for '/play_motion' AS...")
self.play_m_as = SimpleActionClient('/play_motion', PlayMotionAction)
if not self.play_m_as.wait_for_server(rospy.Duration(300)):
rospy.logerr("Could not connect to /play_motion AS")
exit()
rospy.loginfo("Connected!")
rospy.sleep(1.0)
rospy.loginfo("Setting publishers to torso and head controller...")
self.torso_cmd = rospy.Publisher(
'/torso_controller/command', JointTrajectory, queue_size=1)
self.detected_pose_pub = rospy.Publisher('/detected_aruco_pose',
PoseStamped,
queue_size=1,
latch=True)
self.aruco_pose_rcv = False
self.aruco_pose_subs = rospy.Subscriber(self.aruco_pose_top, PoseStamped, self.aruco_pose_cb)
self.pick_g = PickUpPoseGoal()
rospy.loginfo("Done initializing ManipulateAruco.")
def strip_leading_slash(self, s):
return s[1:] if s.startswith("/") else s
def pick_and_place_aruco(self, string_operation):
success = False
if string_operation == "pick":
self.prepare_robot_pandp()
rospy.sleep(2.0)
while not rospy.is_shutdown() and self.aruco_pose_rcv == False:
rospy.loginfo("spherical_grasp_gui: Waiting for an aruco detection...")
rospy.sleep(1.0)
aruco_pose = self.aruco_pose
aruco_pose.header.frame_id = self.strip_leading_slash(aruco_pose.header.frame_id)
rospy.loginfo("Got: " + str(aruco_pose))
rospy.loginfo("spherical_grasp_gui: Transforming from frame: " +
aruco_pose.header.frame_id + " to 'base_footprint'")
ps = PoseStamped()
ps.pose.position = aruco_pose.pose.position
ps.header.stamp = self.tfBuffer.get_latest_common_time("base_footprint", aruco_pose.header.frame_id)
ps.header.frame_id = aruco_pose.header.frame_id
transform_ok = False
while not transform_ok and not rospy.is_shutdown():
try:
transform = self.tfBuffer.lookup_transform("base_footprint",
ps.header.frame_id,
rospy.Time(0))
aruco_ps = do_transform_pose(ps, transform)
transform_ok = True
except tf2_ros.ExtrapolationException as e:
rospy.logwarn(
"Exception on transforming point... trying again \n(" +
str(e) + ")")
rospy.sleep(0.01)
ps.header.stamp = self.tfBuffer.get_latest_common_time("base_footprint", aruco_pose.header.frame_id)
rospy.loginfo("Setting cube pose based on Aruco detection")
self.pick_g.object_pose.pose.position = aruco_ps.pose.position
self.pick_g.object_pose.pose.position.z -= 0.1*(1.0/2.0)
rospy.loginfo("aruco pose in base_footprint:" + str(self.pick_g))
self.pick_g.object_pose.header.frame_id = 'base_footprint'
self.pick_g.object_pose.pose.orientation.w = 1.0
self.detected_pose_pub.publish(self.pick_g.object_pose)
rospy.loginfo("Gonna pick:" + str(self.pick_g))
self.pick_as.send_goal_and_wait(self.pick_g)
rospy.loginfo("Done!")
result = self.pick_as.get_result()
if str(moveit_error_dict[result.error_code]) != "SUCCESS":
rospy.logerr("Failed to pick, not trying further")
success = False
else:
success = True
self.prepare_robot_nav()
return success
if string_operation == "place":
# Place the object on table in front
rospy.loginfo("Placing aruco marker")
self.place_as.send_goal_and_wait(self.pick_g)
rospy.loginfo("Done!")
result = self.place_as.get_result()
if str(moveit_error_dict[result.error_code]) != "SUCCESS":
rospy.logerr("Failed to place, not trying further")
success = False
else:
success = True
return success
def move_torso(self, string_operation):
jt = JointTrajectory()
jt.joint_names = ['torso_lift_joint']
jtp = JointTrajectoryPoint()
if string_operation == "lift":
jtp.positions = [0.34]
elif string_operation == "lower":
jtp.positions = [0.15]
else:
return
jtp.time_from_start = rospy.Duration(2.5)
jt.points.append(jtp)
rospy.loginfo("Moving torso " + string_operation)
self.torso_cmd.publish(jt)
def prepare_robot_pandp(self):
rospy.loginfo("Unfold arm safely")
pmg = PlayMotionGoal()
pmg.motion_name = 'pregrasp'
pmg.skip_planning = False
self.play_m_as.send_goal_and_wait(pmg)
rospy.loginfo("Done.")
rospy.loginfo("Robot prepared.")
def prepare_robot_nav(self):
# Move torso to its maximum height
self.move_torso("lift")
# Raise arm
rospy.loginfo("Moving arm to a safe pose")
pmg = PlayMotionGoal()
pmg.motion_name = 'pick_final_pose'
pmg.skip_planning = False
self.play_m_as.send_goal_and_wait(pmg)
rospy.loginfo("Raise object done.")
def aruco_pose_cb(self, aruco_pose_msg):
self.aruco_pose = aruco_pose_msg
self.aruco_pose_rcv = True
if __name__ == '__main__':
rospy.init_node('manipulation_client')
sphere = SphericalService()
rospy.spin()
| en | 0.629898 | #!/usr/bin/env python # Copyright (c) 2016 PAL Robotics SL. All Rights Reserved # # Permission to use, copy, modify, and/or distribute this software for # any purpose with or without fee is hereby granted, provided that the # above copyright notice and this permission notice appear in all # copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # # Author: # * <NAME> # * <NAME> # * <NAME> # Place the object on table in front # Move torso to its maximum height # Raise arm | 1.916987 | 2 |
real_trade/api/coincheck/__init__.py | taka-mochi/cryptocurrency-autotrading | 3 | 6631199 | <reponame>taka-mochi/cryptocurrency-autotrading
import os
import sys
#sys.path.append(os.path.dirname(__file__))
| import os
import sys
#sys.path.append(os.path.dirname(__file__)) | fa | 0.221498 | #sys.path.append(os.path.dirname(__file__)) | 1.622069 | 2 |
flappy-remake.py | dd2r/FlappyBirdDemo | 0 | 6631200 | import pygame
from pygame.locals import *
import random
#Initialize pygame
pygame.init()
clock = pygame.time.Clock()
fps = 60
#Screen constants
SCREEN_WIDTH = 864
SCREEN_HEIGHT = 936
#Screen size and window caption
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption('Flappy Bird')
#Define game variables
ground_scroll = 0
scroll_speed = 4
#Load images and store them in variables
bg = pygame.image.load('img/bg.png')
ground_img = pygame.image.load('img/ground.png')
#Sprite or character creation
class Bird(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.images = []
self.index = 0
self.counter = 0
#Iterate through flappy bird images
for num in range(1, 4):
img = pygame.image.load(f'img/bird{num}.png')
self.images.append(img)
self.image = self.images[self.index]
self.rect = self.image.get_rect() #rectangle to hold sprite for collision detection
self.rect.center = [x, y] #location/coordinates for sprite on screen
def update(self):
#Handle the animation
self.counter += 1
flap_cooldown = 5
if self.counter > flap_cooldown:
self.counter = 0
self.index += 1
if self.index >= len(self.images):
self.index = 0
self.image = self.images[self.index]
#Creation of a bird group for the bird class. This keeps track of the sprites added to it
bird_group = pygame.sprite.Group()
#Flappy variable and placement of flappy bird character on screen
flappy = Bird(100, SCREEN_HEIGHT/2)
bird_group.add(flappy) #Adding sprite to bird group of sprites, it is similar to a list in python
#Gameplay variable and start of game loop
gameplay = True
while gameplay:
clock.tick(fps)
#Draw background
screen.blit(bg, (0,0))
#Draw and scroll the ground
screen.blit(ground_img, (ground_scroll, 768))
ground_scroll -= scroll_speed
if abs(ground_scroll) > 35:
ground_scroll = 0
#Draw in Flappy Bird
bird_group.draw(screen)
bird_group.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameplay = False
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
gameplay = False
pygame.display.update()
pygame.quit() | import pygame
from pygame.locals import *
import random
#Initialize pygame
pygame.init()
clock = pygame.time.Clock()
fps = 60
#Screen constants
SCREEN_WIDTH = 864
SCREEN_HEIGHT = 936
#Screen size and window caption
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption('Flappy Bird')
#Define game variables
ground_scroll = 0
scroll_speed = 4
#Load images and store them in variables
bg = pygame.image.load('img/bg.png')
ground_img = pygame.image.load('img/ground.png')
#Sprite or character creation
class Bird(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.images = []
self.index = 0
self.counter = 0
#Iterate through flappy bird images
for num in range(1, 4):
img = pygame.image.load(f'img/bird{num}.png')
self.images.append(img)
self.image = self.images[self.index]
self.rect = self.image.get_rect() #rectangle to hold sprite for collision detection
self.rect.center = [x, y] #location/coordinates for sprite on screen
def update(self):
#Handle the animation
self.counter += 1
flap_cooldown = 5
if self.counter > flap_cooldown:
self.counter = 0
self.index += 1
if self.index >= len(self.images):
self.index = 0
self.image = self.images[self.index]
#Creation of a bird group for the bird class. This keeps track of the sprites added to it
bird_group = pygame.sprite.Group()
#Flappy variable and placement of flappy bird character on screen
flappy = Bird(100, SCREEN_HEIGHT/2)
bird_group.add(flappy) #Adding sprite to bird group of sprites, it is similar to a list in python
#Gameplay variable and start of game loop
gameplay = True
while gameplay:
clock.tick(fps)
#Draw background
screen.blit(bg, (0,0))
#Draw and scroll the ground
screen.blit(ground_img, (ground_scroll, 768))
ground_scroll -= scroll_speed
if abs(ground_scroll) > 35:
ground_scroll = 0
#Draw in Flappy Bird
bird_group.draw(screen)
bird_group.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameplay = False
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
gameplay = False
pygame.display.update()
pygame.quit() | en | 0.851633 | #Initialize pygame #Screen constants #Screen size and window caption #Define game variables #Load images and store them in variables #Sprite or character creation #Iterate through flappy bird images #rectangle to hold sprite for collision detection #location/coordinates for sprite on screen #Handle the animation #Creation of a bird group for the bird class. This keeps track of the sprites added to it #Flappy variable and placement of flappy bird character on screen #Adding sprite to bird group of sprites, it is similar to a list in python #Gameplay variable and start of game loop #Draw background #Draw and scroll the ground #Draw in Flappy Bird | 3.596909 | 4 |
day14-disk-fragmentation/run.py | mg6/advent-of-code-2017 | 0 | 6631201 | #!/usr/bin/env python3
from collections import deque
from functools import reduce
def hash_states(size=256):
state = deque(range(size))
skip_size = 0
at = 0
while True:
rev_length = yield
if rev_length > 1:
state.rotate(-at)
for v in [state.popleft() for _ in range(rev_length)]:
state.appendleft(v)
state.rotate(at)
yield state
at += rev_length + skip_size
skip_size += 1
def knot_hash(lengths, size=256, rounds=1):
h = hash_states(size)
state = None
for _ in range(rounds):
for length in lengths:
next(h)
state = h.send(length)
return state
def puzzle_multiply(lengths, size=256):
a, b, *_ = knot_hash(lengths, size)
return a * b
def sparse_hash(lengths, size=256, rounds=64):
std_suffixes = [17, 31, 73, 47, 23]
return list(knot_hash(lengths + std_suffixes, size, rounds))
def xor_block(state):
return reduce(lambda a, b: a ^ b, state, 0)
def xor_blocks(state, numblocks=16, blocklen=16):
blocks = [state[n*blocklen:(n+1)*blocklen] for n in range(numblocks)]
return [xor_block(b) for b in blocks]
def dense_hash(lengths, size=256, rounds=64):
return xor_blocks(sparse_hash(lengths, size, rounds))
def to_hex(nums):
return ''.join('%02x' % n for n in nums)
def puzzle_hash(msg):
msg = list(map(ord, msg))
return to_hex(dense_hash(msg))
def assert_send(coroutine, value, expected):
next(coroutine)
actual = coroutine.send(value)
assert actual == expected, "expected %s, got %s" % (expected, actual)
h = hash_states(5)
assert_send(h, 3, deque([2, 1, 0, 3, 4]))
assert_send(h, 4, deque([4, 3, 0, 1, 2]))
assert_send(h, 1, deque([4, 3, 0, 1, 2]))
assert_send(h, 5, deque([3, 4, 2, 1, 0]))
assert puzzle_multiply([3, 4, 1, 5], size=5) == 12
assert xor_block([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22]) == \
65 ^ 27 ^ 9 ^ 1 ^ 4 ^ 3 ^ 40 ^ 50 ^ 91 ^ 7 ^ 6 ^ 0 ^ 2 ^ 5 ^ 68 ^ 22 == 64
assert xor_blocks([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22], numblocks=1) == [64]
assert xor_blocks([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22], numblocks=16, blocklen=1) == \
[65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22]
assert xor_blocks([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22], numblocks=8, blocklen=2) == \
[65 ^ 27, 9 ^ 1, 4 ^ 3, 40 ^ 50, 91 ^ 7, 6 ^ 0, 2 ^ 5, 68 ^ 22]
assert to_hex([]) == ''
assert to_hex([32]) == '20'
assert to_hex([64, 7, 255]) == '4007ff'
assert to_hex([1, 2, 3, 4]) == '01020304'
assert puzzle_hash('') == 'a2582a3a0e66e6e86e3812dcb672a272'
assert puzzle_hash('AoC 2017') == '33efeb34ea91902bb2f59c9920caa6cd'
assert puzzle_hash('1,2,3') == '3efbe78a8d82f29979031a4aa0b16a9d'
assert puzzle_hash('1,2,4') == '63960835bcdc130f0b66d7ff4f6a5a8e'
def hex_to_bin(s):
return ''.join('{0:04b}'.format(int(x, base=16)) for x in s)
assert hex_to_bin('0') == '0000'
assert hex_to_bin('1') == '0001'
assert hex_to_bin('e') == '1110'
assert hex_to_bin('f') == '1111'
assert hex_to_bin('a0c2017') == '1010000011000010000000010111'
def count_char(s, char):
return sum(1 for c in s if c == char)
assert count_char('', '1') == 0
assert count_char('0', '1') == 0
assert count_char('02', '1') == 0
assert count_char('1', '1') == 1
assert count_char('11', '1') == 2
def array_grid_from_string(s):
return [list(map(int, line)) for line in s.strip().split('\n')]
assert array_grid_from_string("""
0
""") == [[0]]
assert array_grid_from_string("""
00
00
""") == [[0, 0], [0, 0]]
assert array_grid_from_string("""
01
23
""") == [[0, 1], [2, 3]]
def flood(grid, x, y, visited=None, high=1, low=0):
if not visited:
visited = set()
visited.add((x, y))
if x >= 0 and y >= 0 and grid[x][y] == high:
grid[x][y] = low
else:
return grid
for x, y in ((x+0, y+1), (x+1, y+0), (x+0, y-1), (x-1, y+0)):
try:
if (x, y) not in visited:
flood(grid, x, y, visited)
except IndexError:
pass
return grid
assert flood([[0]], 0, 0) == [[0]]
assert flood([[1]], 0, 0) == [[0]]
assert flood([[2]], 0, 0) == [[2]]
assert flood([[1, 1], [1, 1]], 0, 0) == [[0, 0], [0, 0]]
assert flood([[1, 1], [1, 1]], 1, 1) == [[0, 0], [0, 0]]
assert flood([[1, 1], [1, 2]], 0, 0) == [[0, 0], [0, 2]]
assert flood([[1, 1], [1, 2]], 1, 1) == [[1, 1], [1, 2]]
assert flood([[1, 1, 1], [1, 2, 1], [1, 1, 1]], 0, 0) == [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
assert flood([[1, 1, 1], [1, 2, 1], [1, 1, 1]], 2, 2) == [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
assert flood([[1, 1, 1], [1, 2, 1], [1, 1, 1]], 0, 2) == [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
assert flood([[1, 0, 1], [1, 0, 1], [1, 0, 1]], 0, 0) == [[0, 0, 1], [0, 0, 1], [0, 0, 1]]
assert flood([[1, 0, 1], [0, 0, 0], [1, 0, 1]], 0, 0) == [[0, 0, 1], [0, 0, 0], [1, 0, 1]]
def grid_find(grid, value):
for i, row in enumerate(grid):
for j, item in enumerate(row):
if item == value:
return i, j
return None
assert grid_find([[0]], 0) == (0, 0)
assert grid_find([[0]], 1) is None
assert grid_find([[0, 0], [0, 1]], 1) == (1, 1)
def count_regions(s):
grid = array_grid_from_string(s)
count = 0
while True:
p = grid_find(grid, 1)
if not p:
break
count += 1
x, y = p
flood(grid, x, y)
return count
assert count_regions("""
0
""") == 0
assert count_regions("""
1
""") == 1
assert count_regions("""
11
11
""") == 1
assert count_regions("""
111
101
111
""") == 1
assert count_regions("""
010
111
010
""") == 1
assert count_regions("""
101
101
101
""") == 2
assert count_regions("""
101
010
101
""") == 5
if __name__ == '__main__':
inp = 'vbqugkhl'
grid = '\n'.join(hex_to_bin(puzzle_hash('{}-{}'.format(inp, n)))
for n in range(128))
print(count_char(grid, '1'))
print(count_regions(grid))
| #!/usr/bin/env python3
from collections import deque
from functools import reduce
def hash_states(size=256):
state = deque(range(size))
skip_size = 0
at = 0
while True:
rev_length = yield
if rev_length > 1:
state.rotate(-at)
for v in [state.popleft() for _ in range(rev_length)]:
state.appendleft(v)
state.rotate(at)
yield state
at += rev_length + skip_size
skip_size += 1
def knot_hash(lengths, size=256, rounds=1):
h = hash_states(size)
state = None
for _ in range(rounds):
for length in lengths:
next(h)
state = h.send(length)
return state
def puzzle_multiply(lengths, size=256):
a, b, *_ = knot_hash(lengths, size)
return a * b
def sparse_hash(lengths, size=256, rounds=64):
std_suffixes = [17, 31, 73, 47, 23]
return list(knot_hash(lengths + std_suffixes, size, rounds))
def xor_block(state):
return reduce(lambda a, b: a ^ b, state, 0)
def xor_blocks(state, numblocks=16, blocklen=16):
blocks = [state[n*blocklen:(n+1)*blocklen] for n in range(numblocks)]
return [xor_block(b) for b in blocks]
def dense_hash(lengths, size=256, rounds=64):
return xor_blocks(sparse_hash(lengths, size, rounds))
def to_hex(nums):
return ''.join('%02x' % n for n in nums)
def puzzle_hash(msg):
msg = list(map(ord, msg))
return to_hex(dense_hash(msg))
def assert_send(coroutine, value, expected):
next(coroutine)
actual = coroutine.send(value)
assert actual == expected, "expected %s, got %s" % (expected, actual)
h = hash_states(5)
assert_send(h, 3, deque([2, 1, 0, 3, 4]))
assert_send(h, 4, deque([4, 3, 0, 1, 2]))
assert_send(h, 1, deque([4, 3, 0, 1, 2]))
assert_send(h, 5, deque([3, 4, 2, 1, 0]))
assert puzzle_multiply([3, 4, 1, 5], size=5) == 12
assert xor_block([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22]) == \
65 ^ 27 ^ 9 ^ 1 ^ 4 ^ 3 ^ 40 ^ 50 ^ 91 ^ 7 ^ 6 ^ 0 ^ 2 ^ 5 ^ 68 ^ 22 == 64
assert xor_blocks([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22], numblocks=1) == [64]
assert xor_blocks([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22], numblocks=16, blocklen=1) == \
[65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22]
assert xor_blocks([65, 27, 9, 1, 4, 3, 40, 50, 91, 7, 6, 0, 2, 5, 68, 22], numblocks=8, blocklen=2) == \
[65 ^ 27, 9 ^ 1, 4 ^ 3, 40 ^ 50, 91 ^ 7, 6 ^ 0, 2 ^ 5, 68 ^ 22]
assert to_hex([]) == ''
assert to_hex([32]) == '20'
assert to_hex([64, 7, 255]) == '4007ff'
assert to_hex([1, 2, 3, 4]) == '01020304'
assert puzzle_hash('') == 'a2582a3a0e66e6e86e3812dcb672a272'
assert puzzle_hash('AoC 2017') == '33efeb34ea91902bb2f59c9920caa6cd'
assert puzzle_hash('1,2,3') == '3efbe78a8d82f29979031a4aa0b16a9d'
assert puzzle_hash('1,2,4') == '63960835bcdc130f0b66d7ff4f6a5a8e'
def hex_to_bin(s):
return ''.join('{0:04b}'.format(int(x, base=16)) for x in s)
assert hex_to_bin('0') == '0000'
assert hex_to_bin('1') == '0001'
assert hex_to_bin('e') == '1110'
assert hex_to_bin('f') == '1111'
assert hex_to_bin('a0c2017') == '1010000011000010000000010111'
def count_char(s, char):
return sum(1 for c in s if c == char)
assert count_char('', '1') == 0
assert count_char('0', '1') == 0
assert count_char('02', '1') == 0
assert count_char('1', '1') == 1
assert count_char('11', '1') == 2
def array_grid_from_string(s):
return [list(map(int, line)) for line in s.strip().split('\n')]
assert array_grid_from_string("""
0
""") == [[0]]
assert array_grid_from_string("""
00
00
""") == [[0, 0], [0, 0]]
assert array_grid_from_string("""
01
23
""") == [[0, 1], [2, 3]]
def flood(grid, x, y, visited=None, high=1, low=0):
if not visited:
visited = set()
visited.add((x, y))
if x >= 0 and y >= 0 and grid[x][y] == high:
grid[x][y] = low
else:
return grid
for x, y in ((x+0, y+1), (x+1, y+0), (x+0, y-1), (x-1, y+0)):
try:
if (x, y) not in visited:
flood(grid, x, y, visited)
except IndexError:
pass
return grid
assert flood([[0]], 0, 0) == [[0]]
assert flood([[1]], 0, 0) == [[0]]
assert flood([[2]], 0, 0) == [[2]]
assert flood([[1, 1], [1, 1]], 0, 0) == [[0, 0], [0, 0]]
assert flood([[1, 1], [1, 1]], 1, 1) == [[0, 0], [0, 0]]
assert flood([[1, 1], [1, 2]], 0, 0) == [[0, 0], [0, 2]]
assert flood([[1, 1], [1, 2]], 1, 1) == [[1, 1], [1, 2]]
assert flood([[1, 1, 1], [1, 2, 1], [1, 1, 1]], 0, 0) == [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
assert flood([[1, 1, 1], [1, 2, 1], [1, 1, 1]], 2, 2) == [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
assert flood([[1, 1, 1], [1, 2, 1], [1, 1, 1]], 0, 2) == [[0, 0, 0], [0, 2, 0], [0, 0, 0]]
assert flood([[1, 0, 1], [1, 0, 1], [1, 0, 1]], 0, 0) == [[0, 0, 1], [0, 0, 1], [0, 0, 1]]
assert flood([[1, 0, 1], [0, 0, 0], [1, 0, 1]], 0, 0) == [[0, 0, 1], [0, 0, 0], [1, 0, 1]]
def grid_find(grid, value):
for i, row in enumerate(grid):
for j, item in enumerate(row):
if item == value:
return i, j
return None
assert grid_find([[0]], 0) == (0, 0)
assert grid_find([[0]], 1) is None
assert grid_find([[0, 0], [0, 1]], 1) == (1, 1)
def count_regions(s):
grid = array_grid_from_string(s)
count = 0
while True:
p = grid_find(grid, 1)
if not p:
break
count += 1
x, y = p
flood(grid, x, y)
return count
assert count_regions("""
0
""") == 0
assert count_regions("""
1
""") == 1
assert count_regions("""
11
11
""") == 1
assert count_regions("""
111
101
111
""") == 1
assert count_regions("""
010
111
010
""") == 1
assert count_regions("""
101
101
101
""") == 2
assert count_regions("""
101
010
101
""") == 5
if __name__ == '__main__':
inp = 'vbqugkhl'
grid = '\n'.join(hex_to_bin(puzzle_hash('{}-{}'.format(inp, n)))
for n in range(128))
print(count_char(grid, '1'))
print(count_regions(grid))
| fr | 0.180376 | #!/usr/bin/env python3 0 00 00 01 23 0 1 11 11 111 101 111 010 111 010 101 101 101 101 010 101 | 2.69041 | 3 |
reports/api/urls.py | qgeindreau/Reddit | 54 | 6631202 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import ReportListCreateAPIView
urlpatterns = [
url(r'^reports/$', ReportListCreateAPIView.as_view(), name='list_or_create_reports'),
]
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import ReportListCreateAPIView
urlpatterns = [
url(r'^reports/$', ReportListCreateAPIView.as_view(), name='list_or_create_reports'),
]
| en | 0.308914 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- | 1.50139 | 2 |
data/studio21_generated/introductory/2896/starter_code.py | vijaykumawat256/Prompt-Summarization | 0 | 6631203 | def cost_of_carpet(room_length, room_width, roll_width, roll_cost):
| def cost_of_carpet(room_length, room_width, roll_width, roll_cost):
| none | 1 | 1.457503 | 1 |
|
PythonScripts/PythonBootcamp/Operators/arithmetic.py | SleepWalKer09/PythonProjects | 0 | 6631204 | #arithmetic operators
# 1 + -> suma
# 2 - -> resta
# 3 * -> multiplicacion
# 4 / -> division
# 5 % -> modulo, regresa el remanente de la division
# 6 ** -> exponente
# 7 // -> division sin decimales
a = 5 ** 2
print(a) | #arithmetic operators
# 1 + -> suma
# 2 - -> resta
# 3 * -> multiplicacion
# 4 / -> division
# 5 % -> modulo, regresa el remanente de la division
# 6 ** -> exponente
# 7 // -> division sin decimales
a = 5 ** 2
print(a) | en | 0.188941 | #arithmetic operators # 1 + -> suma # 2 - -> resta # 3 * -> multiplicacion # 4 / -> division # 5 % -> modulo, regresa el remanente de la division # 6 ** -> exponente # 7 // -> division sin decimales | 3.940677 | 4 |
flips.py | framoni/whataretheodds | 0 | 6631205 | """
Karen flips N fair coins. Becky flips N+1 fair coins.
What's the probability for Becky to get more heads than Karen?
Compute it for an arbitary large N.
"""
from itertools import permutations
import numpy as np
import pandas as pd
import seaborn as sns
def probs_head(N):
base_prob = (1/2)**N
P = []
for num_heads in range(0, N+1):
pattern = [1] * num_heads + [0] * (N - num_heads)
P.append(base_prob * len(set(permutations(pattern))))
return P
def probs_players(N, delta, mode):
P_K = probs_head(N)
P_B = probs_head(N + delta)
p_B_over_K = 0
if mode == "equal":
for i in range(len(P_K)):
p_B_over_K += P_K[i] * P_B[i]
elif mode == "more":
for i in range(len(P_K)):
p_B_over_K += P_K[i] * (sum(P_B[i+1:]))
elif mode == "less":
for i in range(len(P_K)):
p_B_over_K += P_K[i] * (sum(P_B[:i]))
return p_B_over_K
def build_df(M, mode):
z = np.zeros([M, M])
for i in range(1, M + 1):
v = []
for j in range(0, M - i + 1):
v.append(probs_players(i, j, mode))
z[i - 1, i - 1:M] = v
df = pd.DataFrame(z)
df.index = np.arange(1, len(df) + 1)
df.columns = pd.RangeIndex(1, len(df.columns) + 1)
cm = sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True)
htm = df.style.background_gradient(cmap=cm, axis=None).render()
return htm
if __name__ == "__main__":
M = input("Size? ")
M = int(M)
htm_equal = build_df(M, "equal")
htm_more = build_df(M, "more")
htm_less = build_df(M, "less")
htm = "<h2>Probability that Player 1 with COL flips gets more heads than Player 2 with ROW flips</h2><br>" + \
htm_more + "<h2>Probability that Player 1 with COL flips gets equal number of heads as Player 2 with ROW flips</h2><br>" \
+ htm_equal + "<h2>Probability that Player 1 with COL flips gets less heads than Player 2 with ROW flips</h2><br>" + \
htm_less
with open("output/flips_{}.htm".format(M), "w") as f:
f.write(htm)
| """
Karen flips N fair coins. Becky flips N+1 fair coins.
What's the probability for Becky to get more heads than Karen?
Compute it for an arbitary large N.
"""
from itertools import permutations
import numpy as np
import pandas as pd
import seaborn as sns
def probs_head(N):
base_prob = (1/2)**N
P = []
for num_heads in range(0, N+1):
pattern = [1] * num_heads + [0] * (N - num_heads)
P.append(base_prob * len(set(permutations(pattern))))
return P
def probs_players(N, delta, mode):
P_K = probs_head(N)
P_B = probs_head(N + delta)
p_B_over_K = 0
if mode == "equal":
for i in range(len(P_K)):
p_B_over_K += P_K[i] * P_B[i]
elif mode == "more":
for i in range(len(P_K)):
p_B_over_K += P_K[i] * (sum(P_B[i+1:]))
elif mode == "less":
for i in range(len(P_K)):
p_B_over_K += P_K[i] * (sum(P_B[:i]))
return p_B_over_K
def build_df(M, mode):
z = np.zeros([M, M])
for i in range(1, M + 1):
v = []
for j in range(0, M - i + 1):
v.append(probs_players(i, j, mode))
z[i - 1, i - 1:M] = v
df = pd.DataFrame(z)
df.index = np.arange(1, len(df) + 1)
df.columns = pd.RangeIndex(1, len(df.columns) + 1)
cm = sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True)
htm = df.style.background_gradient(cmap=cm, axis=None).render()
return htm
if __name__ == "__main__":
M = input("Size? ")
M = int(M)
htm_equal = build_df(M, "equal")
htm_more = build_df(M, "more")
htm_less = build_df(M, "less")
htm = "<h2>Probability that Player 1 with COL flips gets more heads than Player 2 with ROW flips</h2><br>" + \
htm_more + "<h2>Probability that Player 1 with COL flips gets equal number of heads as Player 2 with ROW flips</h2><br>" \
+ htm_equal + "<h2>Probability that Player 1 with COL flips gets less heads than Player 2 with ROW flips</h2><br>" + \
htm_less
with open("output/flips_{}.htm".format(M), "w") as f:
f.write(htm)
| en | 0.879542 | Karen flips N fair coins. Becky flips N+1 fair coins. What's the probability for Becky to get more heads than Karen? Compute it for an arbitary large N. | 2.942378 | 3 |
heat/engine/resources/openstack/heat/wait_condition.py | stackriot/heat | 265 | 6631206 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import wait_condition as wc_base
from heat.engine import support
LOG = logging.getLogger(__name__)
class HeatWaitCondition(resource.Resource):
"""Resource for handling signals received by WaitConditionHandle.
Resource takes WaitConditionHandle and starts to create. Resource is in
CREATE_IN_PROGRESS status until WaitConditionHandle doesn't receive
sufficient number of successful signals (this number can be specified with
count property) and successfully creates after that, or fails due to
timeout.
"""
support_status = support.SupportStatus(version='2014.2')
PROPERTIES = (
HANDLE, TIMEOUT, COUNT,
) = (
'handle', 'timeout', 'count',
)
ATTRIBUTES = (
DATA,
) = (
'data',
)
properties_schema = {
HANDLE: properties.Schema(
properties.Schema.STRING,
_('A reference to the wait condition handle used to signal this '
'wait condition.'),
required=True
),
TIMEOUT: properties.Schema(
properties.Schema.NUMBER,
_('The number of seconds to wait for the correct number of '
'signals to arrive.'),
required=True,
constraints=[
constraints.Range(1, 43200),
]
),
COUNT: properties.Schema(
properties.Schema.INTEGER,
_('The number of success signals that must be received before '
'the stack creation process continues.'),
constraints=[
constraints.Range(min=1),
],
default=1,
update_allowed=True
),
}
attributes_schema = {
DATA: attributes.Schema(
_('JSON string containing data associated with wait '
'condition signals sent to the handle.'),
cache_mode=attributes.Schema.CACHE_NONE,
type=attributes.Schema.STRING
),
}
def _get_handle_resource(self):
return self.stack.resource_by_refid(self.properties[self.HANDLE])
def _validate_handle_resource(self, handle):
if handle is not None and isinstance(
handle, wc_base.BaseWaitConditionHandle):
return
LOG.debug("Got %r instead of wait condition handle", handle)
hn = handle.name if handle else self.properties[self.HANDLE]
msg = _('%s is not a valid wait condition handle.') % hn
raise ValueError(msg)
def _wait(self, handle, started_at, timeout_in):
if timeutils.is_older_than(started_at, timeout_in):
exc = wc_base.WaitConditionTimeout(self, handle)
LOG.info('%(name)s Timed out (%(timeout)s)',
{'name': str(self), 'timeout': str(exc)})
raise exc
handle_status = handle.get_status()
if any(s != handle.STATUS_SUCCESS for s in handle_status):
failure = wc_base.WaitConditionFailure(self, handle)
LOG.info('%(name)s Failed (%(failure)s)',
{'name': str(self), 'failure': str(failure)})
raise failure
if len(handle_status) >= self.properties[self.COUNT]:
LOG.info("%s Succeeded", str(self))
return True
return False
def handle_create(self):
handle = self._get_handle_resource()
self._validate_handle_resource(handle)
started_at = timeutils.utcnow()
return handle, started_at, float(self.properties[self.TIMEOUT])
def check_create_complete(self, data):
return self._wait(*data)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.properties = json_snippet.properties(self.properties_schema,
self.context)
handle = self._get_handle_resource()
started_at = timeutils.utcnow()
return handle, started_at, float(self.properties[self.TIMEOUT])
def check_update_complete(self, data):
return self._wait(*data)
def handle_delete(self):
handle = self._get_handle_resource()
if handle:
handle.metadata_set({})
def _resolve_attribute(self, key):
handle = self._get_handle_resource()
if handle is None:
return ''
if key == self.DATA:
meta = handle.metadata_get(refresh=True)
res = {k: meta[k][handle.DATA] for k in meta}
LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s'
% {'name': self.name,
'key': key,
'res': res})
return str(jsonutils.dumps(res))
def resource_mapping():
return {
'OS::Heat::WaitCondition': HeatWaitCondition,
}
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import wait_condition as wc_base
from heat.engine import support
LOG = logging.getLogger(__name__)
class HeatWaitCondition(resource.Resource):
"""Resource for handling signals received by WaitConditionHandle.
Resource takes WaitConditionHandle and starts to create. Resource is in
CREATE_IN_PROGRESS status until WaitConditionHandle doesn't receive
sufficient number of successful signals (this number can be specified with
count property) and successfully creates after that, or fails due to
timeout.
"""
support_status = support.SupportStatus(version='2014.2')
PROPERTIES = (
HANDLE, TIMEOUT, COUNT,
) = (
'handle', 'timeout', 'count',
)
ATTRIBUTES = (
DATA,
) = (
'data',
)
properties_schema = {
HANDLE: properties.Schema(
properties.Schema.STRING,
_('A reference to the wait condition handle used to signal this '
'wait condition.'),
required=True
),
TIMEOUT: properties.Schema(
properties.Schema.NUMBER,
_('The number of seconds to wait for the correct number of '
'signals to arrive.'),
required=True,
constraints=[
constraints.Range(1, 43200),
]
),
COUNT: properties.Schema(
properties.Schema.INTEGER,
_('The number of success signals that must be received before '
'the stack creation process continues.'),
constraints=[
constraints.Range(min=1),
],
default=1,
update_allowed=True
),
}
attributes_schema = {
DATA: attributes.Schema(
_('JSON string containing data associated with wait '
'condition signals sent to the handle.'),
cache_mode=attributes.Schema.CACHE_NONE,
type=attributes.Schema.STRING
),
}
def _get_handle_resource(self):
return self.stack.resource_by_refid(self.properties[self.HANDLE])
def _validate_handle_resource(self, handle):
if handle is not None and isinstance(
handle, wc_base.BaseWaitConditionHandle):
return
LOG.debug("Got %r instead of wait condition handle", handle)
hn = handle.name if handle else self.properties[self.HANDLE]
msg = _('%s is not a valid wait condition handle.') % hn
raise ValueError(msg)
def _wait(self, handle, started_at, timeout_in):
if timeutils.is_older_than(started_at, timeout_in):
exc = wc_base.WaitConditionTimeout(self, handle)
LOG.info('%(name)s Timed out (%(timeout)s)',
{'name': str(self), 'timeout': str(exc)})
raise exc
handle_status = handle.get_status()
if any(s != handle.STATUS_SUCCESS for s in handle_status):
failure = wc_base.WaitConditionFailure(self, handle)
LOG.info('%(name)s Failed (%(failure)s)',
{'name': str(self), 'failure': str(failure)})
raise failure
if len(handle_status) >= self.properties[self.COUNT]:
LOG.info("%s Succeeded", str(self))
return True
return False
def handle_create(self):
handle = self._get_handle_resource()
self._validate_handle_resource(handle)
started_at = timeutils.utcnow()
return handle, started_at, float(self.properties[self.TIMEOUT])
def check_create_complete(self, data):
return self._wait(*data)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.properties = json_snippet.properties(self.properties_schema,
self.context)
handle = self._get_handle_resource()
started_at = timeutils.utcnow()
return handle, started_at, float(self.properties[self.TIMEOUT])
def check_update_complete(self, data):
return self._wait(*data)
def handle_delete(self):
handle = self._get_handle_resource()
if handle:
handle.metadata_set({})
def _resolve_attribute(self, key):
handle = self._get_handle_resource()
if handle is None:
return ''
if key == self.DATA:
meta = handle.metadata_get(refresh=True)
res = {k: meta[k][handle.DATA] for k in meta}
LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s'
% {'name': self.name,
'key': key,
'res': res})
return str(jsonutils.dumps(res))
def resource_mapping():
return {
'OS::Heat::WaitCondition': HeatWaitCondition,
}
| en | 0.893835 | # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Resource for handling signals received by WaitConditionHandle. Resource takes WaitConditionHandle and starts to create. Resource is in CREATE_IN_PROGRESS status until WaitConditionHandle doesn't receive sufficient number of successful signals (this number can be specified with count property) and successfully creates after that, or fails due to timeout. | 1.93168 | 2 |
qiime2/plugin/plugin.py | longhdo/qiime2 | 0 | 6631207 | <reponame>longhdo/qiime2<filename>qiime2/plugin/plugin.py<gh_stars>0
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import types
import qiime2.sdk
import qiime2.core.type.grammar as grammar
from qiime2.plugin.model import DirectoryFormat
from qiime2.plugin.model.base import FormatBase
from qiime2.core.type import is_semantic_type
from qiime2.core.util import get_view_name
TransformerRecord = collections.namedtuple(
'TransformerRecord', ['transformer', 'plugin', 'citations'])
SemanticTypeRecord = collections.namedtuple(
'SemanticTypeRecord', ['semantic_type', 'plugin'])
FormatRecord = collections.namedtuple('FormatRecord', ['format', 'plugin'])
ViewRecord = collections.namedtuple(
'ViewRecord', ['name', 'view', 'plugin', 'citations'])
TypeFormatRecord = collections.namedtuple(
'TypeFormatRecord', ['type_expression', 'format', 'plugin'])
class Plugin:
def __init__(self, name, version, website, package, citation_text=None,
user_support_text=None, short_description=None,
description=None, citations=None):
self.name = name
self.version = version
self.website = website
self.package = package
if user_support_text is None:
self.user_support_text = ('Please post to the QIIME 2 forum for '
'help with this plugin: https://forum.'
'qiime2.org')
else:
self.user_support_text = user_support_text
if short_description is None:
self.short_description = ''
else:
self.short_description = short_description
if description is None:
self.description = ('No description available. '
'See plugin website: %s'
% self.website)
else:
self.description = description
if citations is None:
self.citations = ()
else:
self.citations = tuple(citations)
self.methods = PluginMethods(self)
self.visualizers = PluginVisualizers(self)
self.pipelines = PluginPipelines(self)
self.formats = {}
self.views = {}
self.types = {}
self.transformers = {}
self.type_formats = []
@property
def actions(self):
# TODO this doesn't handle method/visualizer name collisions. The
# auto-generated `qiime2.plugins.<plugin-name>.actions` API has the
# same problem. This should be solved at method/visualizer registration
# time, which will solve the problem for both APIs.
actions = {}
actions.update(self.methods)
actions.update(self.visualizers)
actions.update(self.pipelines)
return types.MappingProxyType(actions)
def register_formats(self, *formats, citations=None):
for format in formats:
if not issubclass(format, FormatBase):
raise TypeError("%r is not a valid format." % format)
self.register_views(*formats, citations=citations)
def register_views(self, *views, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
for view in views:
if not isinstance(view, type):
raise TypeError("%r should be a class." % view)
is_format = False
if issubclass(view, FormatBase):
is_format = True
name = get_view_name(view)
if name in self.views:
raise NameError("View %r is already registered by this "
"plugin." % name)
self.views[name] = ViewRecord(
name=name, view=view, plugin=self, citations=citations)
if is_format:
self.formats[name] = FormatRecord(format=view, plugin=self)
def register_transformer(self, _fn=None, *, citations=None):
"""
A transformer has the type Callable[[type], type]
"""
# `_fn` allows us to figure out if we are called with or without
# arguments in order to support both:
# ```
# @plugin.register_transformer
# def _(x: A) -> B:
# ...
# ```
# and
# ```
# @plugin.register_transformer(restrict=True)
# def _(x: A) -> B:
# ...
# ```
if citations is None:
citations = ()
else:
citations = tuple(citations)
def decorator(transformer):
annotations = transformer.__annotations__.copy()
if len(annotations) != 2:
raise TypeError("A transformer must only have a single input"
" and output annotation.")
try:
output = annotations.pop('return')
except KeyError:
raise TypeError("A transformer must provide a return type.")
if type(output) is tuple:
raise TypeError("A transformer can only return a single type,"
" not %r." % (output,))
input = list(annotations.values())[0]
if (input, output) in self.transformers:
raise TypeError("Duplicate transformer (%r) from %r to %r."
% (transformer, input, output))
if input == output:
raise TypeError("Plugins should not register identity"
" transformations (%r, %r to %r)."
% (transformer, input, output))
self.transformers[input, output] = TransformerRecord(
transformer=transformer, plugin=self, citations=citations)
return transformer
if _fn is None:
return decorator
else:
# Apply the decorator as we were applied with a single function
return decorator(_fn)
def register_semantic_types(self, *semantic_types):
for semantic_type in semantic_types:
if not is_semantic_type(semantic_type):
raise TypeError("%r is not a semantic type." % semantic_type)
if not (isinstance(semantic_type, grammar.IncompleteExp) or
(semantic_type.is_concrete() and
not semantic_type.fields)):
raise ValueError("%r is not a semantic type symbol."
% semantic_type)
if semantic_type.name in self.types:
raise ValueError("Duplicate semantic type symbol %r."
% semantic_type)
self.types[semantic_type.name] = SemanticTypeRecord(
semantic_type=semantic_type, plugin=self)
def register_semantic_type_to_format(self, semantic_type, artifact_format):
if not issubclass(artifact_format, DirectoryFormat):
raise TypeError("%r is not a directory format." % artifact_format)
if not is_semantic_type(semantic_type):
raise TypeError("%r is not a semantic type." % semantic_type)
if not is_semantic_type(semantic_type):
raise ValueError("%r is not a semantic type expression."
% semantic_type)
for t in semantic_type:
if t.predicate is not None:
raise ValueError("%r has a predicate, differentiating format"
" on predicate is not supported.")
self.type_formats.append(TypeFormatRecord(
type_expression=semantic_type,
format=artifact_format, plugin=self))
class PluginActions(dict):
_subpackage = None
def __init__(self, plugin):
self._plugin = plugin
self._package = 'qiime2.plugins.%s.%s' % (
self._plugin.name.replace('-', '_'), self._subpackage)
super().__init__()
class PluginMethods(PluginActions):
_subpackage = 'methods'
# TODO is `register` a better name now that functions are the only accepted
# source (i.e. markdown support is gone)?
def register_function(self, function, inputs, parameters, outputs, name,
description, input_descriptions=None,
parameter_descriptions=None,
output_descriptions=None, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
method = qiime2.sdk.Method._init(function, inputs, parameters, outputs,
self._package, name, description,
input_descriptions,
parameter_descriptions,
output_descriptions, citations)
self[method.id] = method
class PluginVisualizers(PluginActions):
_subpackage = 'visualizers'
def register_function(self, function, inputs, parameters, name,
description, input_descriptions=None,
parameter_descriptions=None, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
visualizer = qiime2.sdk.Visualizer._init(function, inputs, parameters,
self._package, name,
description,
input_descriptions,
parameter_descriptions,
citations)
self[visualizer.id] = visualizer
class PluginPipelines(PluginActions):
_subpackage = 'pipelines'
def register_function(self, function, inputs, parameters, outputs, name,
description, input_descriptions=None,
parameter_descriptions=None,
output_descriptions=None, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
pipeline = qiime2.sdk.Pipeline._init(function, inputs, parameters,
outputs, self._package, name,
description, input_descriptions,
parameter_descriptions,
output_descriptions, citations)
self[pipeline.id] = pipeline
| # ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import types
import qiime2.sdk
import qiime2.core.type.grammar as grammar
from qiime2.plugin.model import DirectoryFormat
from qiime2.plugin.model.base import FormatBase
from qiime2.core.type import is_semantic_type
from qiime2.core.util import get_view_name
TransformerRecord = collections.namedtuple(
'TransformerRecord', ['transformer', 'plugin', 'citations'])
SemanticTypeRecord = collections.namedtuple(
'SemanticTypeRecord', ['semantic_type', 'plugin'])
FormatRecord = collections.namedtuple('FormatRecord', ['format', 'plugin'])
ViewRecord = collections.namedtuple(
'ViewRecord', ['name', 'view', 'plugin', 'citations'])
TypeFormatRecord = collections.namedtuple(
'TypeFormatRecord', ['type_expression', 'format', 'plugin'])
class Plugin:
def __init__(self, name, version, website, package, citation_text=None,
user_support_text=None, short_description=None,
description=None, citations=None):
self.name = name
self.version = version
self.website = website
self.package = package
if user_support_text is None:
self.user_support_text = ('Please post to the QIIME 2 forum for '
'help with this plugin: https://forum.'
'qiime2.org')
else:
self.user_support_text = user_support_text
if short_description is None:
self.short_description = ''
else:
self.short_description = short_description
if description is None:
self.description = ('No description available. '
'See plugin website: %s'
% self.website)
else:
self.description = description
if citations is None:
self.citations = ()
else:
self.citations = tuple(citations)
self.methods = PluginMethods(self)
self.visualizers = PluginVisualizers(self)
self.pipelines = PluginPipelines(self)
self.formats = {}
self.views = {}
self.types = {}
self.transformers = {}
self.type_formats = []
@property
def actions(self):
# TODO this doesn't handle method/visualizer name collisions. The
# auto-generated `qiime2.plugins.<plugin-name>.actions` API has the
# same problem. This should be solved at method/visualizer registration
# time, which will solve the problem for both APIs.
actions = {}
actions.update(self.methods)
actions.update(self.visualizers)
actions.update(self.pipelines)
return types.MappingProxyType(actions)
def register_formats(self, *formats, citations=None):
for format in formats:
if not issubclass(format, FormatBase):
raise TypeError("%r is not a valid format." % format)
self.register_views(*formats, citations=citations)
def register_views(self, *views, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
for view in views:
if not isinstance(view, type):
raise TypeError("%r should be a class." % view)
is_format = False
if issubclass(view, FormatBase):
is_format = True
name = get_view_name(view)
if name in self.views:
raise NameError("View %r is already registered by this "
"plugin." % name)
self.views[name] = ViewRecord(
name=name, view=view, plugin=self, citations=citations)
if is_format:
self.formats[name] = FormatRecord(format=view, plugin=self)
def register_transformer(self, _fn=None, *, citations=None):
"""
A transformer has the type Callable[[type], type]
"""
# `_fn` allows us to figure out if we are called with or without
# arguments in order to support both:
# ```
# @plugin.register_transformer
# def _(x: A) -> B:
# ...
# ```
# and
# ```
# @plugin.register_transformer(restrict=True)
# def _(x: A) -> B:
# ...
# ```
if citations is None:
citations = ()
else:
citations = tuple(citations)
def decorator(transformer):
annotations = transformer.__annotations__.copy()
if len(annotations) != 2:
raise TypeError("A transformer must only have a single input"
" and output annotation.")
try:
output = annotations.pop('return')
except KeyError:
raise TypeError("A transformer must provide a return type.")
if type(output) is tuple:
raise TypeError("A transformer can only return a single type,"
" not %r." % (output,))
input = list(annotations.values())[0]
if (input, output) in self.transformers:
raise TypeError("Duplicate transformer (%r) from %r to %r."
% (transformer, input, output))
if input == output:
raise TypeError("Plugins should not register identity"
" transformations (%r, %r to %r)."
% (transformer, input, output))
self.transformers[input, output] = TransformerRecord(
transformer=transformer, plugin=self, citations=citations)
return transformer
if _fn is None:
return decorator
else:
# Apply the decorator as we were applied with a single function
return decorator(_fn)
def register_semantic_types(self, *semantic_types):
for semantic_type in semantic_types:
if not is_semantic_type(semantic_type):
raise TypeError("%r is not a semantic type." % semantic_type)
if not (isinstance(semantic_type, grammar.IncompleteExp) or
(semantic_type.is_concrete() and
not semantic_type.fields)):
raise ValueError("%r is not a semantic type symbol."
% semantic_type)
if semantic_type.name in self.types:
raise ValueError("Duplicate semantic type symbol %r."
% semantic_type)
self.types[semantic_type.name] = SemanticTypeRecord(
semantic_type=semantic_type, plugin=self)
def register_semantic_type_to_format(self, semantic_type, artifact_format):
if not issubclass(artifact_format, DirectoryFormat):
raise TypeError("%r is not a directory format." % artifact_format)
if not is_semantic_type(semantic_type):
raise TypeError("%r is not a semantic type." % semantic_type)
if not is_semantic_type(semantic_type):
raise ValueError("%r is not a semantic type expression."
% semantic_type)
for t in semantic_type:
if t.predicate is not None:
raise ValueError("%r has a predicate, differentiating format"
" on predicate is not supported.")
self.type_formats.append(TypeFormatRecord(
type_expression=semantic_type,
format=artifact_format, plugin=self))
class PluginActions(dict):
_subpackage = None
def __init__(self, plugin):
self._plugin = plugin
self._package = 'qiime2.plugins.%s.%s' % (
self._plugin.name.replace('-', '_'), self._subpackage)
super().__init__()
class PluginMethods(PluginActions):
_subpackage = 'methods'
# TODO is `register` a better name now that functions are the only accepted
# source (i.e. markdown support is gone)?
def register_function(self, function, inputs, parameters, outputs, name,
description, input_descriptions=None,
parameter_descriptions=None,
output_descriptions=None, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
method = qiime2.sdk.Method._init(function, inputs, parameters, outputs,
self._package, name, description,
input_descriptions,
parameter_descriptions,
output_descriptions, citations)
self[method.id] = method
class PluginVisualizers(PluginActions):
_subpackage = 'visualizers'
def register_function(self, function, inputs, parameters, name,
description, input_descriptions=None,
parameter_descriptions=None, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
visualizer = qiime2.sdk.Visualizer._init(function, inputs, parameters,
self._package, name,
description,
input_descriptions,
parameter_descriptions,
citations)
self[visualizer.id] = visualizer
class PluginPipelines(PluginActions):
_subpackage = 'pipelines'
def register_function(self, function, inputs, parameters, outputs, name,
description, input_descriptions=None,
parameter_descriptions=None,
output_descriptions=None, citations=None):
if citations is None:
citations = ()
else:
citations = tuple(citations)
pipeline = qiime2.sdk.Pipeline._init(function, inputs, parameters,
outputs, self._package, name,
description, input_descriptions,
parameter_descriptions,
output_descriptions, citations)
self[pipeline.id] = pipeline | en | 0.811562 | # ---------------------------------------------------------------------------- # Copyright (c) 2016-2019, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- # TODO this doesn't handle method/visualizer name collisions. The # auto-generated `qiime2.plugins.<plugin-name>.actions` API has the # same problem. This should be solved at method/visualizer registration # time, which will solve the problem for both APIs. A transformer has the type Callable[[type], type] # `_fn` allows us to figure out if we are called with or without # arguments in order to support both: # ``` # @plugin.register_transformer # def _(x: A) -> B: # ... # ``` # and # ``` # @plugin.register_transformer(restrict=True) # def _(x: A) -> B: # ... # ``` # Apply the decorator as we were applied with a single function # TODO is `register` a better name now that functions are the only accepted # source (i.e. markdown support is gone)? | 1.778435 | 2 |
burlap/locale.py | tutordelphia/burlap | 0 | 6631208 | from __future__ import print_function
import re
from burlap import Satchel
from burlap.constants import *
from burlap.decorators import task
# Note, using the name "locale" doesn't allow the satchel to be imported due to a conflict with an existing variable/module.
class LocalesSatchel(Satchel):
name = 'locales'
def set_defaults(self):
self.env.language = 'en_US:en' # 'en_US.UTF-8'
self.env.lang = 'C' # 'en_US.UTF-8'
self.env.lc_all = None # 'C' # 'en_US.UTF-8'
@property
def packager_system_packages(self):
return {
UBUNTU: ['locales'],
DEBIAN: ['locales'],
}
@task
def cat_locale(self):
return self.run('cat /etc/default/locale')
def get_locale_dict(self, text=None):
"""
Reads /etc/default/locale and returns a dictionary representing its key pairs.
"""
text = text or self.cat_locale()
# Format NAME="value".
return dict(re.findall(r'^([a-zA-Z_]+)\s*=\s*[\'\"]*([0-8a-zA-Z_\.\:\-]+)[\'\"]*', text, re.MULTILINE))
@task(precursors=['user'])
def configure(self):
r = self.local_renderer
# Locales is an odd case, because it needs to be run before most packages are installed
# but it still needs to ensure it's own package is installed.
self.install_packages()
args = []
if r.env.language:
args.append('LANGUAGE={language}')
if r.env.lang:
args.append('LANG={lang}')
if r.env.lc_all:
args.append('LC_ALL={lc_all}')
r.env.exports = ' '.join('export %s;' % _ for _ in args)
r.env.lang = r.env.lang or r.env.language
if r.env.lang:
r.sudo('{exports} locale-gen {lang}')
r.sudo('{exports} dpkg-reconfigure --frontend=noninteractive locales')
r.env.update_args = ' '.join(args)
r.sudo('{exports} update-locale {update_args}')
locales = LocalesSatchel()
| from __future__ import print_function
import re
from burlap import Satchel
from burlap.constants import *
from burlap.decorators import task
# Note, using the name "locale" doesn't allow the satchel to be imported due to a conflict with an existing variable/module.
class LocalesSatchel(Satchel):
name = 'locales'
def set_defaults(self):
self.env.language = 'en_US:en' # 'en_US.UTF-8'
self.env.lang = 'C' # 'en_US.UTF-8'
self.env.lc_all = None # 'C' # 'en_US.UTF-8'
@property
def packager_system_packages(self):
return {
UBUNTU: ['locales'],
DEBIAN: ['locales'],
}
@task
def cat_locale(self):
return self.run('cat /etc/default/locale')
def get_locale_dict(self, text=None):
"""
Reads /etc/default/locale and returns a dictionary representing its key pairs.
"""
text = text or self.cat_locale()
# Format NAME="value".
return dict(re.findall(r'^([a-zA-Z_]+)\s*=\s*[\'\"]*([0-8a-zA-Z_\.\:\-]+)[\'\"]*', text, re.MULTILINE))
@task(precursors=['user'])
def configure(self):
r = self.local_renderer
# Locales is an odd case, because it needs to be run before most packages are installed
# but it still needs to ensure it's own package is installed.
self.install_packages()
args = []
if r.env.language:
args.append('LANGUAGE={language}')
if r.env.lang:
args.append('LANG={lang}')
if r.env.lc_all:
args.append('LC_ALL={lc_all}')
r.env.exports = ' '.join('export %s;' % _ for _ in args)
r.env.lang = r.env.lang or r.env.language
if r.env.lang:
r.sudo('{exports} locale-gen {lang}')
r.sudo('{exports} dpkg-reconfigure --frontend=noninteractive locales')
r.env.update_args = ' '.join(args)
r.sudo('{exports} update-locale {update_args}')
locales = LocalesSatchel()
| en | 0.868554 | # Note, using the name "locale" doesn't allow the satchel to be imported due to a conflict with an existing variable/module. # 'en_US.UTF-8' # 'en_US.UTF-8' # 'C' # 'en_US.UTF-8' Reads /etc/default/locale and returns a dictionary representing its key pairs. # Format NAME="value". # Locales is an odd case, because it needs to be run before most packages are installed # but it still needs to ensure it's own package is installed. | 2.449201 | 2 |
models/zebra_motionworks.py | tervay/the-blue-alliance | 1 | 6631209 | <reponame>tervay/the-blue-alliance<gh_stars>1-10
import datetime
from google.appengine.ext import ndb
from models.event import Event
class ZebraMotionWorks(ndb.Model):
"""
The ZebraMotionWorks model represents robot tracking data from the
Zebra MotionWorks system
"""
event = ndb.KeyProperty(kind=Event, required=True)
data = ndb.JsonProperty(required=True)
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False, default=datetime.datetime.fromtimestamp(0))
updated = ndb.DateTimeProperty(auto_now=True, indexed=False, default=datetime.datetime.fromtimestamp(0))
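    # Usage sketch (illustrative; the event key and data payload below are hypothetical):
    #   ZebraMotionWorks(
    #       event=ndb.Key(Event, '2020casj'),
    #       data={'key': '2020casj_qm1', 'times': [...], 'alliances': {...}},
    #   ).put()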
| import datetime
from google.appengine.ext import ndb
from models.event import Event
class ZebraMotionWorks(ndb.Model):
"""
The ZebraMotionWorks model represents robot tracking data from the
Zebra MotionWorks system
"""
event = ndb.KeyProperty(kind=Event, required=True)
data = ndb.JsonProperty(required=True)
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False, default=datetime.datetime.fromtimestamp(0))
updated = ndb.DateTimeProperty(auto_now=True, indexed=False, default=datetime.datetime.fromtimestamp(0)) | en | 0.749953 | The ZebraMotionWorks model represents robot tracking data from the Zebra MotionWorks system | 2.841345 | 3 |
nrf_extract_edits/bs_pickup.py | siddacious/SA-45 | 7 | 6631210 | #!/usr/bin/env python2
# to run: python2 pickup.py
#
# OpenOCD library: https://github.com/screwer/OpenOCD
# change line 193
# if not self.Name:
#
# Use the OpenOCD library
from OpenOCD import OpenOCD
import sys
def int_to_bytes(value, length):
result = []
for i in range(0, length):
result.append(value >> (i * 8) & 0xff)
result.reverse()
return bytearray(result)
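# For example, int_to_bytes(0x12345678, 4) returns bytearray(b'\x12\x34\x56\x78'),
# i.e. the value in big-endian byte order.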
# create connection to running instance of OpenOCD
ocd = OpenOCD()
# reset and halt the processor
ocd.Reset(Halt=True)
# create a variable for the program counter register
pc = ocd.Reg("pc")
# the address found with drop.py that contains the instruction
# that will copy the contents of memory and store it in a register
########## GOODIES ########################
# found possible instruction at 0x000006D6
# r4 = 0x000006D1, pc = 0x000006D0
#
# found possible instruction at 0x000006DE
# r3 = 0x000006D1, pc = 0x000006D0
###########################################
# Instruction for BB units?
pc_pickup_val = 0x6DC
# one of the instructions I got for my R-series
#pc_pickup_val = 0x6DE
# the register into which the memory address to be read is written
write_reg = ocd.Reg("r3")
# the register to read the value stored at the specified memory
read_reg = ocd.Reg("r3")
# the size of the chip's flash memory
#flash_size = 0x40000
flash_size = 0x1000
# the output filename
outfile = "rinse_and_repeat.bin"
# reset all registers to 0 (do we really need this ??)
reg = []
for i in range(0,13):
reg.append(ocd.Reg("r%d" % i))
for i in range(len(reg)):
reg[i].Write(0)
# create output file
data = open(outfile, 'w+b')
# loop over all memory
for addr in range(0,flash_size,4):
# write the address of the memory copy instruction to the program counter
pc.Write(pc_pickup_val)
# reset all registers to 0 (do we really need this ??)
# reg = []
# for i in range(0,13):
# reg.append(ocd.Reg("r%d" % i))
# for i in range(len(reg)):
# reg[i].Write(0)
# write the memory address to be read
write_reg.Write(addr)
# execute the instruction
ocd.Step()
# read the memory contents back
buf = read_reg.Read()
# convert the int value to bytes and write that to the output file
data.write(int_to_bytes(buf,4))
# create some sort of output so we know the program is still running (it takes a while)
sys.stdout.write('.')
sys.stdout.flush()
print("[0x%08X] 0x%08X" % (addr, buf))
data.close()
print()
print("Done")
| #!/usr/bin/env python2
# to run: python2 pickup.py
#
# OpenOCD library: https://github.com/screwer/OpenOCD
# change line 193
# if not self.Name:
#
# Use the OpenOCD library
from OpenOCD import OpenOCD
import sys
def int_to_bytes(value, length):
result = []
for i in range(0, length):
result.append(value >> (i * 8) & 0xff)
result.reverse()
return bytearray(result)
# create connection to running instance of OpenOCD
ocd = OpenOCD()
# reset and halt the processor
ocd.Reset(Halt=True)
# create a variable for the program counter register
pc = ocd.Reg("pc")
# the address found with drop.py that contains the instruction
# that will copy the contents of memory and store it in a register
########## GOODIES ########################
# found possible instruction at 0x000006D6
# r4 = 0x000006D1, pc = 0x000006D0
#
# found possible instruction at 0x000006DE
# r3 = 0x000006D1, pc = 0x000006D0
###########################################
# Instruction for BB units?
pc_pickup_val = 0x6DC
# one of the instructions I got for my R-series
#pc_pickup_val = 0x6DE
# the register into which the memory address to be read is written
write_reg = ocd.Reg("r3")
# the register to read the value stored at the specified memory
read_reg = ocd.Reg("r3")
# the size of the chip's flash memory
#flash_size = 0x40000
flash_size = 0x1000
# the output filename
outfile = "rinse_and_repeat.bin"
# reset all registers to 0 (do we really need this ??)
reg = []
for i in range(0,13):
reg.append(ocd.Reg("r%d" % i))
for i in range(len(reg)):
reg[i].Write(0)
# create output file
data = open(outfile, 'w+b')
# loop over all memory
for addr in range(0,flash_size,4):
# write the address of the memory copy instruction to the program counter
pc.Write(pc_pickup_val)
# reset all registers to 0 (do we really need this ??)
# reg = []
# for i in range(0,13):
# reg.append(ocd.Reg("r%d" % i))
# for i in range(len(reg)):
# reg[i].Write(0)
# write the memory address to be read
write_reg.Write(addr)
# execute the instruction
ocd.Step()
# read the memory contents back
buf = read_reg.Read()
# convert the int value to bytes and write that to the output file
data.write(int_to_bytes(buf,4))
# create some sort of output so we know the program is still running (it takes a while)
sys.stdout.write('.')
sys.stdout.flush()
print("[0x%08X] 0x%08X" % (addr, buf))
data.close()
print()
print("Done")
| en | 0.723528 | #!/usr/bin/env python2 # to run: python2 pickup.py # # OpenOCD library: https://github.com/screwer/OpenOCD # change line 193 # if not self.Name: # # Use the OpenOCD library # create connection to running instance of OpenOCD # reset and halt the processor # create a variable for the program counter register # the address found with drop.py that contains the instruction # that will copy the contents of memory and store it in a register ########## GOODIES ######################## # found possible instruction at 0x000006D6 # r4 = 0x000006D1, pc = 0x000006D0 # # found possible instruction at 0x000006DE # r3 = 0x000006D1, pc = 0x000006D0 ########################################### # Instruction for BB units? # one of the instructions I got for my R-series #pc_pickup_val = 0x6DE # the regsiter where to write the memory address to be read # the register to read the value stored at the specified memory # the size of the the chip's flash memory #flash_size = 0x40000 # the output filename # reset all registers to 0 (do we really need this ??) # create output file # loop over all memory # write the address of the memory copy instruction to the program counter # reset all registers to 0 (do we really need this ??) # reg = [] # for i in range(0,13): # reg.append(ocd.Reg("r%d" % i)) # for i in range(len(reg)): # reg[i].Write(0) # write the memory address to be read # execute the instruction # read the memory contents back # convert the int value to bytes and write that to the output file # create some sort of output so we know the program is still running (it takes a while) | 2.960784 | 3 |
tests/helpers/__init__.py | krypton-unite/time_series_generator | 4 | 6631211 | from .json_reader import get_json_from_file, get_data_from_file, write_data_to_file | from .json_reader import get_json_from_file, get_data_from_file, write_data_to_file | none | 1 | 1.459998 | 1 |
|
relish/views/__init__.py | mbs-dev/django-relish | 1 | 6631212 | <filename>relish/views/__init__.py
from .messages import SuccessMessageMixin
| <filename>relish/views/__init__.py
from .messages import SuccessMessageMixin
| none | 1 | 1.070977 | 1 |
|
crawler_api/crawlers/base.py | GabrielRocha/tj_crawler | 1 | 6631213 | import asyncio
from abc import ABC, abstractmethod
from parsel import Selector
class BaseCrawler(ABC):
paths = {}
def __init__(self, session):
self.session = session
async def execute(self, **kwargs):
task = [self._start_request(_id, url, **kwargs) for _id, url in self.paths.items()]
result = await asyncio.gather(*task)
return (item for item in result if item)
async def _start_request(self, _id, url, **kwargs):
async with self.session.get(url.format(**kwargs)) as response:
data = await response.text()
return self.parse(Selector(text=data), _id=_id)
@abstractmethod
def parse(self, data, _id):
raise NotImplementedError
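# Minimal subclass sketch (illustrative; the URL, kwarg and CSS selector are hypothetical):
#
#     class ExampleCrawler(BaseCrawler):
#         paths = {'first_instance': 'https://example.com/search?q={number}'}
#
#         def parse(self, data, _id):
#             # "data" is a parsel Selector over the response body
#             return {'source': _id, 'title': data.css('h1::text').get()}
#
#     # results = await ExampleCrawler(session).execute(number='12345')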
| import asyncio
from abc import ABC, abstractmethod
from parsel import Selector
class BaseCrawler(ABC):
paths = {}
def __init__(self, session):
self.session = session
async def execute(self, **kwargs):
task = [self._start_request(_id, url, **kwargs) for _id, url in self.paths.items()]
result = await asyncio.gather(*task)
return (item for item in result if item)
async def _start_request(self, _id, url, **kwargs):
async with self.session.get(url.format(**kwargs)) as response:
data = await response.text()
return self.parse(Selector(text=data), _id=_id)
@abstractmethod
def parse(self, data, _id):
raise NotImplementedError
| none | 1 | 2.836848 | 3 |
|
tpm2_pytss/util/swig.py | pdxjohnny/tpm2-pytss | 0 | 6631214 | import os
import inspect
import logging
from functools import partial, wraps
from typing import Any
logging.basicConfig(
level=getattr(logging, os.environ.get("TPM2_PYTSS_LOG_LEVEL", "CRITICAL").upper())
)
LOGGER = logging.getLogger(__name__)
class PointerAlreadyInUse(Exception):
pass # pragma: no cov
class ContextManagedPointerClass:
"""
By forcing context management we ensure users of the bindings are explicit
about their usage and freeing of allocated resources. Rather than relying on
the garbage collector. This makes it harder for them to leave assets lying
around.
"""
def __init__(self, value: Any = None):
self._init_value = value
self.ptr = None
@property
def value(self) -> Any:
return self._value(self.ptr)
@value.setter
def value(self, value) -> None:
self._assign(self.ptr, value)
@classmethod
def frompointer(cls, ptr: Any) -> "ContextManagedPointerClass":
return cls(ptr)
def __enter__(self):
if self.ptr is not None:
raise PointerAlreadyInUse()
self.ptr = self._new()
if self._init_value is not None:
self.value = self._init_value
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
self._delete(self.ptr)
self.ptr = None
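# Usage sketch (illustrative; "UINT32_PTR" stands for whatever class pointer_class()
# below produces for the wrapped SWIG module):
#
#     with UINT32_PTR(42) as p:
#         use(p.ptr)      # raw SWIG pointer
#         print(p.value)  # dereferenced value
#
# The pointer is allocated on entering the block and freed on leaving it.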
def pointer_class(name, *, module=None):
"""
    Creates a context-managed class for the requested pointer data type,
    built from the wrapped module's new/copy/delete/assign/value helper functions.
"""
check = {
"_new": "new_{}",
"_copy": "copy_{}",
"_delete": "delete_{}",
"_assign": "{}_assign",
"_value": "{}_value",
}
# Look up the methods
for key, value in check.items():
check[key] = module.__dict__.get(value.format(name), None)
if not all(check.values()):
return AttributeError
# Ensure we don't pass self to the functions
for key, value in check.items():
check[key] = partial(value)
return type(name, (ContextManagedPointerClass,), check)
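# For instance (illustrative), if the wrapped module exposes new_uint32, copy_uint32,
# delete_uint32, uint32_assign and uint32_value, then
#     uint32 = pointer_class('uint32', module=wrapped_module)
# yields a ContextManagedPointerClass subclass; if any helper is missing, the
# AttributeError class itself is returned instead.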
class Wrapper:
"""
SWIG does a great job. This class takes SWIG outputs and makes them a bit
more Pythonic.
"""
def __getattribute__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
for attempt in [
partial(pointer_class, module=self.MODULE),
lambda name: self.MODULE.__dict__.get(name, AttributeError),
]:
prop = attempt(name)
if prop is not AttributeError:
return prop
raise
class WrapperMetaClass(type, Wrapper):
# Enable changing function arguments of one value into another before they
# are passed to the swig function. This allows us to create abstractions on
# top of the swig abstractions to make the interface more user friendly.
CALL_MODS = set()
def __init__(cls, name, bases, namespace, **kwargs):
"""
Needed for compatibility with Python 3.5
"""
super().__init__(name, bases, namespace)
def __new__(cls, name, bases, props, module=None):
# Set the module
props["MODULE"] = module
# Create the class
cls = super(WrapperMetaClass, cls).__new__(cls, name, bases, props)
# Go through all the functions in the module
for key, func in module.__dict__.items():
if not key.startswith("_") and inspect.isfunction(func):
func = cls.wrap(func)
setattr(cls, key, partial(func))
return cls
def __getattribute__(cls, name):
try:
return object.__getattribute__(cls, name)
except AttributeError:
module = object.__getattribute__(cls, "MODULE")
for attempt in [
partial(pointer_class, module=module),
lambda name: module.__dict__.get(name, AttributeError),
]:
prop = attempt(name)
if prop is not AttributeError:
return prop
raise
@classmethod
def register_call_mod(cls, mod):
cls.CALL_MODS.add(mod)
return mod
@classmethod
def wrap(cls, func):
sig = inspect.signature(func)
parameters = list(sig.parameters.values())
@wraps(func)
def wrapper(*args, **kwargs):
"""
wrapper will be assigned to the ESYSContext class as a method. As
such the first argument, self, is an instance of ESYSContext
"""
args = list(args)
# Combine the arguments we were passed and the parameters from the
# signature and loop through them all.
for i, (value, parameter) in enumerate(zip(args, parameters)):
# Go through each of the call modifiers and use the returned
# value as the new value for the argument if it was not None
for modify in cls.CALL_MODS:
                    modified = modify(parameter.name, parameter.annotation, value)
                    if modified is not None:
                        args[i] = modified
LOGGER.debug(
("%s(\n " % (func.__name__,))
+ "\n ".join(
map(lambda x: "%s: %s," % (x[0].name, x[1]), zip(parameters, args))
)
+ "\n)"
)
return func(*args, **kwargs)
return wrapper
@staticmethod
def call_mod_ptr_or_value(annotation, value):
"""
Last step in a call_mod_ for classes which wrap swig types and expose them
via ``value`` and ``ptr`` properties.
"""
# If a pointer is being requested, then pass the SessionContext pointer. Do
        # this by checking whether the reversed annotation string starts with a *,
        # i.e. whether the last character in the type is a * (for pointer)
if annotation[::-1].startswith("*"):
return value.ptr
# Otherwise we pass the value that is being pointed to by the SessionContext
# pointer
return value.value
@WrapperMetaClass.register_call_mod
def call_mod_context_managed_pointer_class(name, annotation, value):
if isinstance(value, ContextManagedPointerClass):
return WrapperMetaClass.call_mod_ptr_or_value(annotation, value)
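# Sketch of registering an extra call modifier (illustrative; returning None leaves
# the argument untouched):
#
#     @WrapperMetaClass.register_call_mod
#     def call_mod_encode_str(name, annotation, value):
#         if isinstance(value, str) and annotation.endswith("char *"):
#             return value.encode()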
| import os
import inspect
import logging
from functools import partial, wraps
from typing import Any
logging.basicConfig(
level=getattr(logging, os.environ.get("TPM2_PYTSS_LOG_LEVEL", "CRITICAL").upper())
)
LOGGER = logging.getLogger(__name__)
class PointerAlreadyInUse(Exception):
pass # pragma: no cov
class ContextManagedPointerClass:
"""
By forcing context management we ensure users of the bindings are explicit
about their usage and freeing of allocated resources. Rather than relying on
the garbage collector. This makes it harder for them to leave assets lying
around.
"""
def __init__(self, value: Any = None):
self._init_value = value
self.ptr = None
@property
def value(self) -> Any:
return self._value(self.ptr)
@value.setter
def value(self, value) -> None:
self._assign(self.ptr, value)
@classmethod
def frompointer(cls, ptr: Any) -> "ContextManagedPointerClass":
return cls(ptr)
def __enter__(self):
if self.ptr is not None:
raise PointerAlreadyInUse()
self.ptr = self._new()
if self._init_value is not None:
self.value = self._init_value
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
self._delete(self.ptr)
self.ptr = None
def pointer_class(name, *, module=None):
"""
    Creates a context-managed class for the requested pointer data type,
    built from the wrapped module's new/copy/delete/assign/value helper functions.
"""
check = {
"_new": "new_{}",
"_copy": "copy_{}",
"_delete": "delete_{}",
"_assign": "{}_assign",
"_value": "{}_value",
}
# Look up the methods
for key, value in check.items():
check[key] = module.__dict__.get(value.format(name), None)
if not all(check.values()):
return AttributeError
# Ensure we don't pass self to the functions
for key, value in check.items():
check[key] = partial(value)
return type(name, (ContextManagedPointerClass,), check)
class Wrapper:
"""
SWIG does a great job. This class takes SWIG outputs and makes them a bit
more Pythonic.
"""
def __getattribute__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
for attempt in [
partial(pointer_class, module=self.MODULE),
lambda name: self.MODULE.__dict__.get(name, AttributeError),
]:
prop = attempt(name)
if prop is not AttributeError:
return prop
raise
class WrapperMetaClass(type, Wrapper):
# Enable changing function arguments of one value into another before they
# are passed to the swig function. This allows us to create abstractions on
# top of the swig abstractions to make the interface more user friendly.
CALL_MODS = set()
def __init__(cls, name, bases, namespace, **kwargs):
"""
Needed for compatibility with Python 3.5
"""
super().__init__(name, bases, namespace)
def __new__(cls, name, bases, props, module=None):
# Set the module
props["MODULE"] = module
# Create the class
cls = super(WrapperMetaClass, cls).__new__(cls, name, bases, props)
# Go through all the functions in the module
for key, func in module.__dict__.items():
if not key.startswith("_") and inspect.isfunction(func):
func = cls.wrap(func)
setattr(cls, key, partial(func))
return cls
def __getattribute__(cls, name):
try:
return object.__getattribute__(cls, name)
except AttributeError:
module = object.__getattribute__(cls, "MODULE")
for attempt in [
partial(pointer_class, module=module),
lambda name: module.__dict__.get(name, AttributeError),
]:
prop = attempt(name)
if prop is not AttributeError:
return prop
raise
@classmethod
def register_call_mod(cls, mod):
cls.CALL_MODS.add(mod)
return mod
@classmethod
def wrap(cls, func):
sig = inspect.signature(func)
parameters = list(sig.parameters.values())
@wraps(func)
def wrapper(*args, **kwargs):
"""
wrapper will be assigned to the ESYSContext class as a method. As
such the first argument, self, is an instance of ESYSContext
"""
args = list(args)
# Combine the arguments we were passed and the parameters from the
# signature and loop through them all.
for i, (value, parameter) in enumerate(zip(args, parameters)):
# Go through each of the call modifiers and use the returned
# value as the new value for the argument if it was not None
for modify in cls.CALL_MODS:
                    modified = modify(parameter.name, parameter.annotation, value)
                    if modified is not None:
                        args[i] = modified
LOGGER.debug(
("%s(\n " % (func.__name__,))
+ "\n ".join(
map(lambda x: "%s: %s," % (x[0].name, x[1]), zip(parameters, args))
)
+ "\n)"
)
return func(*args, **kwargs)
return wrapper
@staticmethod
def call_mod_ptr_or_value(annotation, value):
"""
Last step in a call_mod_ for classes which wrap swig types and expose them
via ``value`` and ``ptr`` properties.
"""
# If a pointer is being requested, then pass the SessionContext pointer. Do
        # this by checking whether the reversed annotation string starts with a *,
        # i.e. whether the last character in the type is a * (for pointer)
if annotation[::-1].startswith("*"):
return value.ptr
# Otherwise we pass the value that is being pointed to by the SessionContext
# pointer
return value.value
@WrapperMetaClass.register_call_mod
def call_mod_context_managed_pointer_class(name, annotation, value):
if isinstance(value, ContextManagedPointerClass):
return WrapperMetaClass.call_mod_ptr_or_value(annotation, value)
| en | 0.896289 | # pragma: no cov By forcing context management we ensure users of the bindings are explicit about their usage and freeing of allocated resources. Rather than relying on the garbage collector. This makes it harder for them to leave assets lying around. Creates a class of the requested pointer functions data type which supports context management. # Look up the methods # Ensure we don't pass self to the functions SWIG does a great job. This class takes SWIG outputs and makes them a bit more Pythonic. # Enable changing function arguments of one value into another before they # are passed to the swig function. This allows us to create abstractions on # top of the swig abstractions to make the interface more user friendly. Needed for compatibility with Python 3.5 # Set the module # Create the class # Go through all the functions in the module wrapper will be assigned to the ESYSContext class as a method. As such the first argument, self, is an instance of ESYSContext # Combine the arguments we were passed and the parameters from the # signature and loop through them all. # Go through each of the call modifiers and use the returned # value as the new value for the argument if it was not None Last step in a call_mod_ for classes which wrap swig types and expose them via ``value`` and ``ptr`` properties. # If a pointer is being requested, then pass the SessionContext pointer. Do # this by checking if the reverse of the string representation of the value # starts in a *, aka the last charater in the type is a * (for pointer) # Otherwise we pass the value that is being pointed to by the SessionContext # pointer | 2.372627 | 2 |
python/testData/codeInsight/smartEnter/argumentsFirst.py | truthiswill/intellij-community | 2 | 6631215 | <gh_stars>1-10
def foo(*a):
pass
foo<caret>(1, 2, 3 | def foo(*a):
pass
foo<caret>(1, 2, 3 | none | 1 | 1.280604 | 1 |
|
data/data_loader.py | Fodark/PerceptualSimilarity | 2,245 | 6631216 | def CreateDataLoader(datafolder,dataroot='./dataset',dataset_mode='2afc',load_size=64,batch_size=1,serial_batches=True,nThreads=4):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
# print(data_loader.name())
data_loader.initialize(datafolder,dataroot=dataroot+'/'+dataset_mode,dataset_mode=dataset_mode,load_size=load_size,batch_size=batch_size,serial_batches=serial_batches, nThreads=nThreads)
return data_loader
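# Call sketch (illustrative; the dataset folder name is hypothetical):
#   loader = CreateDataLoader('val/traditional', dataroot='./dataset', dataset_mode='2afc', batch_size=50)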
| def CreateDataLoader(datafolder,dataroot='./dataset',dataset_mode='2afc',load_size=64,batch_size=1,serial_batches=True,nThreads=4):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
# print(data_loader.name())
data_loader.initialize(datafolder,dataroot=dataroot+'/'+dataset_mode,dataset_mode=dataset_mode,load_size=load_size,batch_size=batch_size,serial_batches=serial_batches, nThreads=nThreads)
return data_loader
| en | 0.079477 | # print(data_loader.name()) | 2.38237 | 2 |
losses/get_loss.py | zdaiot/NAIC-Person-Re-identification | 0 | 6631217 | <reponame>zdaiot/NAIC-Person-Re-identification<filename>losses/get_loss.py<gh_stars>0
import torch
import torch.nn as nn
from losses.triplet_loss import TripletLoss, CrossEntropyLabelSmooth, TripletLossOrigin
class Loss(nn.Module):
def __init__(self, model_name, loss_name, margin, num_classes):
"""
        :param model_name: name of the model; type str
        :param loss_name: name of the loss; type str
        :param margin: margin parameter used by TripletLoss; type float
        :param num_classes: number of classes for the network
"""
super(Loss, self).__init__()
self.model_name = model_name
self.loss_name = loss_name
self.loss_struct = []
for loss in self.loss_name.split('+'):
weight, loss_type = loss.split('*')
if loss_type == 'CrossEntropy':
loss_function = nn.CrossEntropyLoss()
elif loss_type == 'SmoothCrossEntropy':
loss_function = CrossEntropyLabelSmooth(num_classes=num_classes)
elif loss_type == 'Triplet':
loss_function = TripletLoss(margin)
else:
assert "loss: {} not support yet".format(self.loss_name)
self.loss_struct.append({
'type': loss_type,
'weight': float(weight),
'function': loss_function
})
        # If there are multiple loss functions, append an extra entry for their sum
if len(self.loss_struct) > 1:
self.loss_struct.append({'type': 'Total', 'weight': 0, 'function': None})
self.loss_module = nn.ModuleList([l['function'] for l in self.loss_struct if l['function'] is not None])
        # self.log has shape [1, len(self.loss)]; the leading entries hold each loss value of an iteration, the last entry holds their sum for that iteration
self.log, self.log_sum = torch.zeros(len(self.loss_struct)), torch.zeros(len(self.loss_struct))
if torch.cuda.is_available():
self.loss_module = torch.nn.DataParallel(self.loss_module)
self.loss_module.cuda()
def forward(self, outputs, labels):
"""
        :param outputs: outputs of the network; the exact shape depends on the network
        :param labels: ground-truth labels of the data; the exact shape depends on the network
        :return loss_sum: sum of the losses, not passed through item(), so it can be used for backpropagation
"""
losses = []
        # Compute the value of every loss function
for i, l in enumerate(self.loss_struct):
            # Handle loss computation for the MGN network
if self.model_name == 'MGN' and l['type'] == 'Triplet':
loss = [l['function'](output, labels) for output in outputs[8:11]]
loss = sum(loss) / len(loss)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
elif self.model_name == 'MGN' and l['type'] in ['CrossEntropy', 'SmoothCrossEntropy']:
loss = [l['function'](output, labels) for output in outputs[:8]]
loss = sum(loss) / len(loss)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
            # Handle loss computation for the other networks
elif self.model_name != 'MGN' and l['type'] == 'Triplet':
loss = l['function'](outputs[1], labels)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
elif self.model_name != 'MGN' and l['type'] in ['CrossEntropy', 'SmoothCrossEntropy']:
loss = l['function'](outputs[0], labels)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
            # Interface reserved for other loss types
else:
pass
loss_sum = sum(losses)
if len(self.loss_struct) > 1:
self.log[-1] = loss_sum.item()
self.log_sum[-1] += loss_sum.item()
return loss_sum
def record_loss_iteration(self, writer_function=None, global_step=None):
""" 用于记录每一次迭代的结果
:param writer_function: tensorboard的写入函数;类型为callable
:param global_step: 当前的步数;类型为int
:return: [损失名称: 损失值][损失名称: 损失值][损失名称: 损失值];类型为str
"""
descript = []
for l, each_loss in zip(self.loss_struct, self.log):
if writer_function:
writer_function(l['type'] + 'Iteration', each_loss, global_step)
descript.append('[{}: {:.4f}]'.format(l['type'], each_loss))
return ''.join(descript)
def record_loss_epoch(self, num_iterations, writer_function=None, global_step=None):
""" 用于记录每一个epoch的结果
:param num_iterations:该epoch包含多少个迭代;类型为int
:param writer_function: tensorboard的写入函数;类型为callable
:param global_step: 当前的步数;类型为int
:return: [Average 损失名称: 平均损失值][Average 损失名称: 平均损失值][Average 损失名称: 平均损失值];类型为str
"""
descript = []
for l, each_loss in zip(self.loss_struct, self.log_sum):
if writer_function:
writer_function(l['type'] + 'Epoch', each_loss/num_iterations, global_step)
descript.append('[Average {}: {:.4f}]'.format(l['type'], each_loss/num_iterations))
        # Remember to reset self.log_sum to zero
self.log_sum = torch.zeros(len(self.loss_struct))
return ''.join(descript)
| import torch
import torch.nn as nn
from losses.triplet_loss import TripletLoss, CrossEntropyLabelSmooth, TripletLossOrigin
class Loss(nn.Module):
def __init__(self, model_name, loss_name, margin, num_classes):
"""
        :param model_name: name of the model; type str
        :param loss_name: name of the loss; type str
        :param margin: margin parameter used by TripletLoss; type float
        :param num_classes: number of classes for the network
"""
super(Loss, self).__init__()
self.model_name = model_name
self.loss_name = loss_name
self.loss_struct = []
for loss in self.loss_name.split('+'):
weight, loss_type = loss.split('*')
if loss_type == 'CrossEntropy':
loss_function = nn.CrossEntropyLoss()
elif loss_type == 'SmoothCrossEntropy':
loss_function = CrossEntropyLabelSmooth(num_classes=num_classes)
elif loss_type == 'Triplet':
loss_function = TripletLoss(margin)
else:
assert "loss: {} not support yet".format(self.loss_name)
self.loss_struct.append({
'type': loss_type,
'weight': float(weight),
'function': loss_function
})
        # If there are multiple loss functions, append an extra entry for their sum
if len(self.loss_struct) > 1:
self.loss_struct.append({'type': 'Total', 'weight': 0, 'function': None})
self.loss_module = nn.ModuleList([l['function'] for l in self.loss_struct if l['function'] is not None])
        # self.log has shape [1, len(self.loss)]; the leading entries hold each loss value of an iteration, the last entry holds their sum for that iteration
self.log, self.log_sum = torch.zeros(len(self.loss_struct)), torch.zeros(len(self.loss_struct))
if torch.cuda.is_available():
self.loss_module = torch.nn.DataParallel(self.loss_module)
self.loss_module.cuda()
def forward(self, outputs, labels):
"""
        :param outputs: outputs of the network; the exact shape depends on the network
        :param labels: ground-truth labels of the data; the exact shape depends on the network
        :return loss_sum: sum of the losses, not passed through item(), so it can be used for backpropagation
"""
losses = []
        # Compute the value of every loss function
for i, l in enumerate(self.loss_struct):
            # Handle loss computation for the MGN network
if self.model_name == 'MGN' and l['type'] == 'Triplet':
loss = [l['function'](output, labels) for output in outputs[8:11]]
loss = sum(loss) / len(loss)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
elif self.model_name == 'MGN' and l['type'] in ['CrossEntropy', 'SmoothCrossEntropy']:
loss = [l['function'](output, labels) for output in outputs[:8]]
loss = sum(loss) / len(loss)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
            # Handle loss computation for the other networks
elif self.model_name != 'MGN' and l['type'] == 'Triplet':
loss = l['function'](outputs[1], labels)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
elif self.model_name != 'MGN' and l['type'] in ['CrossEntropy', 'SmoothCrossEntropy']:
loss = l['function'](outputs[0], labels)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
self.log[i] = effective_loss.item()
self.log_sum[i] += self.log[i]
            # Interface reserved for other loss types
else:
pass
loss_sum = sum(losses)
if len(self.loss_struct) > 1:
self.log[-1] = loss_sum.item()
self.log_sum[-1] += loss_sum.item()
return loss_sum
def record_loss_iteration(self, writer_function=None, global_step=None):
""" 用于记录每一次迭代的结果
:param writer_function: tensorboard的写入函数;类型为callable
:param global_step: 当前的步数;类型为int
:return: [损失名称: 损失值][损失名称: 损失值][损失名称: 损失值];类型为str
"""
descript = []
for l, each_loss in zip(self.loss_struct, self.log):
if writer_function:
writer_function(l['type'] + 'Iteration', each_loss, global_step)
descript.append('[{}: {:.4f}]'.format(l['type'], each_loss))
return ''.join(descript)
def record_loss_epoch(self, num_iterations, writer_function=None, global_step=None):
""" 用于记录每一个epoch的结果
:param num_iterations:该epoch包含多少个迭代;类型为int
:param writer_function: tensorboard的写入函数;类型为callable
:param global_step: 当前的步数;类型为int
:return: [Average 损失名称: 平均损失值][Average 损失名称: 平均损失值][Average 损失名称: 平均损失值];类型为str
"""
descript = []
for l, each_loss in zip(self.loss_struct, self.log_sum):
if writer_function:
writer_function(l['type'] + 'Epoch', each_loss/num_iterations, global_step)
descript.append('[Average {}: {:.4f}]'.format(l['type'], each_loss/num_iterations))
        # Remember to reset self.log_sum to zero
self.log_sum = torch.zeros(len(self.loss_struct))
return ''.join(descript) | zh | 0.752311 | :param model_name: 模型的名称;类型为str :param loss_name: 损失的名称;类型为str :param margin: TripletLoss中的参数;类型为float :param num_classes: 网络的参数 # 如果有多个损失函数,在加上一个求和操作 # self.log的维度为[1, len(self.loss)],前面几个分别存放某次迭代各个损失函数的损失值,最后一个存放某次迭代损失值之和 :param outputs: 网络的输出,具体维度和网络有关 :param labels: 数据的真实类标,具体维度和网络有关 :return loss_sum: 损失函数之和,未经过item()函数,可用于反向传播 # 计算每一个损失函数的损失值 # 处理MGN网络的损失计算 # 处理其它网络的损失计算 # 保留接口 用于记录每一次迭代的结果 :param writer_function: tensorboard的写入函数;类型为callable :param global_step: 当前的步数;类型为int :return: [损失名称: 损失值][损失名称: 损失值][损失名称: 损失值];类型为str 用于记录每一个epoch的结果 :param num_iterations:该epoch包含多少个迭代;类型为int :param writer_function: tensorboard的写入函数;类型为callable :param global_step: 当前的步数;类型为int :return: [Average 损失名称: 平均损失值][Average 损失名称: 平均损失值][Average 损失名称: 平均损失值];类型为str # 注意要把 self.log_sum清零 | 2.411145 | 2 |
data-science-essentials-in-python/numpy-gradient.py | zzragida/study-datascience | 0 | 6631218 | import numpy as np
def main():
f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
print(np.gradient(f))
print(np.gradient(f, 2))
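    # Expected values (print formatting may differ):
    #   [1.  1.5 2.5 3.5 4.5 5. ]        (central differences, one-sided at the edges)
    #   [0.5  0.75 1.25 1.75 2.25 2.5]   (same, but with sample spacing 2)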
if __name__ == "__main__":
main() | import numpy as np
def main():
f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
print(np.gradient(f))
print(np.gradient(f, 2))
if __name__ == "__main__":
main() | none | 1 | 3.431188 | 3 |
|
tests/child_chain/test_child_chain_integration.py | kevjue/plasma-mvp | 1 | 6631219 | <filename>tests/child_chain/test_child_chain_integration.py
from web3 import Web3
from plasma.child_chain.transaction import Transaction
NULL_ADDRESS = b'\x00' * 20
NULL_ADDRESS_HEX = '0x' + NULL_ADDRESS.hex()
def test_deposit(test_lang):
owner_1 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
tx = Transaction(0, 0, 0, 0, 0, 0, NULL_ADDRESS, owner_1['address'], 100, NULL_ADDRESS, 0, 0)
deposit_hash = Web3.soliditySha3(['address', 'address', 'uint256'], [owner_1['address'], NULL_ADDRESS_HEX, 100])
assert test_lang.transactions[deposit_id]['tx'].hash == tx.hash
deposit_blknum = 1
deposit_block = test_lang.child_chain.blocks[deposit_blknum]
assert deposit_block.transaction_set[0].hash == tx.hash
assert test_lang.root_chain.call().getChildChain(deposit_blknum)[0] == deposit_hash
def test_transfer(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
transfer_id = test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
tx = Transaction(1, 0, 0,
0, 0, 0,
NULL_ADDRESS,
owner_2['address'], 100,
NULL_ADDRESS, 0,
0)
assert test_lang.transactions[transfer_id]['tx'].hash == tx.hash
assert test_lang.child_chain.current_block.transaction_set[0].hash == tx.hash
def test_submit_block(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
test_lang.submit_block()
blknum = 1000
assert test_lang.root_chain.call().getChildChain(blknum)[0] == test_lang.child_chain.blocks[blknum].merklize_transaction_set()
def test_confirm(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
transfer_id = test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
test_lang.submit_block()
test_lang.confirm(transfer_id, owner_1)
assert test_lang.transactions[transfer_id]['confirm_sigs'] != ''
def test_withdraw_transfer(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
transfer_id = test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
test_lang.submit_block()
test_lang.confirm(transfer_id, owner_1)
test_lang.withdraw(transfer_id, 0, owner_2)
exit_data = test_lang.root_chain.call().getExit(1000000000000)
assert exit_data[0] == owner_2['address']
assert exit_data[1] == NULL_ADDRESS_HEX
assert exit_data[2] == 100
def test_withdraw_deposit(test_lang):
owner_1 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
test_lang.withdraw(deposit_id, 0, owner_1)
exit_data = test_lang.root_chain.call().getExit(1000000001)
assert exit_data[0] == owner_1['address']
assert exit_data[1] == NULL_ADDRESS_HEX
assert exit_data[2] == 100
| <filename>tests/child_chain/test_child_chain_integration.py
from web3 import Web3
from plasma.child_chain.transaction import Transaction
NULL_ADDRESS = b'\x00' * 20
NULL_ADDRESS_HEX = '0x' + NULL_ADDRESS.hex()
def test_deposit(test_lang):
owner_1 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
tx = Transaction(0, 0, 0, 0, 0, 0, NULL_ADDRESS, owner_1['address'], 100, NULL_ADDRESS, 0, 0)
deposit_hash = Web3.soliditySha3(['address', 'address', 'uint256'], [owner_1['address'], NULL_ADDRESS_HEX, 100])
assert test_lang.transactions[deposit_id]['tx'].hash == tx.hash
deposit_blknum = 1
deposit_block = test_lang.child_chain.blocks[deposit_blknum]
assert deposit_block.transaction_set[0].hash == tx.hash
assert test_lang.root_chain.call().getChildChain(deposit_blknum)[0] == deposit_hash
def test_transfer(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
transfer_id = test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
tx = Transaction(1, 0, 0,
0, 0, 0,
NULL_ADDRESS,
owner_2['address'], 100,
NULL_ADDRESS, 0,
0)
assert test_lang.transactions[transfer_id]['tx'].hash == tx.hash
assert test_lang.child_chain.current_block.transaction_set[0].hash == tx.hash
def test_submit_block(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
test_lang.submit_block()
blknum = 1000
assert test_lang.root_chain.call().getChildChain(blknum)[0] == test_lang.child_chain.blocks[blknum].merklize_transaction_set()
def test_confirm(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
transfer_id = test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
test_lang.submit_block()
test_lang.confirm(transfer_id, owner_1)
assert test_lang.transactions[transfer_id]['confirm_sigs'] != ''
def test_withdraw_transfer(test_lang):
owner_1 = test_lang.get_account()
owner_2 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
transfer_id = test_lang.transfer(deposit_id, 0, owner_2, 100, owner_1)
test_lang.submit_block()
test_lang.confirm(transfer_id, owner_1)
test_lang.withdraw(transfer_id, 0, owner_2)
exit_data = test_lang.root_chain.call().getExit(1000000000000)
assert exit_data[0] == owner_2['address']
assert exit_data[1] == NULL_ADDRESS_HEX
assert exit_data[2] == 100
def test_withdraw_deposit(test_lang):
owner_1 = test_lang.get_account()
deposit_id = test_lang.deposit(owner_1, 100)
test_lang.withdraw(deposit_id, 0, owner_1)
exit_data = test_lang.root_chain.call().getExit(1000000001)
assert exit_data[0] == owner_1['address']
assert exit_data[1] == NULL_ADDRESS_HEX
assert exit_data[2] == 100
| none | 1 | 1.895343 | 2 |
|
muxmon.py | bertwesarg/openssh-mux-mon | 0 | 6631220 | <gh_stars>0
#!/usr/bin/env python2
import os
import os.path
import stat
import sys
import subprocess
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import gconf
import pynotify
import pyinotify
import appindicator
import SshMuxClient
GCONF_APP = '/apps/sshmuxmon'
GCONF_APP_PATH = os.path.join(GCONF_APP, 'path')
GCONF_APP_HOSTS = os.path.join(GCONF_APP, 'hosts')
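# The monitored directory and the host list live under /apps/sshmuxmon in GConf.
# They can be seeded from a shell, for example (illustrative values):
#   gconftool-2 --type string --set /apps/sshmuxmon/path "$HOME/.ssh/mux"
#   gconftool-2 --type list --list-type string --set /apps/sshmuxmon/hosts '[alice@example.org,bob@example.net]'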
class SshMuxEntry(SshMuxClient.SshMuxClient):
name = ''
item = None
sub = None
n_fwds = 0
n_sessions = 0
def __init__(self, path):
SshMuxClient.SshMuxClient.__init__(self, path)
class SshMuxIndicator(
appindicator.Indicator,
pyinotify.Notifier):
known = {}
new = {}
root = None
def __init__(self):
self.icon_path = os.path.normpath(os.path.join(
os.getcwd(),
os.path.dirname(__file__),
'icons'))
self.icon_name = 'file://' + os.path.join(
self.icon_path, 'openssh-256.png')
self._gcc = gconf.client_get_default()
self._gcc.add_dir(GCONF_APP, gconf.CLIENT_PRELOAD_NONE)
self._gc_nid = self._gcc.notify_add(GCONF_APP, self.gconf_notify, None)
pynotify.init('SSH-MUX-Monitor')
self._wm = pyinotify.WatchManager()
pyinotify.Notifier.__init__(self, self._wm, self.process_inotify_event)
self._wd = None
self._w = gobject.io_add_watch(self._wm.get_fd(), gobject.IO_IN, self.process_io_watch)
appindicator.Indicator.__init__(self,
'ssh-mux-monitor',
'openssh',
appindicator.CATEGORY_COMMUNICATIONS,
self.icon_path)
self.set_status(appindicator.STATUS_ACTIVE)
# create a menu
menu = gtk.Menu()
item = gtk.SeparatorMenuItem()
menu.append(item)
item.show()
self.connect_to = gtk.ImageMenuItem(gtk.STOCK_CONNECT)
self.connect_to.set_label('Connect to')
menu.append(self.connect_to)
self.connect_to.connect('activate', self.connect_to_activate)
self.connect_to.set_submenu(gtk.Menu())
self.connect_to.show()
self.close_all_item = gtk.ImageMenuItem(gtk.STOCK_DISCONNECT)
self.close_all_item.set_label('Disconnect All')
menu.append(self.close_all_item)
self.close_all_item.connect('activate', self.close_all_activate)
self.close_all_item.show()
self.close_all_item.set_sensitive(False)
item = gtk.SeparatorMenuItem()
menu.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_PREFERENCES)
item.set_label('Preferences...')
menu.append(item)
item.connect('activate', self.preferences_activate)
item.show()
item = gtk.SeparatorMenuItem()
menu.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_QUIT)
menu.append(item)
item.connect('activate', self.quit_activate)
item.show()
self.static_menu_entry_len = len(menu.get_children())
self.set_menu(menu)
self.reread_path()
def __del__(self):
gobject.source_remove(self._w)
if self._gc_nid:
self._gcc.notify_remove(self._gc_nid)
def reread_path(self):
try:
s = self._gcc.get_string(GCONF_APP_PATH)
if self.root and s and os.path.samefile(self.root, s):
return
except:
s = None
        # they are not the same; clean up the previous root, if any
if self.root:
# clear previous known mux
for mc in self.known.itervalues():
mc.close()
self.get_menu().remove(mc.item)
self.close_all_item.set_sensitive(False)
if self.root in self._wd:
self._wm.del_watch(self._wd[self.root])
self.known = {}
self.root = None
self._wd = None
if not s:
return
if not os.path.isdir(s):
return
self.root = s
self._wd = self._wm.add_watch(self.root, pyinotify.IN_CREATE | pyinotify.IN_DELETE)
muxs = []
for path in os.listdir(self.root):
full = os.path.join(self.root, path)
try:
sb = os.stat(full)
if not stat.S_ISSOCK(sb.st_mode):
continue
muxs += [(full, sb.st_mtime)]
except:
continue
muxs.sort(key=lambda x: x[1])
for full, mtime in muxs:
try:
mc = SshMuxEntry(full)
res, exts = mc.connect()
if not res:
continue
res, name = mc.info('%r@%h:%p')
if res:
if name[-3:] == ':22':
name = name[:-3]
else:
#print >>sys.stderr, ' could not get info from %s: %s' % (path, name,)
name = os.path.basename(full)
mc.name = name
self.known[full] = mc
#print >>sys.stderr, 'Already existing mux: %s' % (name,)
self.add_to_menu(mc)
except:
continue
def add_to_menu(self, mc):
self.close_all_item.set_sensitive(True)
menu = self.get_menu()
mc.item = gtk.ImageMenuItem()
mc.item.set_label(mc.name)
image = gtk.image_new_from_icon_name('network-server', gtk.ICON_SIZE_MENU)
mc.item.set_image(image)
mc.item.set_always_show_image(True)
menu.insert(mc.item, len(menu.get_children()) - self.static_menu_entry_len)
mc.item.connect('activate', self.mux_activate, mc)
mc.item.show()
mc.sub = gtk.Menu()
item = gtk.MenuItem('Forwards (click to close):')
mc.sub.append(item)
item.set_sensitive(False)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_ADD)
item.set_label('New...')
mc.sub.append(item)
#item.set_sensitive(False)
item.connect('activate', self.mux_new_forward, mc)
item.show()
item = gtk.SeparatorMenuItem()
mc.sub.append(item)
item.show()
item = gtk.MenuItem('Sessions:')
mc.sub.append(item)
item.set_sensitive(False)
item.show()
item = gtk.SeparatorMenuItem()
mc.sub.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_STOP)
mc.sub.append(item)
item.connect('activate', self.mux_stop_activate, mc)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_DISCONNECT)
mc.sub.append(item)
item.connect('activate', self.mux_close_activate, mc)
item.show()
mc.item.set_submenu(mc.sub)
self.set_menu(menu)
def quit_activate(self, w):
#print 'exit indicator'
gtk.main_quit()
def preferences_activate(self, w):
SshMuxPrefsDialog(self._gcc)
def close_all_activate(self, w):
for mc in self.known.itervalues():
mc.exit()
def connect_to_activate(self, w):
try:
hosts = self._gcc.get_list(GCONF_APP_HOSTS, gconf.VALUE_STRING)
except:
hosts = []
submenu = w.get_submenu()
for child in submenu.get_children():
submenu.remove(child)
# populate devices menu
for host in hosts:
item = gtk.ImageMenuItem()
item.set_label(host)
try:
image = gtk.image_new_from_icon_name('network-server', gtk.ICON_SIZE_MENU)
item.set_image(image)
item.set_always_show_image(True)
except:
pass
submenu.append(item)
item.connect('activate', self.connect_to_host_activate, host)
item.show()
w.set_submenu(submenu)
def connect_to_host_activate(self, w, host):
subprocess.Popen(['ssh', host, '/bin/true'], close_fds=True)
def mux_activate(self, w, mc):
# update forwards and sessions
for i in range(mc.n_fwds):
mc.sub.remove(mc.sub.get_children()[1])
for i in range(mc.n_sessions):
mc.sub.remove(mc.sub.get_children()[4])
mc.n_fwds = 0
mc.n_sessions = 0
res, fwds = mc.forwards()
if not res:
#print >>sys.stderr, 'cannot list forwardings: %s' % (fwds,)
fwds = []
res, sessions = mc.sessions()
if not res:
#print >>sys.stderr, 'cannot list sessions: %s' % (sessions,)
sessions = []
def _hp(h, p):
if p == SshMuxClient.MUX_FWD_PORT_STREAMLOCAL:
return h
else:
return '%s:%d' % (h, p,)
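        # Each fwd tuple is (fid, ftype, listen_host, listen_port, connect_host,
        # connect_port); e.g. (3, 'local', '', 8080, 'localhost', 80) corresponds
        # to a "-L 8080:localhost:80" style forwarding (illustrative values).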
for fwd in fwds:
fid, ftype, lh, lp, ch, cp = fwd
label = ''
lh = lh + ':'
if lh == ':':
lh = ''
if ftype == 'local':
label = '%s -> %s' % (_hp(lh, lp), _hp(ch, cp),)
if ftype == 'remote':
label = '%s <- %s' % (_hp(ch, cp), _hp(lh, lp),)
if ftype == 'dynamic':
label = '%s -> *' % (_hp(lh if lh else 'localhost', lp),)
item = gtk.ImageMenuItem(gtk.STOCK_CANCEL)
item.set_label(label)
mc.sub.insert(item, 1 + mc.n_fwds)
mc.n_fwds += 1
item.connect('activate', self.mux_close_forward, mc, fwd)
item.show()
for s in sessions:
sid, stype, rid, cid, tname, rname = s
#print >>sys.stderr, 'session: %r' % (s,)
try:
session_name, session_action = rname.split(': ', 2)
except:
session_name, session_action = (rname, '',)
try:
session_name, session_args = session_name.split('(', 2)
session_args = session_args[:-1]
except:
session_args = None
item = gtk.ImageMenuItem()
item.set_label('%s' % (rname,))
if tname == 'stdio-forward':
image = gtk.image_new_from_icon_name('preferences-system-network-proxy-symbolic', gtk.ICON_SIZE_MENU)
item.set_image(image)
if session_name == 'subsystem-session' and session_action == 'sftp':
image = gtk.image_new_from_icon_name('folder-remote-ftp', gtk.ICON_SIZE_MENU)
item.set_image(image)
if session_name == 'shell-session':
image = gtk.image_new_from_icon_name('terminal', gtk.ICON_SIZE_MENU)
item.set_image(image)
if session_name == 'exec-session':
image = gtk.image_new_from_stock(gtk.STOCK_EXECUTE, gtk.ICON_SIZE_MENU)
item.set_image(image)
mc.sub.insert(item, 4 + mc.n_fwds + mc.n_sessions)
mc.n_sessions += 1
item.show()
mc.item.set_submenu(mc.sub)
def mux_close_forward(self, w, mc, fwd):
#print 'closing forward [%s] %s:%u -> %s:%u' % (fwd[1], fwd[2], fwd[3], fwd[4], fwd[5],)
mc.close_forward(fwd[1], fwd[2], fwd[3], fwd[4], fwd[5])
def mux_new_forward(self, w, mc):
SshMuxForwardingDialog(mc)
def mux_stop_activate(self, w, mc):
#print 'stoping %s' % (mc.path,)
mc.stop()
def mux_close_activate(self, w, mc):
#print 'closing %s %s:%r' % (mc.path, type(mc), mc,)
mc.exit()
def process_io_watch(self, source, cb_condition):
self.read_events()
self.process_events()
return True
def process_file_create(self, event):
#print >>sys.stderr, 'file_create %s' % (event.pathname,)
try:
sb = os.stat(event.pathname)
except:
#print >>sys.stderr, ' could\'t stat %s' % (event.pathname,)
return
if not stat.S_ISSOCK(sb.st_mode):
#print >>sys.stderr, ' not a socket %s' % (event.pathname,)
return
if event.pathname in self.known:
#print >>sys.stderr, ' already known %s' % (event.pathname,)
return
# defer notification, the mux listener will rename it to the final path
# when he is ready
#print >>sys.stderr, ' starting grace period'
self.new[event.pathname] = gobject.timeout_add(100,
self.process_end_of_grace,
event.pathname)
def process_file_delete(self, event):
#print >>sys.stderr, 'file_delete %s' % (event.pathname,)
if event.pathname in self.new:
#print >>sys.stderr, 'grace period not survided'
gobject.source_remove(self.new[event.pathname])
del self.new[event.pathname]
return
if event.pathname not in self.known:
#print >>sys.stderr, ' not known'
return
mc = self.known[event.pathname]
del self.known[event.pathname]
mc.close()
self.get_menu().remove(mc.item)
if len(self.known) == 0:
self.close_all_item.set_sensitive(False)
n = pynotify.Notification(mc.name, 'MUX Closed', self.icon_name)
n.set_urgency(pynotify.URGENCY_CRITICAL)
n.set_timeout(5000)
n.show()
def process_inotify_event(self, event):
#print >>sys.stderr, ' event %s' % (arg,)
if event.mask == pyinotify.IN_CREATE:
return self.process_file_create(event)
elif event.mask == pyinotify.IN_DELETE:
return self.process_file_delete(event)
def process_end_of_grace(self, path):
del self.new[path]
# lets try to get an connection to the socket
#print >>sys.stderr, ' grace period survived %s' % (path,)
mc = SshMuxEntry(path)
res, exts = mc.connect()
if res:
res, name = mc.info('%r@%h:%p')
if res:
if name[-3:] == ':22':
name = name[:-3]
else:
#print >>sys.stderr, ' could not get info from %s: %s' % (path, name,)
name = os.path.basename(path)
res = True
#else:
#print >>sys.stderr, ' could not connect to %s: ' % (path, exts,)
if res:
#print >>sys.stderr, ' new %r' % (name,)
mc.name = name
self.known[path] = mc
n = pynotify.Notification(name, 'MUX Established', self.icon_name)
n.set_urgency(pynotify.URGENCY_LOW)
n.set_timeout(2500)
n.show()
self.add_to_menu(mc)
return False
def gconf_notify(self, client, cnxn_id, entry, arg):
if entry.key == GCONF_APP_PATH and entry.value is not None and entry.value.type == gconf.VALUE_STRING:
self.reread_path()
class SshMuxPrefsDialog(object):
def __init__(self, gcc):
self._gcc = gcc
self.standalone = False
if not self._gcc:
self._gcc = gconf.client_get_default()
self._gcc.add_dir(GCONF_APP, gconf.CLIENT_PRELOAD_NONE)
self.standalone = True
self.dialog = gtk.Dialog('SSH MUX Monitor Preferences',
None, 0, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_APPLY, gtk.RESPONSE_APPLY))
# response when closing the dialog via the window manager
self.dialog.set_default_response(gtk.RESPONSE_CANCEL)
hbox = gtk.HBox(False, 2)
self.dialog.vbox.pack_start(hbox, False, False, 0)
label = gtk.Label('Directory to monitor: ')
filechooser = gtk.FileChooserButton('Choose directory...', None)
filechooser.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
try:
s = self._gcc.get_string(GCONF_APP_PATH)
if s and os.path.isdir(s):
filechooser.set_filename(s)
except:
filechooser.set_filename(os.path.expanduser('~'))
hbox.pack_start(label, False, False, 0)
hbox.pack_end(filechooser, True, True, 0)
self.dialog.connect('response', self.response_cb, filechooser)
self.dialog.show_all()
def select_mux_path(self, filechooser):
path = filechooser.get_filename()
        if path and os.path.isdir(path):
            filechooser.set_filename(path)
def response_cb(self, widget, event, filechooser):
if event == gtk.RESPONSE_APPLY:
path = filechooser.get_filename()
if path and os.path.isdir(path):
self._gcc.set_string(GCONF_APP_PATH, path)
widget.destroy()
if self.standalone:
gtk.main_quit()
class SshMuxForwardingDialog(object):
_to_fwd_type = [
SshMuxClient.MUX_FWD_LOCAL,
SshMuxClient.MUX_FWD_REMOTE,
SshMuxClient.MUX_FWD_DYNAMIC
]
def __init__(self, mc):
self.mc = mc
self.dialog = gtk.Dialog('New forwarding for %s' % (self.mc.name,),
None, 0, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_APPLY, gtk.RESPONSE_APPLY))
# response when closing the dialog via the window manager
self.dialog.set_default_response(gtk.RESPONSE_CANCEL)
tab = gtk.Table(5, 2, False)
self.dialog.vbox.pack_start(tab, True, True, 0)
self.fwd_select = gtk.combo_box_new_text()
self.fwd_select.append_text('Local forwarding')
self.fwd_select.append_text('Remote forwarding')
self.fwd_select.append_text('Dynamic forwarding')
self.fwd_select.connect('changed', self.type_changed_cb)
tab.attach(self.fwd_select, 0, 2, 0, 1, gtk.EXPAND|gtk.FILL, 0)
# bind_address
self.ba_label = gtk.Label('Bind address:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.ba_label)
tab.attach(right_alignment, 0, 1, 1, 2, gtk.FILL, gtk.FILL)
# listen_port
self.lp_label = gtk.Label('Listen port:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.lp_label)
tab.attach(right_alignment, 0, 1, 2, 3, gtk.FILL, gtk.FILL)
# connect_host
self.ch_label = gtk.Label('Target host:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.ch_label)
tab.attach(right_alignment, 0, 1, 3, 4, gtk.FILL, gtk.FILL)
# connect_port
self.cp_label = gtk.Label('Target port:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.cp_label)
tab.attach(right_alignment, 0, 1, 4, 5, gtk.FILL, gtk.FILL)
hbox2 = gtk.HBox(False, 2)
self.ba_entry = gtk.Entry()
hbox2.pack_start(self.ba_entry, True, True, 0)
self.ba_all_check = gtk.CheckButton('All')
self.ba_all_check.connect('toggled', self.toggled_cb, self.ba_entry)
hbox2.pack_end(self.ba_all_check, False, False, 0)
tab.attach(hbox2, 1, 2, 1, 2, gtk.EXPAND|gtk.FILL, 0)
hbox2 = gtk.HBox(False, 2)
port_adj = gtk.Adjustment(1.0, 1.0, 65535, 1.0, 10.0, 0.0)
self.lp_entry = gtk.SpinButton(port_adj, 0, 0)
hbox2.pack_start(self.lp_entry, True, True, 0)
self.lp_auto_check = gtk.CheckButton('Auto')
self.lp_auto_check.connect('toggled', self.toggled_cb, self.lp_entry)
hbox2.pack_end(self.lp_auto_check, False, False, 0)
tab.attach(hbox2, 1, 2, 2, 3, gtk.EXPAND|gtk.FILL, 0)
self.ch_entry = gtk.Entry()
tab.attach(self.ch_entry, 1, 2, 3, 4, gtk.EXPAND|gtk.FILL, 0)
port_adj = gtk.Adjustment(1.0, 1.0, 65535, 1.0, 32.0, 0.0)
self.cp_entry = gtk.SpinButton(port_adj, 0, 0)
tab.attach(self.cp_entry, 1, 2, 4, 5, gtk.EXPAND|gtk.FILL, 0)
self.dialog.connect('response', self.response_cb)
self.fwd_select.set_active(0)
self.ba_all_check.set_active(True)
self.dialog.show_all()
def type_changed_cb(self, w):
fwd_type = self._to_fwd_type[w.get_active()]
self.lp_entry.set_sensitive(True)
self.lp_auto_check.set_active(False)
self.lp_auto_check.set_sensitive(False)
self.ch_label.set_sensitive(True)
self.ch_entry.set_sensitive(True)
self.cp_label.set_sensitive(True)
self.cp_entry.set_sensitive(True)
if fwd_type == SshMuxClient.MUX_FWD_REMOTE:
self.lp_auto_check.set_sensitive(True)
elif fwd_type == SshMuxClient.MUX_FWD_DYNAMIC:
self.ch_label.set_sensitive(False)
self.ch_entry.set_sensitive(False)
self.cp_label.set_sensitive(False)
self.cp_entry.set_sensitive(False)
def toggled_cb(self, source, target):
target.set_sensitive(not source.get_active())
def apply_forwarding(self):
fwd_type = self._to_fwd_type[self.fwd_select.get_active()]
ba = ''
if not self.ba_all_check.get_active():
ba = self.ba_entry.get_text()
lp = self.lp_entry.get_value_as_int()
if fwd_type == SshMuxClient.MUX_FWD_REMOTE and self.lp_auto_check.get_active():
lp = 0
ch = ''
cp = 0
if fwd_type != SshMuxClient.MUX_FWD_DYNAMIC:
ch = self.ch_entry.get_text()
cp = self.cp_entry.get_value_as_int()
if fwd_type == SshMuxClient.MUX_FWD_LOCAL:
fwd_descr = '-L %s:%u:%s:%u' % (ba, lp, ch, cp,)
elif fwd_type == SshMuxClient.MUX_FWD_REMOTE:
fwd_descr = '-R %s:%u:%s:%u' % (ba, lp, ch, cp,)
else:
fwd_descr = '-D %s:%u' % (ba, lp,)
res, remote_port = self.mc.open_forward(fwd_type, ba, lp, ch, cp)
if res and fwd_type == SshMuxClient.MUX_FWD_REMOTE and lp == 0:
message = gtk.MessageDialog(
parent=None,
flags=0,
type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_OK,
message_format=None)
message.set_markup('Allocated port on the remote side: %d' % (remote_port,))
message.run()
return res, fwd_descr
def response_cb(self, widget, event):
if event == gtk.RESPONSE_APPLY:
res, pid = self.mc.check()
            reason = ''
            fwd_desc = ''  # ensure this is defined for the error path below
if res:
res, fwd_desc = self.apply_forwarding()
fwd_desc = ' ' + fwd_desc
else:
reason = 'Connection already closed.'
if not res:
message = gtk.MessageDialog(
parent=None,
flags=0,
type=gtk.MESSAGE_ERROR,
buttons=gtk.BUTTONS_OK,
message_format=None)
                message.set_markup('Couldn\'t open forwarding%s for %s' % (fwd_desc, self.mc.name,))
if reason:
message.format_secondary_text(reason)
message.run()
self.dialog.destroy()
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == '--prefs':
d = SshMuxPrefsDialog(None)
else:
i = SshMuxIndicator()
try:
gtk.main()
except:
pass
| #!/usr/bin/env python2
import os
import os.path
import stat
import sys
import subprocess
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import gconf
import pynotify
import pyinotify
import appindicator
import SshMuxClient
GCONF_APP = '/apps/sshmuxmon'
GCONF_APP_PATH = os.path.join(GCONF_APP, 'path')
GCONF_APP_HOSTS = os.path.join(GCONF_APP, 'hosts')
class SshMuxEntry(SshMuxClient.SshMuxClient):
name = ''
item = None
sub = None
n_fwds = 0
n_sessions = 0
def __init__(self, path):
SshMuxClient.SshMuxClient.__init__(self, path)
class SshMuxIndicator(
appindicator.Indicator,
pyinotify.Notifier):
known = {}
new = {}
root = None
def __init__(self):
self.icon_path = os.path.normpath(os.path.join(
os.getcwd(),
os.path.dirname(__file__),
'icons'))
self.icon_name = 'file://' + os.path.join(
self.icon_path, 'openssh-256.png')
self._gcc = gconf.client_get_default()
self._gcc.add_dir(GCONF_APP, gconf.CLIENT_PRELOAD_NONE)
self._gc_nid = self._gcc.notify_add(GCONF_APP, self.gconf_notify, None)
pynotify.init('SSH-MUX-Monitor')
self._wm = pyinotify.WatchManager()
pyinotify.Notifier.__init__(self, self._wm, self.process_inotify_event)
self._wd = None
self._w = gobject.io_add_watch(self._wm.get_fd(), gobject.IO_IN, self.process_io_watch)
appindicator.Indicator.__init__(self,
'ssh-mux-monitor',
'openssh',
appindicator.CATEGORY_COMMUNICATIONS,
self.icon_path)
self.set_status(appindicator.STATUS_ACTIVE)
# create a menu
menu = gtk.Menu()
item = gtk.SeparatorMenuItem()
menu.append(item)
item.show()
self.connect_to = gtk.ImageMenuItem(gtk.STOCK_CONNECT)
self.connect_to.set_label('Connect to')
menu.append(self.connect_to)
self.connect_to.connect('activate', self.connect_to_activate)
self.connect_to.set_submenu(gtk.Menu())
self.connect_to.show()
self.close_all_item = gtk.ImageMenuItem(gtk.STOCK_DISCONNECT)
self.close_all_item.set_label('Disconnect All')
menu.append(self.close_all_item)
self.close_all_item.connect('activate', self.close_all_activate)
self.close_all_item.show()
self.close_all_item.set_sensitive(False)
item = gtk.SeparatorMenuItem()
menu.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_PREFERENCES)
item.set_label('Preferences...')
menu.append(item)
item.connect('activate', self.preferences_activate)
item.show()
item = gtk.SeparatorMenuItem()
menu.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_QUIT)
menu.append(item)
item.connect('activate', self.quit_activate)
item.show()
self.static_menu_entry_len = len(menu.get_children())
self.set_menu(menu)
self.reread_path()
def __del__(self):
gobject.source_remove(self._w)
if self._gc_nid:
self._gcc.notify_remove(self._gc_nid)
def reread_path(self):
try:
s = self._gcc.get_string(GCONF_APP_PATH)
if self.root and s and os.path.samefile(self.root, s):
return
except:
s = None
# there are not the same, cleanup previous root, if any
if self.root:
# clear previous known mux
for mc in self.known.itervalues():
mc.close()
self.get_menu().remove(mc.item)
self.close_all_item.set_sensitive(False)
if self.root in self._wd:
self._wm.del_watch(self._wd[self.root])
self.known = {}
self.root = None
self._wd = None
if not s:
return
if not os.path.isdir(s):
return
self.root = s
self._wd = self._wm.add_watch(self.root, pyinotify.IN_CREATE | pyinotify.IN_DELETE)
muxs = []
for path in os.listdir(self.root):
full = os.path.join(self.root, path)
try:
sb = os.stat(full)
if not stat.S_ISSOCK(sb.st_mode):
continue
muxs += [(full, sb.st_mtime)]
except:
continue
muxs.sort(key=lambda x: x[1])
for full, mtime in muxs:
try:
mc = SshMuxEntry(full)
res, exts = mc.connect()
if not res:
continue
res, name = mc.info('%r@%h:%p')
if res:
if name[-3:] == ':22':
name = name[:-3]
else:
#print >>sys.stderr, ' could not get info from %s: %s' % (path, name,)
name = os.path.basename(full)
mc.name = name
self.known[full] = mc
#print >>sys.stderr, 'Already existing mux: %s' % (name,)
self.add_to_menu(mc)
except:
continue
def add_to_menu(self, mc):
self.close_all_item.set_sensitive(True)
menu = self.get_menu()
mc.item = gtk.ImageMenuItem()
mc.item.set_label(mc.name)
image = gtk.image_new_from_icon_name('network-server', gtk.ICON_SIZE_MENU)
mc.item.set_image(image)
mc.item.set_always_show_image(True)
menu.insert(mc.item, len(menu.get_children()) - self.static_menu_entry_len)
mc.item.connect('activate', self.mux_activate, mc)
mc.item.show()
mc.sub = gtk.Menu()
item = gtk.MenuItem('Forwards (click to close):')
mc.sub.append(item)
item.set_sensitive(False)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_ADD)
item.set_label('New...')
mc.sub.append(item)
#item.set_sensitive(False)
item.connect('activate', self.mux_new_forward, mc)
item.show()
item = gtk.SeparatorMenuItem()
mc.sub.append(item)
item.show()
item = gtk.MenuItem('Sessions:')
mc.sub.append(item)
item.set_sensitive(False)
item.show()
item = gtk.SeparatorMenuItem()
mc.sub.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_STOP)
mc.sub.append(item)
item.connect('activate', self.mux_stop_activate, mc)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_DISCONNECT)
mc.sub.append(item)
item.connect('activate', self.mux_close_activate, mc)
item.show()
mc.item.set_submenu(mc.sub)
self.set_menu(menu)
def quit_activate(self, w):
#print 'exit indicator'
gtk.main_quit()
def preferences_activate(self, w):
SshMuxPrefsDialog(self._gcc)
def close_all_activate(self, w):
for mc in self.known.itervalues():
mc.exit()
def connect_to_activate(self, w):
try:
hosts = self._gcc.get_list(GCONF_APP_HOSTS, gconf.VALUE_STRING)
except:
hosts = []
submenu = w.get_submenu()
for child in submenu.get_children():
submenu.remove(child)
# populate devices menu
for host in hosts:
item = gtk.ImageMenuItem()
item.set_label(host)
try:
image = gtk.image_new_from_icon_name('network-server', gtk.ICON_SIZE_MENU)
item.set_image(image)
item.set_always_show_image(True)
except:
pass
submenu.append(item)
item.connect('activate', self.connect_to_host_activate, host)
item.show()
w.set_submenu(submenu)
def connect_to_host_activate(self, w, host):
subprocess.Popen(['ssh', host, '/bin/true'], close_fds=True)
def mux_activate(self, w, mc):
# update forwards and sessions
for i in range(mc.n_fwds):
mc.sub.remove(mc.sub.get_children()[1])
for i in range(mc.n_sessions):
mc.sub.remove(mc.sub.get_children()[4])
mc.n_fwds = 0
mc.n_sessions = 0
res, fwds = mc.forwards()
if not res:
#print >>sys.stderr, 'cannot list forwardings: %s' % (fwds,)
fwds = []
res, sessions = mc.sessions()
if not res:
#print >>sys.stderr, 'cannot list sessions: %s' % (sessions,)
sessions = []
def _hp(h, p):
if p == SshMuxClient.MUX_FWD_PORT_STREAMLOCAL:
return h
else:
return '%s:%d' % (h, p,)
for fwd in fwds:
fid, ftype, lh, lp, ch, cp = fwd
label = ''
lh = lh + ':'
if lh == ':':
lh = ''
if ftype == 'local':
label = '%s -> %s' % (_hp(lh, lp), _hp(ch, cp),)
if ftype == 'remote':
label = '%s <- %s' % (_hp(ch, cp), _hp(lh, lp),)
if ftype == 'dynamic':
label = '%s -> *' % (_hp(lh if lh else 'localhost', lp),)
item = gtk.ImageMenuItem(gtk.STOCK_CANCEL)
item.set_label(label)
mc.sub.insert(item, 1 + mc.n_fwds)
mc.n_fwds += 1
item.connect('activate', self.mux_close_forward, mc, fwd)
item.show()
for s in sessions:
sid, stype, rid, cid, tname, rname = s
#print >>sys.stderr, 'session: %r' % (s,)
try:
session_name, session_action = rname.split(': ', 2)
except:
session_name, session_action = (rname, '',)
try:
session_name, session_args = session_name.split('(', 2)
session_args = session_args[:-1]
except:
session_args = None
item = gtk.ImageMenuItem()
item.set_label('%s' % (rname,))
if tname == 'stdio-forward':
image = gtk.image_new_from_icon_name('preferences-system-network-proxy-symbolic', gtk.ICON_SIZE_MENU)
item.set_image(image)
if session_name == 'subsystem-session' and session_action == 'sftp':
image = gtk.image_new_from_icon_name('folder-remote-ftp', gtk.ICON_SIZE_MENU)
item.set_image(image)
if session_name == 'shell-session':
image = gtk.image_new_from_icon_name('terminal', gtk.ICON_SIZE_MENU)
item.set_image(image)
if session_name == 'exec-session':
image = gtk.image_new_from_stock(gtk.STOCK_EXECUTE, gtk.ICON_SIZE_MENU)
item.set_image(image)
mc.sub.insert(item, 4 + mc.n_fwds + mc.n_sessions)
mc.n_sessions += 1
item.show()
mc.item.set_submenu(mc.sub)
def mux_close_forward(self, w, mc, fwd):
#print 'closing forward [%s] %s:%u -> %s:%u' % (fwd[1], fwd[2], fwd[3], fwd[4], fwd[5],)
mc.close_forward(fwd[1], fwd[2], fwd[3], fwd[4], fwd[5])
def mux_new_forward(self, w, mc):
SshMuxForwardingDialog(mc)
def mux_stop_activate(self, w, mc):
#print 'stoping %s' % (mc.path,)
mc.stop()
def mux_close_activate(self, w, mc):
#print 'closing %s %s:%r' % (mc.path, type(mc), mc,)
mc.exit()
def process_io_watch(self, source, cb_condition):
self.read_events()
self.process_events()
return True
def process_file_create(self, event):
#print >>sys.stderr, 'file_create %s' % (event.pathname,)
try:
sb = os.stat(event.pathname)
except:
#print >>sys.stderr, ' could\'t stat %s' % (event.pathname,)
return
if not stat.S_ISSOCK(sb.st_mode):
#print >>sys.stderr, ' not a socket %s' % (event.pathname,)
return
if event.pathname in self.known:
#print >>sys.stderr, ' already known %s' % (event.pathname,)
return
# defer notification, the mux listener will rename it to the final path
# when he is ready
#print >>sys.stderr, ' starting grace period'
self.new[event.pathname] = gobject.timeout_add(100,
self.process_end_of_grace,
event.pathname)
def process_file_delete(self, event):
#print >>sys.stderr, 'file_delete %s' % (event.pathname,)
if event.pathname in self.new:
#print >>sys.stderr, 'grace period not survided'
gobject.source_remove(self.new[event.pathname])
del self.new[event.pathname]
return
if event.pathname not in self.known:
#print >>sys.stderr, ' not known'
return
mc = self.known[event.pathname]
del self.known[event.pathname]
mc.close()
self.get_menu().remove(mc.item)
if len(self.known) == 0:
self.close_all_item.set_sensitive(False)
n = pynotify.Notification(mc.name, 'MUX Closed', self.icon_name)
n.set_urgency(pynotify.URGENCY_CRITICAL)
n.set_timeout(5000)
n.show()
def process_inotify_event(self, event):
#print >>sys.stderr, ' event %s' % (arg,)
if event.mask == pyinotify.IN_CREATE:
return self.process_file_create(event)
elif event.mask == pyinotify.IN_DELETE:
return self.process_file_delete(event)
def process_end_of_grace(self, path):
del self.new[path]
# lets try to get an connection to the socket
#print >>sys.stderr, ' grace period survived %s' % (path,)
mc = SshMuxEntry(path)
res, exts = mc.connect()
if res:
res, name = mc.info('%r@%h:%p')
if res:
if name[-3:] == ':22':
name = name[:-3]
else:
#print >>sys.stderr, ' could not get info from %s: %s' % (path, name,)
name = os.path.basename(path)
res = True
#else:
#print >>sys.stderr, ' could not connect to %s: ' % (path, exts,)
if res:
#print >>sys.stderr, ' new %r' % (name,)
mc.name = name
self.known[path] = mc
n = pynotify.Notification(name, 'MUX Established', self.icon_name)
n.set_urgency(pynotify.URGENCY_LOW)
n.set_timeout(2500)
n.show()
self.add_to_menu(mc)
return False
def gconf_notify(self, client, cnxn_id, entry, arg):
if entry.key == GCONF_APP_PATH and entry.value is not None and entry.value.type == gconf.VALUE_STRING:
self.reread_path()
class SshMuxPrefsDialog(object):
def __init__(self, gcc):
self._gcc = gcc
self.standalone = False
if not self._gcc:
self._gcc = gconf.client_get_default()
self._gcc.add_dir(GCONF_APP, gconf.CLIENT_PRELOAD_NONE)
self.standalone = True
self.dialog = gtk.Dialog('SSH MUX Monitor Preferences',
None, 0, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_APPLY, gtk.RESPONSE_APPLY))
# response when closing the dialog via the window manager
self.dialog.set_default_response(gtk.RESPONSE_CANCEL)
hbox = gtk.HBox(False, 2)
self.dialog.vbox.pack_start(hbox, False, False, 0)
label = gtk.Label('Directory to monitor: ')
filechooser = gtk.FileChooserButton('Choose directory...', None)
filechooser.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
try:
s = self._gcc.get_string(GCONF_APP_PATH)
if s and os.path.isdir(s):
filechooser.set_filename(s)
except:
filechooser.set_filename(os.path.expanduser('~'))
hbox.pack_start(label, False, False, 0)
hbox.pack_end(filechooser, True, True, 0)
self.dialog.connect('response', self.response_cb, filechooser)
self.dialog.show_all()
def select_mux_path(self, filechooser):
path = filechooser.get_filename()
        if path and os.path.isdir(path):
            filechooser.set_filename(path)
def response_cb(self, widget, event, filechooser):
if event == gtk.RESPONSE_APPLY:
path = filechooser.get_filename()
if path and os.path.isdir(path):
self._gcc.set_string(GCONF_APP_PATH, path)
widget.destroy()
if self.standalone:
gtk.main_quit()
class SshMuxForwardingDialog(object):
_to_fwd_type = [
SshMuxClient.MUX_FWD_LOCAL,
SshMuxClient.MUX_FWD_REMOTE,
SshMuxClient.MUX_FWD_DYNAMIC
]
def __init__(self, mc):
self.mc = mc
self.dialog = gtk.Dialog('New forwarding for %s' % (self.mc.name,),
None, 0, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_APPLY, gtk.RESPONSE_APPLY))
# response when closing the dialog via the window manager
self.dialog.set_default_response(gtk.RESPONSE_CANCEL)
tab = gtk.Table(5, 2, False)
self.dialog.vbox.pack_start(tab, True, True, 0)
self.fwd_select = gtk.combo_box_new_text()
self.fwd_select.append_text('Local forwarding')
self.fwd_select.append_text('Remote forwarding')
self.fwd_select.append_text('Dynamic forwarding')
self.fwd_select.connect('changed', self.type_changed_cb)
tab.attach(self.fwd_select, 0, 2, 0, 1, gtk.EXPAND|gtk.FILL, 0)
# bind_address
self.ba_label = gtk.Label('Bind address:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.ba_label)
tab.attach(right_alignment, 0, 1, 1, 2, gtk.FILL, gtk.FILL)
# listen_port
self.lp_label = gtk.Label('Listen port:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.lp_label)
tab.attach(right_alignment, 0, 1, 2, 3, gtk.FILL, gtk.FILL)
# connect_host
self.ch_label = gtk.Label('Target host:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.ch_label)
tab.attach(right_alignment, 0, 1, 3, 4, gtk.FILL, gtk.FILL)
# connect_port
self.cp_label = gtk.Label('Target port:')
right_alignment = gtk.Alignment(0.0, 0.5, 0.0, 0.0)
right_alignment.add(self.cp_label)
tab.attach(right_alignment, 0, 1, 4, 5, gtk.FILL, gtk.FILL)
hbox2 = gtk.HBox(False, 2)
self.ba_entry = gtk.Entry()
hbox2.pack_start(self.ba_entry, True, True, 0)
self.ba_all_check = gtk.CheckButton('All')
self.ba_all_check.connect('toggled', self.toggled_cb, self.ba_entry)
hbox2.pack_end(self.ba_all_check, False, False, 0)
tab.attach(hbox2, 1, 2, 1, 2, gtk.EXPAND|gtk.FILL, 0)
hbox2 = gtk.HBox(False, 2)
port_adj = gtk.Adjustment(1.0, 1.0, 65535, 1.0, 10.0, 0.0)
self.lp_entry = gtk.SpinButton(port_adj, 0, 0)
hbox2.pack_start(self.lp_entry, True, True, 0)
self.lp_auto_check = gtk.CheckButton('Auto')
self.lp_auto_check.connect('toggled', self.toggled_cb, self.lp_entry)
hbox2.pack_end(self.lp_auto_check, False, False, 0)
tab.attach(hbox2, 1, 2, 2, 3, gtk.EXPAND|gtk.FILL, 0)
self.ch_entry = gtk.Entry()
tab.attach(self.ch_entry, 1, 2, 3, 4, gtk.EXPAND|gtk.FILL, 0)
port_adj = gtk.Adjustment(1.0, 1.0, 65535, 1.0, 32.0, 0.0)
self.cp_entry = gtk.SpinButton(port_adj, 0, 0)
tab.attach(self.cp_entry, 1, 2, 4, 5, gtk.EXPAND|gtk.FILL, 0)
self.dialog.connect('response', self.response_cb)
self.fwd_select.set_active(0)
self.ba_all_check.set_active(True)
self.dialog.show_all()
def type_changed_cb(self, w):
fwd_type = self._to_fwd_type[w.get_active()]
self.lp_entry.set_sensitive(True)
self.lp_auto_check.set_active(False)
self.lp_auto_check.set_sensitive(False)
self.ch_label.set_sensitive(True)
self.ch_entry.set_sensitive(True)
self.cp_label.set_sensitive(True)
self.cp_entry.set_sensitive(True)
if fwd_type == SshMuxClient.MUX_FWD_REMOTE:
self.lp_auto_check.set_sensitive(True)
elif fwd_type == SshMuxClient.MUX_FWD_DYNAMIC:
self.ch_label.set_sensitive(False)
self.ch_entry.set_sensitive(False)
self.cp_label.set_sensitive(False)
self.cp_entry.set_sensitive(False)
def toggled_cb(self, source, target):
target.set_sensitive(not source.get_active())
def apply_forwarding(self):
fwd_type = self._to_fwd_type[self.fwd_select.get_active()]
ba = ''
if not self.ba_all_check.get_active():
ba = self.ba_entry.get_text()
lp = self.lp_entry.get_value_as_int()
if fwd_type == SshMuxClient.MUX_FWD_REMOTE and self.lp_auto_check.get_active():
lp = 0
ch = ''
cp = 0
if fwd_type != SshMuxClient.MUX_FWD_DYNAMIC:
ch = self.ch_entry.get_text()
cp = self.cp_entry.get_value_as_int()
if fwd_type == SshMuxClient.MUX_FWD_LOCAL:
fwd_descr = '-L %s:%u:%s:%u' % (ba, lp, ch, cp,)
elif fwd_type == SshMuxClient.MUX_FWD_REMOTE:
fwd_descr = '-R %s:%u:%s:%u' % (ba, lp, ch, cp,)
else:
fwd_descr = '-D %s:%u' % (ba, lp,)
res, remote_port = self.mc.open_forward(fwd_type, ba, lp, ch, cp)
if res and fwd_type == SshMuxClient.MUX_FWD_REMOTE and lp == 0:
message = gtk.MessageDialog(
parent=None,
flags=0,
type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_OK,
message_format=None)
message.set_markup('Allocated port on the remote side: %d' % (remote_port,))
message.run()
return res, fwd_descr
def response_cb(self, widget, event):
if event == gtk.RESPONSE_APPLY:
res, pid = self.mc.check()
            reason = ''
            fwd_desc = ''
if res:
res, fwd_desc = self.apply_forwarding()
fwd_desc = ' ' + fwd_desc
else:
reason = 'Connection already closed.'
if not res:
message = gtk.MessageDialog(
parent=None,
flags=0,
type=gtk.MESSAGE_ERROR,
buttons=gtk.BUTTONS_OK,
message_format=None)
                message.set_markup('Couldn\'t open forwarding%s for %s' % (fwd_desc, self.mc.name,))
if reason:
message.format_secondary_text(reason)
message.run()
self.dialog.destroy()
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == '--prefs':
d = SshMuxPrefsDialog(None)
else:
i = SshMuxIndicator()
try:
gtk.main()
except:
pass | en | 0.375719 | #!/usr/bin/env python2 # create a menu # there are not the same, cleanup previous root, if any # clear previous known mux #print >>sys.stderr, ' could not get info from %s: %s' % (path, name,) #print >>sys.stderr, 'Already existing mux: %s' % (name,) #item.set_sensitive(False) #print 'exit indicator' # populate devices menu # update forwards and sessions #print >>sys.stderr, 'cannot list forwardings: %s' % (fwds,) #print >>sys.stderr, 'cannot list sessions: %s' % (sessions,) #print >>sys.stderr, 'session: %r' % (s,) #print 'closing forward [%s] %s:%u -> %s:%u' % (fwd[1], fwd[2], fwd[3], fwd[4], fwd[5],) #print 'stoping %s' % (mc.path,) #print 'closing %s %s:%r' % (mc.path, type(mc), mc,) #print >>sys.stderr, 'file_create %s' % (event.pathname,) #print >>sys.stderr, ' could\'t stat %s' % (event.pathname,) #print >>sys.stderr, ' not a socket %s' % (event.pathname,) #print >>sys.stderr, ' already known %s' % (event.pathname,) # defer notification, the mux listener will rename it to the final path # when he is ready #print >>sys.stderr, ' starting grace period' #print >>sys.stderr, 'file_delete %s' % (event.pathname,) #print >>sys.stderr, 'grace period not survided' #print >>sys.stderr, ' not known' #print >>sys.stderr, ' event %s' % (arg,) # lets try to get an connection to the socket #print >>sys.stderr, ' grace period survived %s' % (path,) #print >>sys.stderr, ' could not get info from %s: %s' % (path, name,) #else: #print >>sys.stderr, ' could not connect to %s: ' % (path, exts,) #print >>sys.stderr, ' new %r' % (name,) # response when closing the dialog via the window manager # response when closing the dialog via the window manager # bind_address # listen_port # connect_host # connect_port | 2.148119 | 2 |
algorithms_in_python/_4_recursion/examples/disk_usage.py | junteudjio/algorithms_in_python | 0 | 6631221 | <gh_stars>0
import os
__author__ = '<NAME>'
def disk_usage(path):
total = os.path.getsize(path)
if os.path.isdir(path):
children = [ os.path.join(path, child) for child in os.listdir(path)]
for child_path in children:
total += disk_usage(child_path)
return total
if __name__ == '__main__':
print disk_usage('../../../') | import os
__author__ = '<NAME>'
def disk_usage(path):
total = os.path.getsize(path)
if os.path.isdir(path):
children = [ os.path.join(path, child) for child in os.listdir(path)]
for child_path in children:
total += disk_usage(child_path)
return total
if __name__ == '__main__':
print disk_usage('../../../') | none | 1 | 2.936397 | 3 |
|
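# Hedged sketch relating to the recursive disk_usage() in the row above: the
# same total can be computed iteratively with os.walk, which avoids hitting
# the recursion limit on very deep directory trees.  This variant is purely
# illustrative and is not part of the original repository.
import os
def disk_usage_walk(path):
    total = os.path.getsize(path)
    for dirpath, dirnames, filenames in os.walk(path):
        for name in dirnames + filenames:
            total += os.path.getsize(os.path.join(dirpath, name))
    return total
# e.g. disk_usage_walk('.') should match disk_usage('.') on ordinary trees.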
webtool/server/serializers/frontend/tours.py | wodo/WebTool3 | 13 | 6631222 | <reponame>wodo/WebTool3<gh_stars>10-100
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.core.mail import send_mail
from server.models import (
Tour, Guide, Category, Equipment, State, get_default_state, get_default_season, Event,
Skill, Fitness, Topic)
from server.serializers.frontend.core import EventSerializer, MoneyField, create_event, update_event
class TourListSerializer(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True) # ? #
reference = serializers.CharField(source='tour.reference.__str__', read_only=True) # ? #
title = serializers.SerializerMethodField()
startDate = serializers.DateField(source='tour.start_date', read_only=True)
guideId = serializers.PrimaryKeyRelatedField(source='guide_id', read_only=True)
ladiesOnly = serializers.BooleanField(source='ladies_only', read_only=True)
winter = serializers.BooleanField(source='tour.reference.category.winter', read_only=True)
summer = serializers.BooleanField(source='tour.reference.category.summer', read_only=True)
youthOnTour = serializers.BooleanField(source='youth_on_tour', default=False)
minQuantity = serializers.IntegerField(source='min_quantity', read_only=True)
maxQuantity = serializers.IntegerField(source='max_quantity', read_only=True)
curQuantity = serializers.IntegerField(source='cur_quantity', read_only=True)
stateId = serializers.PrimaryKeyRelatedField(source='state_id', read_only=True) # ? #
url = serializers.SerializerMethodField()
class Meta:
model = Tour
fields = (
'id',
'reference',
'title',
'startDate',
'guideId',
'ladiesOnly',
'winter',
'summer',
'youthOnTour',
'minQuantity', 'maxQuantity', 'curQuantity',
'stateId',
'url'
)
def get_url(self, obj):
request = self.context['request']
return reverse('tours-detail', args=[obj.pk], request=request)
def get_title(self, obj):
return obj.tour.title
class TourSerializer(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(source='pk', queryset=Tour.objects.all(), default=None, allow_null=True)
reference = serializers.CharField(source='tour.reference.__str__', read_only=True)
guideId = serializers.PrimaryKeyRelatedField(
source='guide', default=None, allow_null=True, queryset=Guide.objects.all()
)
teamIds = serializers.PrimaryKeyRelatedField(
source='team', many=True, default=[], queryset=Guide.objects.all()
)
category = serializers.PrimaryKeyRelatedField(
default=None, allow_null=True, write_only=True, queryset=Category.objects.all()
)
categoryId = serializers.PrimaryKeyRelatedField(
default=None, allow_null=True, source='tour.reference.category', queryset=Category.objects.all()
)
categoryIds = serializers.PrimaryKeyRelatedField(
source='categories', many=True, default=[], queryset=Category.objects.all()
)
tour = EventSerializer(default={})
deadline = EventSerializer(default={})
preliminary = EventSerializer(default={}, allow_null=True)
info = serializers.CharField(default='', allow_blank=True)
ladiesOnly = serializers.BooleanField(source='ladies_only', default=False)
youthOnTour = serializers.BooleanField(source='youth_on_tour', default=False)
relaxed = serializers.BooleanField(default=False)
miscCategory = serializers.CharField(source='misc_category', max_length=75, default='', allow_blank=True)
qualificationIds = serializers.PrimaryKeyRelatedField(
source='qualifications', many=True, default=[], queryset=Topic.objects.all()
)
preconditions = serializers.CharField(default='', allow_blank=True)
equipmentIds = serializers.PrimaryKeyRelatedField(
source='equipments', many=True, default=[], queryset=Equipment.objects.all()
)
miscEquipment = serializers.CharField(source='misc_equipment', max_length=75, default='', allow_blank=True)
equipmentService = serializers.BooleanField(source='equipment_service', default=False)
skillId = serializers.PrimaryKeyRelatedField(
source='skill', default=None, allow_null=True, required=False, queryset=Skill.objects.all()
)
fitnessId = serializers.PrimaryKeyRelatedField(
source='fitness', default=None, allow_null=True, required=False, queryset=Fitness.objects.all()
)
admission = MoneyField()
advances = MoneyField()
advancesInfo = serializers.CharField(source='advances_info', default='', allow_blank=True)
extraCharges = MoneyField(source='extra_charges')
extraChargesInfo = serializers.CharField(source='extra_charges_info', max_length=75, default='', allow_blank=True)
minQuantity = serializers.IntegerField(source='min_quantity', default=0)
maxQuantity = serializers.IntegerField(source='max_quantity', default=0)
curQuantity = serializers.IntegerField(source='cur_quantity', default=0)
portal = serializers.URLField(default='', allow_blank=True)
deprecated = serializers.BooleanField(default=False, required=False)
stateId = serializers.PrimaryKeyRelatedField(source='state', required=False, queryset=State.objects.all())
message = serializers.CharField(default='', required=False, allow_null=True, allow_blank=True)
comment = serializers.CharField(default='', required=False, allow_null=True, allow_blank=True)
# Administrative Felder fehlen noch !
class Meta:
model = Tour
fields = (
'id', 'reference',
'guideId', 'teamIds',
'categoryId', 'category', 'categoryIds',
'tour', 'deadline', 'preliminary',
'info',
'ladiesOnly', 'youthOnTour', 'relaxed',
'miscCategory',
'qualificationIds', 'preconditions',
'equipmentIds', 'miscEquipment', 'equipmentService',
'skillId', 'fitnessId',
'admission', 'advances', 'advancesInfo', 'extraCharges', 'extraChargesInfo',
'minQuantity', 'maxQuantity', 'curQuantity',
'portal', 'deprecated', 'stateId',
'message', 'comment'
)
def validate(self, data):
if self.instance is not None:
# This is the Update case
tour = self.instance
instance_data = data.get('pk')
if instance_data is None:
raise serializers.ValidationError("instance Id is missing")
elif instance_data.pk != tour.pk:
raise serializers.ValidationError("Wrong instance Id")
tour_data = data.get('tour')
if tour_data is not None:
tour_instance = tour_data.get('pk')
if tour_instance is None:
raise serializers.ValidationError("tour Id is missing")
elif tour_instance.pk != tour.tour_id:
raise serializers.ValidationError("Wrong meeting Id")
deadline_data = data.get('deadline')
if deadline_data is not None:
deadline_instance = deadline_data.get('pk')
if deadline_instance is None:
raise serializers.ValidationError("deadline is not defined")
elif deadline_instance.pk != tour.deadline_id:
raise serializers.ValidationError("Wrong deadline Id")
preliminary_data = data.get('preliminary')
if preliminary_data is not None:
preliminary_instance = preliminary_data.get('pk')
if preliminary_instance is None:
raise serializers.ValidationError("preliminary is not defined")
elif preliminary_instance.pk != tour.preliminary_id:
raise serializers.ValidationError("Wrong preliminary Id")
return data
def create(self, validated_data):
instance = validated_data.pop('pk')
if instance:
return self.update(instance, validated_data)
else:
tour_data = validated_data.pop('tour')
tour_data.update({'new': True})
deadline_data = validated_data.pop('deadline')
preliminary_data = validated_data.pop('preliminary')
info = validated_data.pop('info')
team = validated_data.pop('team')
qualifications = validated_data.pop('qualifications')
equipments = validated_data.pop('equipments')
state = validated_data.pop('state', get_default_state())
category = validated_data.pop('category')
# Set Youth-On-Tour if tour is especially for youth
if hasattr(category, 'name') and 'Jugend' in category.name:
youth_on_tour = True
validated_data.pop('youth_on_tour')
else:
youth_on_tour = validated_data.pop('youth_on_tour')
categories = validated_data.pop('categories')
season = get_default_season()
            if 'start_date' not in tour_data:
                raise serializers.ValidationError("Tour 'start_date' has to be defined")
if category:
tour_event = create_event(tour_data, dict(category=category, season=season, type=dict(tour=True)))
else:
raise serializers.ValidationError("Tour needs a category for creation")
if not deadline_data:
raise serializers.ValidationError("Deadline have to be defined")
deadline_event = create_event(deadline_data, dict(category=None, season=season, type=dict(deadline=True)))
if not preliminary_data:
tour = Tour.objects.create(tour=tour_event, deadline=deadline_event, preliminary=None,
state=state, youth_on_tour=youth_on_tour, **validated_data)
else:
preliminary_event = create_event(preliminary_data, dict(category=None, season=season, type=dict(preliminary=True)))
tour = Tour.objects.create(tour=tour_event, deadline=deadline_event, preliminary=preliminary_event,
state=state, youth_on_tour=youth_on_tour, **validated_data)
update_event(Event.objects.get(pk=tour.preliminary.pk), dict(title="VB " + str(tour.tour.reference),
name="Vorbesprechung "+ str(tour.tour.reference)), self.context)
update_event(Event.objects.get(pk=tour.deadline.pk), dict(title="AS " + str(tour.tour.reference),
name="Anmeldeschluss für " + str(tour.tour.reference)), self.context)
tour.categories.set(categories)
tour.info = info
tour.team.set(team)
tour.qualifications.set(qualifications)
tour.equipments.set(equipments)
return tour
def update(self, instance, validated_data):
instance.guide = validated_data.get('guide', instance.guide)
team = validated_data.get('team')
if team is not None:
instance.team.set(team)
tour_data = validated_data.get('tour')
if tour_data is not None:
tour = Event.objects.get(pk=tour_data.get('pk'))
update_event(tour, tour_data, self.context)
deadline_data = validated_data.get('deadline')
if deadline_data is not None:
deadline = Event.objects.get(pk=deadline_data.get('pk'))
update_event(deadline, deadline_data, self.context)
preliminary_data = validated_data.get('preliminary')
if preliminary_data is not None:
preliminary = Event.objects.get(pk=preliminary_data.get('pk'))
update_event(preliminary, preliminary_data, self.context)
instance.info = validated_data.get('info', instance.info)
instance.ladies_only = validated_data.get('ladies_only', instance.ladies_only)
instance.youth_on_tour = validated_data.get('youth_on_tour', instance.youth_on_tour)
instance.relaxed = validated_data.get('relaxed', instance.relaxed)
categories = validated_data.get('categories')
if categories is not None:
instance.categories.set(categories)
qualifications = validated_data.get('qualifications')
if qualifications is not None:
instance.qualifications.set(qualifications)
instance.preconditions = validated_data.get('preconditions', instance.preconditions)
equipments = validated_data.get('equipments')
if equipments is not None:
instance.equipments.set(equipments)
instance.misc_equipment = validated_data.get('misc_equipment', instance.misc_equipment)
instance.equipment_service = validated_data.get('equipment_service', instance.equipment_service)
instance.skill = validated_data.get('skill', instance.skill)
instance.fitness = validated_data.get('fitness', instance.fitness)
instance.admission = validated_data.get('admission', instance.admission)
instance.advances = validated_data.get('advances', instance.advances)
instance.advances_info = validated_data.get('advances_info', instance.advances_info)
instance.extra_charges = validated_data.get('extra_charges', instance.extra_charges)
instance.extra_charges_info = validated_data.get('extra_charges_info', instance.extra_charges_info)
instance.min_quantity = validated_data.get('min_quantity', instance.min_quantity)
instance.max_quantity = validated_data.get('max_quantity', instance.max_quantity)
instance.cur_quantity = validated_data.get('cur_quantity', instance.cur_quantity)
instance.deprecated = validated_data.get('deprecated', instance.deprecated)
instance.state = validated_data.get('state', instance.state)
if instance.state == State.objects.get(name='Fertig'):
self.send_tour_notification(reference=instance.tour.reference.__str__())
if instance.state in (State.objects.get(name='Freigegeben'), State.objects.get(name='Noch nicht buchbar')):
self.send_tour_kv_notification(instance=instance)
instance.message = validated_data.get('message', instance.message)
instance.comment = validated_data.get('comment', instance.comment)
instance.save()
return instance
@staticmethod
def send_tour_notification(reference=None):
send_mail(
subject='Tour ' + reference,
message='Die Tour ' + reference + ' wurde auf Fertig gestellt und kann geprüft werden.',
from_email='<EMAIL>',
recipient_list=['<EMAIL>', '<EMAIL>', '<EMAIL>']
)
def send_tour_kv_notification(self, instance=None):
team_format, equipment_format = '', ''
# Format team-members
for el in instance.team.all():
team_format = team_format + el.__str__() + ', '
# Format equipments
for el in instance.equipments.all():
equipment_format = equipment_format + el.__str__() + ', '
send_mail(
subject='Tour ' + instance.tour.reference.__str__() + ' KV-Update',
message='Die Tour ' + instance.tour.reference.__str__()
+ ' wurde auf "' + instance.state.name + '" gestellt und kann in den KV übertragen werden:' + '\n'
+ 'Buchungscode: ' + instance.tour.reference.__str__() + '\n'
+ 'Kategorie: ' + instance.tour.reference.category.name + '\n'
+ 'Titel: ' + instance.tour.title + '\n'
+ 'TN-Betrag: ' + str(instance.admission) + '\n'
+ 'Anzahlung: ' + str(instance.advances) + '\n'
+ 'Min TN: ' + str(instance.min_quantity) + '\n'
+ 'Geplante TN: ' + str(instance.max_quantity) + '\n'
+ 'Ausrüstung: ' + equipment_format[:-2] + '\n'
+ 'Zusatzausrüstung: ' + instance.misc_equipment + '\n'
+ 'Organisation: ' + self.guide_format(guide=instance.guide) + '\n'
+ 'Team: ' + team_format[:-2] + '\n'
+ 'Anreise: ' + str(instance.tour.distance) + '\n'
+ 'Buchbar bis: ' + instance.deadline.short_date(with_year=True) + '\n'
+ 'Tourtermin: ' + instance.tour.short_date(with_year=True) + '\n'
+ 'Tourtermin Uhrzeit: ' + self.approximation_time_format(event=instance.tour) + '\n'
+ 'Vorbesprechung: ' + self.preliminary_format(instance=instance) + '\n'
+ 'Treffpunkt: ' + instance.tour.rendezvous + '\n'
+ 'Tourziel: ' + instance.tour.location + '\n',
from_email='<EMAIL>',
recipient_list=['<EMAIL>', '<EMAIL>']
)
@staticmethod
def guide_format(guide=None):
if guide:
return guide.__str__()
else:
return 'N.a.'
def preliminary_format(self, instance=None):
if instance.preliminary:
return instance.preliminary.short_date(with_year=True) + ' ' + self.time_format(event=instance.preliminary)
else:
return instance.info
def approximation_time_format(self, event=None):
if event.start_time:
return self.time_format(event=event)
elif event.approximate:
return event.approximate.name
else:
return 'N.a.'
@staticmethod
def time_format(event=None):
if event.end_time:
return str(event.start_time) + ' - ' + str(event.end_time)
else:
return str(event.start_time)
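# Hedged usage sketch: TourSerializer relies on the request being present in
# the serializer context (see TourListSerializer.get_url) and funnels writes
# through validate()/create()/update() above.  The helper below only
# illustrates the usual DRF pattern from a view; the function name is made up
# and it is not referenced anywhere in the project.
def _example_create_tour(request):
    serializer = TourSerializer(data=request.data, context={'request': request})
    serializer.is_valid(raise_exception=True)
    return serializer.save()  # dispatches to create() or update() above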
| from rest_framework import serializers
from rest_framework.reverse import reverse
from django.core.mail import send_mail
from server.models import (
Tour, Guide, Category, Equipment, State, get_default_state, get_default_season, Event,
Skill, Fitness, Topic)
from server.serializers.frontend.core import EventSerializer, MoneyField, create_event, update_event
class TourListSerializer(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(source='pk', read_only=True) # ? #
reference = serializers.CharField(source='tour.reference.__str__', read_only=True) # ? #
title = serializers.SerializerMethodField()
startDate = serializers.DateField(source='tour.start_date', read_only=True)
guideId = serializers.PrimaryKeyRelatedField(source='guide_id', read_only=True)
ladiesOnly = serializers.BooleanField(source='ladies_only', read_only=True)
winter = serializers.BooleanField(source='tour.reference.category.winter', read_only=True)
summer = serializers.BooleanField(source='tour.reference.category.summer', read_only=True)
youthOnTour = serializers.BooleanField(source='youth_on_tour', default=False)
minQuantity = serializers.IntegerField(source='min_quantity', read_only=True)
maxQuantity = serializers.IntegerField(source='max_quantity', read_only=True)
curQuantity = serializers.IntegerField(source='cur_quantity', read_only=True)
stateId = serializers.PrimaryKeyRelatedField(source='state_id', read_only=True) # ? #
url = serializers.SerializerMethodField()
class Meta:
model = Tour
fields = (
'id',
'reference',
'title',
'startDate',
'guideId',
'ladiesOnly',
'winter',
'summer',
'youthOnTour',
'minQuantity', 'maxQuantity', 'curQuantity',
'stateId',
'url'
)
def get_url(self, obj):
request = self.context['request']
return reverse('tours-detail', args=[obj.pk], request=request)
def get_title(self, obj):
return obj.tour.title
class TourSerializer(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(source='pk', queryset=Tour.objects.all(), default=None, allow_null=True)
reference = serializers.CharField(source='tour.reference.__str__', read_only=True)
guideId = serializers.PrimaryKeyRelatedField(
source='guide', default=None, allow_null=True, queryset=Guide.objects.all()
)
teamIds = serializers.PrimaryKeyRelatedField(
source='team', many=True, default=[], queryset=Guide.objects.all()
)
category = serializers.PrimaryKeyRelatedField(
default=None, allow_null=True, write_only=True, queryset=Category.objects.all()
)
categoryId = serializers.PrimaryKeyRelatedField(
default=None, allow_null=True, source='tour.reference.category', queryset=Category.objects.all()
)
categoryIds = serializers.PrimaryKeyRelatedField(
source='categories', many=True, default=[], queryset=Category.objects.all()
)
tour = EventSerializer(default={})
deadline = EventSerializer(default={})
preliminary = EventSerializer(default={}, allow_null=True)
info = serializers.CharField(default='', allow_blank=True)
ladiesOnly = serializers.BooleanField(source='ladies_only', default=False)
youthOnTour = serializers.BooleanField(source='youth_on_tour', default=False)
relaxed = serializers.BooleanField(default=False)
miscCategory = serializers.CharField(source='misc_category', max_length=75, default='', allow_blank=True)
qualificationIds = serializers.PrimaryKeyRelatedField(
source='qualifications', many=True, default=[], queryset=Topic.objects.all()
)
preconditions = serializers.CharField(default='', allow_blank=True)
equipmentIds = serializers.PrimaryKeyRelatedField(
source='equipments', many=True, default=[], queryset=Equipment.objects.all()
)
miscEquipment = serializers.CharField(source='misc_equipment', max_length=75, default='', allow_blank=True)
equipmentService = serializers.BooleanField(source='equipment_service', default=False)
skillId = serializers.PrimaryKeyRelatedField(
source='skill', default=None, allow_null=True, required=False, queryset=Skill.objects.all()
)
fitnessId = serializers.PrimaryKeyRelatedField(
source='fitness', default=None, allow_null=True, required=False, queryset=Fitness.objects.all()
)
admission = MoneyField()
advances = MoneyField()
advancesInfo = serializers.CharField(source='advances_info', default='', allow_blank=True)
extraCharges = MoneyField(source='extra_charges')
extraChargesInfo = serializers.CharField(source='extra_charges_info', max_length=75, default='', allow_blank=True)
minQuantity = serializers.IntegerField(source='min_quantity', default=0)
maxQuantity = serializers.IntegerField(source='max_quantity', default=0)
curQuantity = serializers.IntegerField(source='cur_quantity', default=0)
portal = serializers.URLField(default='', allow_blank=True)
deprecated = serializers.BooleanField(default=False, required=False)
stateId = serializers.PrimaryKeyRelatedField(source='state', required=False, queryset=State.objects.all())
message = serializers.CharField(default='', required=False, allow_null=True, allow_blank=True)
comment = serializers.CharField(default='', required=False, allow_null=True, allow_blank=True)
# Administrative Felder fehlen noch !
class Meta:
model = Tour
fields = (
'id', 'reference',
'guideId', 'teamIds',
'categoryId', 'category', 'categoryIds',
'tour', 'deadline', 'preliminary',
'info',
'ladiesOnly', 'youthOnTour', 'relaxed',
'miscCategory',
'qualificationIds', 'preconditions',
'equipmentIds', 'miscEquipment', 'equipmentService',
'skillId', 'fitnessId',
'admission', 'advances', 'advancesInfo', 'extraCharges', 'extraChargesInfo',
'minQuantity', 'maxQuantity', 'curQuantity',
'portal', 'deprecated', 'stateId',
'message', 'comment'
)
def validate(self, data):
if self.instance is not None:
# This is the Update case
tour = self.instance
instance_data = data.get('pk')
if instance_data is None:
raise serializers.ValidationError("instance Id is missing")
elif instance_data.pk != tour.pk:
raise serializers.ValidationError("Wrong instance Id")
tour_data = data.get('tour')
if tour_data is not None:
tour_instance = tour_data.get('pk')
if tour_instance is None:
raise serializers.ValidationError("tour Id is missing")
elif tour_instance.pk != tour.tour_id:
raise serializers.ValidationError("Wrong meeting Id")
deadline_data = data.get('deadline')
if deadline_data is not None:
deadline_instance = deadline_data.get('pk')
if deadline_instance is None:
raise serializers.ValidationError("deadline is not defined")
elif deadline_instance.pk != tour.deadline_id:
raise serializers.ValidationError("Wrong deadline Id")
preliminary_data = data.get('preliminary')
if preliminary_data is not None:
preliminary_instance = preliminary_data.get('pk')
if preliminary_instance is None:
raise serializers.ValidationError("preliminary is not defined")
elif preliminary_instance.pk != tour.preliminary_id:
raise serializers.ValidationError("Wrong preliminary Id")
return data
def create(self, validated_data):
instance = validated_data.pop('pk')
if instance:
return self.update(instance, validated_data)
else:
tour_data = validated_data.pop('tour')
tour_data.update({'new': True})
deadline_data = validated_data.pop('deadline')
preliminary_data = validated_data.pop('preliminary')
info = validated_data.pop('info')
team = validated_data.pop('team')
qualifications = validated_data.pop('qualifications')
equipments = validated_data.pop('equipments')
state = validated_data.pop('state', get_default_state())
category = validated_data.pop('category')
# Set Youth-On-Tour if tour is especially for youth
if hasattr(category, 'name') and 'Jugend' in category.name:
youth_on_tour = True
validated_data.pop('youth_on_tour')
else:
youth_on_tour = validated_data.pop('youth_on_tour')
categories = validated_data.pop('categories')
season = get_default_season()
            if 'start_date' not in tour_data:
                raise serializers.ValidationError("Tour 'start_date' has to be defined")
if category:
tour_event = create_event(tour_data, dict(category=category, season=season, type=dict(tour=True)))
else:
raise serializers.ValidationError("Tour needs a category for creation")
if not deadline_data:
raise serializers.ValidationError("Deadline have to be defined")
deadline_event = create_event(deadline_data, dict(category=None, season=season, type=dict(deadline=True)))
if not preliminary_data:
tour = Tour.objects.create(tour=tour_event, deadline=deadline_event, preliminary=None,
state=state, youth_on_tour=youth_on_tour, **validated_data)
else:
preliminary_event = create_event(preliminary_data, dict(category=None, season=season, type=dict(preliminary=True)))
tour = Tour.objects.create(tour=tour_event, deadline=deadline_event, preliminary=preliminary_event,
state=state, youth_on_tour=youth_on_tour, **validated_data)
update_event(Event.objects.get(pk=tour.preliminary.pk), dict(title="VB " + str(tour.tour.reference),
name="Vorbesprechung "+ str(tour.tour.reference)), self.context)
update_event(Event.objects.get(pk=tour.deadline.pk), dict(title="AS " + str(tour.tour.reference),
name="Anmeldeschluss für " + str(tour.tour.reference)), self.context)
tour.categories.set(categories)
tour.info = info
tour.team.set(team)
tour.qualifications.set(qualifications)
tour.equipments.set(equipments)
return tour
def update(self, instance, validated_data):
instance.guide = validated_data.get('guide', instance.guide)
team = validated_data.get('team')
if team is not None:
instance.team.set(team)
tour_data = validated_data.get('tour')
if tour_data is not None:
tour = Event.objects.get(pk=tour_data.get('pk'))
update_event(tour, tour_data, self.context)
deadline_data = validated_data.get('deadline')
if deadline_data is not None:
deadline = Event.objects.get(pk=deadline_data.get('pk'))
update_event(deadline, deadline_data, self.context)
preliminary_data = validated_data.get('preliminary')
if preliminary_data is not None:
preliminary = Event.objects.get(pk=preliminary_data.get('pk'))
update_event(preliminary, preliminary_data, self.context)
instance.info = validated_data.get('info', instance.info)
instance.ladies_only = validated_data.get('ladies_only', instance.ladies_only)
instance.youth_on_tour = validated_data.get('youth_on_tour', instance.youth_on_tour)
instance.relaxed = validated_data.get('relaxed', instance.relaxed)
categories = validated_data.get('categories')
if categories is not None:
instance.categories.set(categories)
qualifications = validated_data.get('qualifications')
if qualifications is not None:
instance.qualifications.set(qualifications)
instance.preconditions = validated_data.get('preconditions', instance.preconditions)
equipments = validated_data.get('equipments')
if equipments is not None:
instance.equipments.set(equipments)
instance.misc_equipment = validated_data.get('misc_equipment', instance.misc_equipment)
instance.equipment_service = validated_data.get('equipment_service', instance.equipment_service)
instance.skill = validated_data.get('skill', instance.skill)
instance.fitness = validated_data.get('fitness', instance.fitness)
instance.admission = validated_data.get('admission', instance.admission)
instance.advances = validated_data.get('advances', instance.advances)
instance.advances_info = validated_data.get('advances_info', instance.advances_info)
instance.extra_charges = validated_data.get('extra_charges', instance.extra_charges)
instance.extra_charges_info = validated_data.get('extra_charges_info', instance.extra_charges_info)
instance.min_quantity = validated_data.get('min_quantity', instance.min_quantity)
instance.max_quantity = validated_data.get('max_quantity', instance.max_quantity)
instance.cur_quantity = validated_data.get('cur_quantity', instance.cur_quantity)
instance.deprecated = validated_data.get('deprecated', instance.deprecated)
instance.state = validated_data.get('state', instance.state)
if instance.state == State.objects.get(name='Fertig'):
self.send_tour_notification(reference=instance.tour.reference.__str__())
if instance.state in (State.objects.get(name='Freigegeben'), State.objects.get(name='Noch nicht buchbar')):
self.send_tour_kv_notification(instance=instance)
instance.message = validated_data.get('message', instance.message)
instance.comment = validated_data.get('comment', instance.comment)
instance.save()
return instance
@staticmethod
def send_tour_notification(reference=None):
send_mail(
subject='Tour ' + reference,
message='Die Tour ' + reference + ' wurde auf Fertig gestellt und kann geprüft werden.',
from_email='<EMAIL>',
recipient_list=['<EMAIL>', '<EMAIL>', '<EMAIL>']
)
def send_tour_kv_notification(self, instance=None):
team_format, equipment_format = '', ''
# Format team-members
for el in instance.team.all():
team_format = team_format + el.__str__() + ', '
# Format equipments
for el in instance.equipments.all():
equipment_format = equipment_format + el.__str__() + ', '
send_mail(
subject='Tour ' + instance.tour.reference.__str__() + ' KV-Update',
message='Die Tour ' + instance.tour.reference.__str__()
+ ' wurde auf "' + instance.state.name + '" gestellt und kann in den KV übertragen werden:' + '\n'
+ 'Buchungscode: ' + instance.tour.reference.__str__() + '\n'
+ 'Kategorie: ' + instance.tour.reference.category.name + '\n'
+ 'Titel: ' + instance.tour.title + '\n'
+ 'TN-Betrag: ' + str(instance.admission) + '\n'
+ 'Anzahlung: ' + str(instance.advances) + '\n'
+ 'Min TN: ' + str(instance.min_quantity) + '\n'
+ 'Geplante TN: ' + str(instance.max_quantity) + '\n'
+ 'Ausrüstung: ' + equipment_format[:-2] + '\n'
+ 'Zusatzausrüstung: ' + instance.misc_equipment + '\n'
+ 'Organisation: ' + self.guide_format(guide=instance.guide) + '\n'
+ 'Team: ' + team_format[:-2] + '\n'
+ 'Anreise: ' + str(instance.tour.distance) + '\n'
+ 'Buchbar bis: ' + instance.deadline.short_date(with_year=True) + '\n'
+ 'Tourtermin: ' + instance.tour.short_date(with_year=True) + '\n'
+ 'Tourtermin Uhrzeit: ' + self.approximation_time_format(event=instance.tour) + '\n'
+ 'Vorbesprechung: ' + self.preliminary_format(instance=instance) + '\n'
+ 'Treffpunkt: ' + instance.tour.rendezvous + '\n'
+ 'Tourziel: ' + instance.tour.location + '\n',
from_email='<EMAIL>',
recipient_list=['<EMAIL>', '<EMAIL>']
)
@staticmethod
def guide_format(guide=None):
if guide:
return guide.__str__()
else:
return 'N.a.'
def preliminary_format(self, instance=None):
if instance.preliminary:
return instance.preliminary.short_date(with_year=True) + ' ' + self.time_format(event=instance.preliminary)
else:
return instance.info
def approximation_time_format(self, event=None):
if event.start_time:
return self.time_format(event=event)
elif event.approximate:
return event.approximate.name
else:
return 'N.a.'
@staticmethod
def time_format(event=None):
if event.end_time:
return str(event.start_time) + ' - ' + str(event.end_time)
else:
return str(event.start_time) | en | 0.69037 | # ? # # ? # # ? # # Administrative Felder fehlen noch ! # This is the Update case # Set Youth-On-Tour if tour is especially for youth # Format team-members # Format equipments | 2.081945 | 2 |
deck_of_many_things.py | Mego/Tymora | 0 | 6631223 | <gh_stars>0
#!/usr/bin/env python3
import random
import atexit
import pickle
decks = {}
def save_decks():
with open("decks.pickle", 'wb') as f:
pickle.dump(decks, f)
print("Decks saved")
def load_decks():
global decks
try:
with open("decks.pickle", 'rb') as f:
decks = pickle.load(f)
print("Decks loaded")
except:
print("Unable to load decks")
atexit.register(save_decks)
cards13 = """Sun
Moon
Star
Throne
Key
Knight
Void
Flames
Skull
Ruin
Euryale
Rogue
Jester""".split()
cards22 = """Vizier
Comet
Fates
Gem
Talons
Idiot
Donjon
Balance
Fool""".split() + cards13
def init_deck(deck_id, use_22 = False):
cards = (cards13 if not use_22 else cards22)[:]
random.shuffle(cards)
decks[deck_id] = cards
def draw(deck_id):
if deck_id not in decks:
init_deck(deck_id, random.choice([False, False, False, True])) # 75% chance of 13-card deck
cards = decks[deck_id]
    card = cards.pop(0)  # remove the drawn card; non-Fool/Jester cards are returned below
if card not in ['Fool', 'Jester']:
cards.append(card)
random.shuffle(cards)
return card
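# Hedged usage sketch showing how the pieces above fit together: restore any
# persisted decks, draw a few cards for a made-up deck id, and let the atexit
# hook registered above save the decks on exit.  Runs only when the file is
# executed directly.
if __name__ == '__main__':
    load_decks()
    for _ in range(3):
        print(draw('example-table'))  # 'example-table' is a hypothetical deck id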
| #!/usr/bin/env python3
import random
import atexit
import pickle
decks = {}
def save_decks():
with open("decks.pickle", 'wb') as f:
pickle.dump(decks, f)
print("Decks saved")
def load_decks():
global decks
try:
with open("decks.pickle", 'rb') as f:
decks = pickle.load(f)
print("Decks loaded")
except:
print("Unable to load decks")
atexit.register(save_decks)
cards13 = """Sun
Moon
Star
Throne
Key
Knight
Void
Flames
Skull
Ruin
Euryale
Rogue
Jester""".split()
cards22 = """Vizier
Comet
Fates
Gem
Talons
Idiot
Donjon
Balance
Fool""".split() + cards13
def init_deck(deck_id, use_22 = False):
cards = (cards13 if not use_22 else cards22)[:]
random.shuffle(cards)
decks[deck_id] = cards
def draw(deck_id):
if deck_id not in decks:
init_deck(deck_id, random.choice([False, False, False, True])) # 75% chance of 13-card deck
cards = decks[deck_id]
    card = cards.pop(0)  # remove the drawn card; non-Fool/Jester cards are returned below
if card not in ['Fool', 'Jester']:
cards.append(card)
random.shuffle(cards)
return card | en | 0.312863 | #!/usr/bin/env python3 Sun
Moon
Star
Throne
Key
Knight
Void
Flames
Skull
Ruin
Euryale
Rogue
Jester Vizier
Comet
Fates
Gem
Talons
Idiot
Donjon
Balance
Fool # 75% chance of 13-card deck | 3.259289 | 3 |
tests/test_geometry/test_area.py | carterbox/xdesign | 18 | 6631224 | <filename>tests/test_geometry/test_area.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2016, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2015. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
from xdesign.geometry import *
from numpy.testing import assert_allclose, assert_equal
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
def test_Circle_area():
circle = Circle(Point([0, 0]), 1)
assert_allclose(circle.area, 3.14159265359, rtol=1e-6)
negcircle = -circle
assert_allclose(circle.area, 3.14159265359, rtol=1e-6)
assert_allclose(negcircle.area, -3.14159265359, rtol=1e-6)
def test_Mesh_area():
p5 = Point([0, 0])
p1 = Point([1, 1])
p4 = Point([1, -1])
p3 = Point([-1, -1])
p2 = Point([-1, 1])
m = Mesh()
assert_equal(m.area, 0)
m.append(Triangle(p5, p1, p2))
m.append(Triangle(p5, p2, p3))
m.append(Triangle(p5, p3, p4))
m.append(Triangle(p5, p4, p1))
assert_equal(m.area, 4)
def test_Mesh_center():
p5 = Point([0, 0])
p1 = Point([1, 1])
p4 = Point([1, -1])
p3 = Point([-1, -1])
p2 = Point([-1, 1])
m = Mesh()
assert_equal(m.center, Point([0, 0]))
m.append(Triangle(p5, p1, p2))
m.append(Triangle(p5, p2, p3))
m.append(Triangle(p5, p3, p4))
m.append(Triangle(p5, p4, p1))
assert_equal(m.center, Point([0, 0]))
m.pop()
m.pop()
m.pop()
m.pop()
assert_equal(m.center, Point([0, 0]))
def contains_full_overlap(A, B):
"""Tests the contains function for two entities which are arranged such
that A is a subset of B and the edges of A and B do not touch."""
# A = Circle(Point([0, 0]), 0.5)
# B = Circle(Point([0, 0.1]), 2)
assert not A.contains(B)
assert B.contains(A)
assert not (-A).contains(B)
assert not B.contains(-A)
assert not A.contains(-B)
assert not (-B).contains(A)
assert (-A).contains(-B)
assert not (-B).contains(-A)
def contains_partial_overlap(A, B):
"""Tests the contains function for two entities which are arranged such
that A is a partial subset of B i.e. the edges intersect at least once."""
# A = Circle(Point([0, 0]), 0.5)
# B = Circle(Point([0, 1]), 0.5)
assert not A.contains(B)
assert not B.contains(A)
assert not (-A).contains(B)
assert not B.contains(-A)
assert not A.contains(-B)
assert not (-B).contains(A)
assert not (-A).contains(-B)
assert not (-B).contains(-A)
def contains_no_overlap(A, B):
"""Tests the contains function for two entities which are arranged such
that A intersect B is the empty set."""
# A = Circle(Point([0, 0]), 0.5)
# B = Circle(Point([0, 3]), 0.5)
assert not A.contains(B)
assert not B.contains(A)
assert (-A).contains(B)
assert not B.contains(-A)
assert not A.contains(-B)
assert (-B).contains(A)
assert not (-A).contains(-B)
assert not (-B).contains(-A)
def test_Circle_contains():
A = Circle(Point([0, 0]), 0.5)
Bf = Circle(Point([0, 0.1]), 1.5)
Bp = Circle(Point([0.5, 0.5]), 0.5)
Bn = Circle(Point([0.5, 3]), 0.5)
contains_full_overlap(A, Bf)
contains_partial_overlap(A, Bp)
contains_no_overlap(A, Bn)
Bf = Square(Point([0, 0.1]), 3)
Bp = Square(Point([0.5, 0.5]), 1)
Bn = Square(Point([0.5, 3]), 1)
contains_full_overlap(A, Bf)
contains_partial_overlap(A, Bp)
contains_no_overlap(A, Bn)
def test_Polygon_contains():
A = Square(Point([0, 0]), 1)
Bf = Square(Point([0, 0.1]), 3)
Bp = Square(Point([0.5, 0.5]), 1)
Bn = Square(Point([0.5, 3]), 1)
contains_full_overlap(A, Bf)
contains_partial_overlap(A, Bp)
contains_no_overlap(A, Bn)
Bf = Circle(Point([0, 0.1]), 1.5)
Bp = Circle(Point([0.5, 0.5]), 0.5)
Bn = Circle(Point([0.5, 3]), 0.5)
contains_full_overlap(A, Bf)
contains_partial_overlap(A, Bp)
contains_no_overlap(A, Bn)
def test_Mesh_contains():
p0 = Point([0, 0])
p1 = Point([0, 1])
p2 = Point([0, 3])
circle0 = -Square(Point([0, 0]), 1)
circle1 = Square(Point([0, 0]), 2)
assert not circle1.contains(circle0)
assert (circle0).contains(-circle1)
assert circle1.contains(p0)
assert not circle0.contains(circle1)
assert not circle0.contains(p0)
mesh0 = Mesh(faces=[circle1, circle0])
assert not mesh0.contains(p0)
assert mesh0.contains(p1)
assert not mesh0.contains(p2)
if __name__ == '__main__':
import nose
nose.runmodule(exit=False)
| en | 0.779149 | 1.296742 | 1 |
tests/test_integration.py | pandadefi/ape-solidity | 0 | 6631225 |
from pathlib import Path
TEST_CONTRACTS = [str(p.stem) for p in (Path(__file__).parent / "contracts").iterdir()]
def test_integration(project):
for contract in TEST_CONTRACTS:
assert contract in project.contracts
contract = project.contracts[contract]
assert contract.source_id == f"{contract.name}.sol"
| none | 1 | 2.208782 | 2 |
GearBot/Util/Utils.py | AEnterprise/GearBot | 20 | 6631226 | import asyncio
import json
import os
import subprocess
from collections import namedtuple, OrderedDict
from datetime import datetime
from json import JSONDecodeError
from subprocess import Popen
import discord
import math
from discord import NotFound
from Util import GearbotLogging, Translator, Emoji
from Util.Matchers import ROLE_ID_MATCHER, CHANNEL_ID_MATCHER, ID_MATCHER, EMOJI_MATCHER, URL_MATCHER
BOT = None
def initialize(actual_bot):
global BOT
BOT = actual_bot
def fetch_from_disk(filename, alternative=None):
try:
with open(f"{filename}.json", encoding="UTF-8") as file:
return json.load(file)
except FileNotFoundError:
if alternative is not None:
return fetch_from_disk(alternative)
except JSONDecodeError:
if alternative is not None:
return fetch_from_disk(alternative)
return dict()
def save_to_disk(filename, dict):
with open(f"{filename}.json", "w", encoding="UTF-8") as file:
json.dump(dict, file, indent=4, skipkeys=True, sort_keys=True)
async def cleanExit(bot, trigger):
await GearbotLogging.bot_log(f"Shutdown triggered by {trigger}.")
await bot.logout()
await bot.close()
    await bot.aiosession.close()
def trim_message(message, limit):
if len(message) < limit - 4:
return message
return f"{message[:limit-4]}..."
async def empty_list(ctx, action):
message = await ctx.send(f"{Translator.translate('m_nobody', ctx, action=action)} {Emoji.get_chat_emoji('THINK')}")
await asyncio.sleep(3)
message2 = await ctx.send(f"{Translator.translate('m_nobody_2', ctx)} {Emoji.get_chat_emoji('WINK')}")
await asyncio.sleep(3)
await message.edit(content=Translator.translate('intimidation', ctx))
await message2.delete()
replacements = {
"`": "ˋ"
}
def replace_lookalikes(text):
for k, v in replacements.items():
text = text.replace(k, v)
return text
async def clean(text, guild:discord.Guild=None, markdown=True, links=True, emoji=True, lookalikes=True):
text = str(text)
if guild is not None:
# resolve user mentions
for uid in set(ID_MATCHER.findall(text)):
name = "@" + await username(int(uid), False, False)
text = text.replace(f"<@{uid}>", name)
text = text.replace(f"<@!{uid}>", name)
# resolve role mentions
for uid in set(ROLE_ID_MATCHER.findall(text)):
role = discord.utils.get(guild.roles, id=int(uid))
if role is None:
name = "@UNKNOWN ROLE"
else:
name = "@" + role.name
text = text.replace(f"<@&{uid}>", name)
# resolve channel names
for uid in set(CHANNEL_ID_MATCHER.findall(text)):
            channel = guild.get_channel(int(uid))
            if channel is None:
                name = "#UNKNOWN CHANNEL"
            else:
                name = "#" + channel.name
            text = text.replace(f"<#{uid}>", name)
    # re-assemble emoji in such a way that they don't turn into twemoji
urls = set(URL_MATCHER.findall(text))
if lookalikes:
text = replace_lookalikes(text)
if markdown:
text = escape_markdown(text)
else:
text = text.replace("@", "@\u200b").replace("**", "**").replace("``", "``")
if emoji:
for e in set(EMOJI_MATCHER.findall(text)):
a, b, c = zip(e)
text = text.replace(f"<{a[0]}:{b[0]}:{c[0]}>", f"<{a[0]}\\:{b[0]}\\:{c[0]}>")
if links:
#find urls last so the < escaping doesn't break it
for url in urls:
text = text.replace(escape_markdown(url), f"<{url}>")
return text
def escape_markdown(text):
text = str(text)
for c in ["\\", "*", "_", "~", "|", "{", ">"]:
text = text.replace(c, f"\\{c}")
return text.replace("@", "@\u200b")
def clean_name(text):
if text is None:
return None
return str(text).replace("@","@\u200b").replace("**", "*\u200b*").replace("``", "`\u200b`")
known_invalid_users = []
user_cache = OrderedDict()
async def username(uid, fetch=True, clean=True):
user = await get_user(uid, fetch)
if user is None:
return "UNKNOWN USER"
if clean:
return clean_user(user)
else:
return f"{user.name}#{user.discriminator}"
async def get_user(uid, fetch=True):
UserClass = namedtuple("UserClass", "name id discriminator bot avatar_url created_at is_avatar_animated mention")
user = BOT.get_user(uid)
if user is None:
if uid in known_invalid_users:
return None
if BOT.redis_pool is not None:
userCacheInfo = await BOT.redis_pool.hgetall(f"users:{uid}")
            if len(userCacheInfo) == 8:  # entry exists in the Redis cache; check the length because fields are sometimes missing
userFormed = UserClass(
userCacheInfo["name"],
userCacheInfo["id"],
userCacheInfo["discriminator"],
userCacheInfo["bot"] == "1",
userCacheInfo["avatar_url"],
datetime.fromtimestamp(float(userCacheInfo["created_at"])),
                    userCacheInfo["is_avatar_animated"] == "1",  # stored as "0"/"1" strings in Redis
userCacheInfo["mention"]
)
return userFormed
if fetch:
try:
user = await BOT.fetch_user(uid)
pipeline = BOT.redis_pool.pipeline()
pipeline.hmset_dict(f"users:{uid}",
name = user.name,
id = user.id,
discriminator = user.discriminator,
bot = int(user.bot),
avatar_url = str(user.avatar_url),
created_at = user.created_at.timestamp(),
is_avatar_animated = int(user.is_avatar_animated()),
mention = user.mention
)
                    pipeline.expire(f"users:{uid}", 3000)  # cache entries expire after 3000 seconds (50 minutes)
BOT.loop.create_task(pipeline.execute())
except NotFound:
known_invalid_users.append(uid)
return None
else: # No Redis, using the dict method instead
if uid in user_cache:
return user_cache[uid]
if fetch:
try:
user = await BOT.fetch_user(uid)
if len(user_cache) >= 10: # Limit the cache size to the most recent 10
                        user_cache.popitem(last=False)  # evict the oldest cached entry first
user_cache[uid] = user
except NotFound:
known_invalid_users.append(uid)
return None
return user
def clean_user(user):
if user is None:
return "UNKNOWN USER"
return f"{escape_markdown(user.name)}#{user.discriminator}"
def username_from_user(user):
if user is None:
return "UNKNOWN USER"
return user.name
def pad(text, length, char=' '):
return f"{text}{char * (length-len(text))}"
async def execute(command):
p = Popen(command, cwd=os.getcwd(), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while p.poll() is None:
await asyncio.sleep(1)
out, error = p.communicate()
return p.returncode, out.decode('utf-8').strip(), error.decode('utf-8').strip()
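# Typical usage (see get_commit below): code, out, err = await execute('git rev-parse --short HEAD')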
def find_key(data, wanted):
for k, v in data.items():
if v == wanted:
return k
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
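# Example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]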
async def get_commit():
_, out, __ = await execute('git rev-parse --short HEAD')
return out
def to_pretty_time(seconds, guild_id):
seconds = round(seconds)
partcount = 0
parts = {
'weeks': 60 * 60 * 24 * 7,
'days': 60 * 60 * 24,
'hours_solo': 60 * 60,
'minutes': 60,
'seconds': 1
}
duration = ""
if seconds == 0:
return Translator.translate("seconds", guild_id, amount=0)
for k, v in parts.items():
if seconds / v >= 1:
amount = math.floor(seconds / v)
seconds -= amount * v
if partcount == 1:
duration += ", "
duration += " " + Translator.translate(k, guild_id, amount=amount)
if seconds == 0:
break
return duration.strip()
def assemble_attachment(channel, aid, name):
return f"https://media.discordapp.net/attachments/{channel}/{aid}/{name}" | import asyncio
import json
import os
import subprocess
from collections import namedtuple, OrderedDict
from datetime import datetime
from json import JSONDecodeError
from subprocess import Popen
import discord
import math
from discord import NotFound
from Util import GearbotLogging, Translator, Emoji
from Util.Matchers import ROLE_ID_MATCHER, CHANNEL_ID_MATCHER, ID_MATCHER, EMOJI_MATCHER, URL_MATCHER
BOT = None
def initialize(actual_bot):
global BOT
BOT = actual_bot
def fetch_from_disk(filename, alternative=None):
try:
with open(f"{filename}.json", encoding="UTF-8") as file:
return json.load(file)
except FileNotFoundError:
if alternative is not None:
return fetch_from_disk(alternative)
except JSONDecodeError:
if alternative is not None:
return fetch_from_disk(alternative)
return dict()
def save_to_disk(filename, dict):
with open(f"{filename}.json", "w", encoding="UTF-8") as file:
json.dump(dict, file, indent=4, skipkeys=True, sort_keys=True)
async def cleanExit(bot, trigger):
await GearbotLogging.bot_log(f"Shutdown triggered by {trigger}.")
await bot.logout()
await bot.close()
bot.aiosession.close()
def trim_message(message, limit):
if len(message) < limit - 4:
return message
return f"{message[:limit-4]}..."
async def empty_list(ctx, action):
message = await ctx.send(f"{Translator.translate('m_nobody', ctx, action=action)} {Emoji.get_chat_emoji('THINK')}")
await asyncio.sleep(3)
message2 = await ctx.send(f"{Translator.translate('m_nobody_2', ctx)} {Emoji.get_chat_emoji('WINK')}")
await asyncio.sleep(3)
await message.edit(content=Translator.translate('intimidation', ctx))
await message2.delete()
replacements = {
"`": "ˋ"
}
def replace_lookalikes(text):
for k, v in replacements.items():
text = text.replace(k, v)
return text
async def clean(text, guild:discord.Guild=None, markdown=True, links=True, emoji=True, lookalikes=True):
text = str(text)
if guild is not None:
# resolve user mentions
for uid in set(ID_MATCHER.findall(text)):
name = "@" + await username(int(uid), False, False)
text = text.replace(f"<@{uid}>", name)
text = text.replace(f"<@!{uid}>", name)
# resolve role mentions
for uid in set(ROLE_ID_MATCHER.findall(text)):
role = discord.utils.get(guild.roles, id=int(uid))
if role is None:
name = "@UNKNOWN ROLE"
else:
name = "@" + role.name
text = text.replace(f"<@&{uid}>", name)
# resolve channel names
for uid in set(CHANNEL_ID_MATCHER.findall(text)):
channel = guild.get_channel(uid)
if channel is None:
name = "#UNKNOWN CHANNEL"
else:
name = "#" + channel.name
text = text.replace(f"<@#{uid}>", name)
# re-assemble emoji so such a way that they don't turn into twermoji
urls = set(URL_MATCHER.findall(text))
if lookalikes:
text = replace_lookalikes(text)
if markdown:
text = escape_markdown(text)
else:
text = text.replace("@", "@\u200b").replace("**", "**").replace("``", "``")
if emoji:
for e in set(EMOJI_MATCHER.findall(text)):
a, b, c = zip(e)
text = text.replace(f"<{a[0]}:{b[0]}:{c[0]}>", f"<{a[0]}\\:{b[0]}\\:{c[0]}>")
if links:
#find urls last so the < escaping doesn't break it
for url in urls:
text = text.replace(escape_markdown(url), f"<{url}>")
return text
def escape_markdown(text):
text = str(text)
for c in ["\\", "*", "_", "~", "|", "{", ">"]:
text = text.replace(c, f"\\{c}")
return text.replace("@", "@\u200b")
def clean_name(text):
if text is None:
return None
return str(text).replace("@","@\u200b").replace("**", "*\u200b*").replace("``", "`\u200b`")
known_invalid_users = []
user_cache = OrderedDict()
async def username(uid, fetch=True, clean=True):
user = await get_user(uid, fetch)
if user is None:
return "UNKNOWN USER"
if clean:
return clean_user(user)
else:
return f"{user.name}#{user.discriminator}"
async def get_user(uid, fetch=True):
UserClass = namedtuple("UserClass", "name id discriminator bot avatar_url created_at is_avatar_animated mention")
user = BOT.get_user(uid)
if user is None:
if uid in known_invalid_users:
return None
if BOT.redis_pool is not None:
userCacheInfo = await BOT.redis_pool.hgetall(f"users:{uid}")
if len(userCacheInfo) == 8: # It existed in the Redis cache, check length cause sometimes somehow things are missing, somehow
userFormed = UserClass(
userCacheInfo["name"],
userCacheInfo["id"],
userCacheInfo["discriminator"],
userCacheInfo["bot"] == "1",
userCacheInfo["avatar_url"],
datetime.fromtimestamp(float(userCacheInfo["created_at"])),
bool(userCacheInfo["is_avatar_animated"]) == "1",
userCacheInfo["mention"]
)
return userFormed
if fetch:
try:
user = await BOT.fetch_user(uid)
pipeline = BOT.redis_pool.pipeline()
pipeline.hmset_dict(f"users:{uid}",
name = user.name,
id = user.id,
discriminator = user.discriminator,
bot = int(user.bot),
avatar_url = str(user.avatar_url),
created_at = user.created_at.timestamp(),
is_avatar_animated = int(user.is_avatar_animated()),
mention = user.mention
)
pipeline.expire(f"users:{uid}", 3000) # 5 minute cache life
BOT.loop.create_task(pipeline.execute())
except NotFound:
known_invalid_users.append(uid)
return None
else: # No Redis, using the dict method instead
if uid in user_cache:
return user_cache[uid]
if fetch:
try:
user = await BOT.fetch_user(uid)
if len(user_cache) >= 10: # Limit the cache size to the most recent 10
user_cache.popitem()
user_cache[uid] = user
except NotFound:
known_invalid_users.append(uid)
return None
return user
def clean_user(user):
if user is None:
return "UNKNOWN USER"
return f"{escape_markdown(user.name)}#{user.discriminator}"
def username_from_user(user):
if user is None:
return "UNKNOWN USER"
return user.name
def pad(text, length, char=' '):
return f"{text}{char * (length-len(text))}"
async def execute(command):
p = Popen(command, cwd=os.getcwd(), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while p.poll() is None:
await asyncio.sleep(1)
out, error = p.communicate()
return p.returncode, out.decode('utf-8').strip(), error.decode('utf-8').strip()
def find_key(data, wanted):
for k, v in data.items():
if v == wanted:
return k
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
async def get_commit():
_, out, __ = await execute('git rev-parse --short HEAD')
return out
def to_pretty_time(seconds, guild_id):
seconds = round(seconds)
partcount = 0
parts = {
'weeks': 60 * 60 * 24 * 7,
'days': 60 * 60 * 24,
'hours_solo': 60 * 60,
'minutes': 60,
'seconds': 1
}
duration = ""
if seconds == 0:
return Translator.translate("seconds", guild_id, amount=0)
for k, v in parts.items():
if seconds / v >= 1:
amount = math.floor(seconds / v)
seconds -= amount * v
if partcount == 1:
duration += ", "
duration += " " + Translator.translate(k, guild_id, amount=amount)
if seconds == 0:
break
return duration.strip()
def assemble_attachment(channel, aid, name):
return f"https://media.discordapp.net/attachments/{channel}/{aid}/{name}" | en | 0.774726 | # resolve user mentions # resolve role mentions # resolve channel names #{uid}>", name) # re-assemble emoji so such a way that they don't turn into twermoji #find urls last so the < escaping doesn't break it #{user.discriminator}" # It existed in the Redis cache, check length cause sometimes somehow things are missing, somehow # 5 minute cache life # No Redis, using the dict method instead # Limit the cache size to the most recent 10 #{user.discriminator}" | 2.236742 | 2 |
main.py | ICRC-BME/NoiseDetectionCNN | 1 | 6631227 | from noise_detector import noise_detector, mef3_channel_iterator
from timer import timer
import pymef
def example_0():
"""
Predict probabilities for categories: noise,pathology and physiology based on given 3s long data segment (15000 samples)
"""
# initialize detector instance
detector = noise_detector("./model/model_mayo_5khz")
# load mef3 file
session = './tests/test_signal.mefd'
password = '***'
info = pymef.read_ts_channel_basic_info(session_path=session,password=password)
test_data = pymef.read_ts_channels_sample(session_path=session,password=password,channel_map=[info[0]['name']],sample_map=[0,15000])
test_data = test_data[0].reshape(1,15000)
# predict probabilities for given data segment
yp = detector.predict(test_data)
return yp
def example_1():
"""
Predict probabilities for categories: noise,pathology and physiology for given channel
Predict single example per iteration (minibatch_size = 1). Does not need big GPU memory but exhibits significantly higher computing time
"""
# initialize detector instance
detector = noise_detector("./model/model_mayo_5khz")
# load mef3 file
session = './tests/test_signal.mefd'
password = '***'
info = pymef.read_ts_channel_basic_info(session_path=session, password=password)
# initialize channel iterator instance
mci = mef3_channel_iterator()
# pre-loads data into mci buffer
mci = mci.buffer(session=session,
password=password,
channel=[info[0]['name']],
sample_map=[0,info[0]['nsamp']])
# set buffer options
mci = mci.buffer_options(samples=15000, offset=5000, minibatch_size=1)
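    # Assuming the 5 kHz rate implied by the model name, samples=15000 matches the 3 s window from example_0 and offset=5000 advances it by roughly 1 s per prediction.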
yp = list()
for k,data in enumerate(mci):
yp.extend(detector.predict(data))
return yp
def example_2():
"""
Predict probabilities for categories: noise,pathology and physiology for given channel
Predict multiple examples per iteration (minibatch_size > 1).
Depends on GPU memory and speed. In general, should be slightly faster. -> not significant
Do not use on CPU, it is slower then example_1.
"""
# initialize detector instance
detector = noise_detector("./model/model_mayo_5khz")
# load mef3 file
session = './tests/test_signal.mefd'
password = '***'
info = pymef.read_ts_channel_basic_info(session_path=session, password=password)
# initialize channel iterator instance
mci = mef3_channel_iterator()
# pre-loads data into mci buffer
mci = mci.buffer(session=session,
password=password,
channel=[info[0]['name']],
sample_map=[0,info[0]['nsamp']])
# set buffer options
mci = mci.buffer_options(samples=15000, offset=5000, minibatch_size=100)
yp = list()
for k,data in enumerate(mci):
yp.extend(detector.predict_minibatch(data))
return yp
if __name__ == "__main__":
with timer():
y0 = example_0()
with timer():
y1 = example_1()
with timer():
y2 = example_2()
| en | 0.741839 | 2.345066 | 2 |
pexception/__init__.py | rchui/pexception | 1 | 6631228 |
from .pexception import hook # noqa: disable
| en | 0.363427 | 1.089404 | 1 |
gym_puyopuyo/gym_puyopuyo/__init__.py | brnor/dipl | 12 | 6631229 |
from gym_puyopuyo.env import register # noqa: F401
| uz | 0.465103 | 1.039246 | 1 |
GCN/wsd_sent_embeddings.py | AakashSrinivasan03/GlossBert-GraphEmbeddings | 0 | 6631230 |
# coding=utf-8
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
from collections import OrderedDict
import csv
import logging
import os
import random
import sys
import pandas as pd
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from modeling import BertForSequenceClassification, BertConfig
from tokenization import BertTokenizer
from optimization import BertAdam, warmup_linear
import scipy.io
import re
from nltk.corpus import wordnet as wn
import scipy as sp
from nltk.corpus import wordnet as ewn
def sc2ss(sensekey):
'''Look up a synset given the information from SemCor'''
### Assuming it is the same WN version (e.g. 3.0)
return ewn.lemma_from_key(sensekey).synset()
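# Example usage (illustrative key in the usual lemma%ss_type:lex_filenum:lex_id:head:head_id form): sc2ss('long%3:00:02::')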
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None,sense_key=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.sense_key = sense_key
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class WSD_sent_Processor(DataProcessor):
"""Processor for the WSD data set."""
def get_train_examples(self, data_dir):
"""See base class."""
train_data = pd.read_csv(data_dir, sep="\t", na_filter=False).values
return self._create_examples(train_data, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
dev_data = pd.read_csv(data_dir, sep="\t", na_filter=False).values
return self._create_examples(dev_data, "dev")
def get_labels(self):
"""See base class."""
return ["0","1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
        for (i, line) in enumerate(lines):
            # Debugging aids (disabled): cap the number of rows processed per split.
            # if set_type == 'train' and i >= 100: break
            # if set_type == 'dev' and i >= 100: break
guid = "%s-%s" % (set_type, i)
text_a = str(line[2])
text_b = str(line[3])
label = str(line[1])
            # print(i, str(line[-1]))
            # sense_key = sc2ss(str(line[-1]))  # alternative: resolve the sense key to a synset up front
sense_key = str(line[-1])
            if i % 1000 == 0:  # print a sample every 1000 rows
print(i)
print("guid=",guid)
print("text_a=",text_a)
print("text_b=",text_b)
print("label=",label)
print("sense_key",sense_key)
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label,sense_key=sense_key))
return examples
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(tqdm(examples)):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
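        # For this WSD setup tokens_a is the context sentence and tokens_b the gloss, e.g. (illustrative pair):
        # tokens: [CLS] he sat by the bank of the river [SEP] sloping land beside a body of water [SEP]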
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
choices=["WSD"],
help="The name of the task to train.")
parser.add_argument("--train_data_dir",
default=None,
type=str,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--eval_data_dir",
default=None,
type=str,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the embeddings are written")
parser.add_argument("--file_name",
default=None,
type=str,
required=True,
help="The output file where the embeddings are written")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help='''a path or url to a pretrained model archive containing:
'bert_config.json' a configuration file for the model
'pytorch_model.bin' a PyTorch dump of a BertForPreTraining instance''')
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test",
action='store_true',
help="Whether to run test on the test set.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--train_batch_size",
default=128,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=128,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=1.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
assert args.train_data_dir != None, "train_data_dir can not be None"
if args.do_eval:
assert args.eval_data_dir != None, "eval_data_dir can not be None"
os.makedirs(args.output_dir, exist_ok=True)
# prepare dataloaders
processors = {
"WSD":WSD_sent_Processor
}
output_modes = {
"WSD": "classification"
}
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
# training set
train_examples = None
num_train_optimization_steps = None
train_examples = processor.get_train_examples(args.train_data_dir)
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
model = BertForSequenceClassification.from_pretrained(args.bert_model,
cache_dir=cache_dir,
num_labels=num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
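    # Note: the script below only runs forward passes (model.eval(), no optimizer.step()),
    # so this optimizer is constructed but never used to update any weights.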
# load data
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
#if args.local_rank == -1:
# train_sampler = RandomSampler(train_data)
#else:
# train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, batch_size=args.train_batch_size,shuffle=False)
model.eval()
N = len(train_examples)
contextualized_embeddings = np.zeros((N,768))
labels = np.zeros(N)
synsets = np.array([t.sense_key for t in train_examples])
l = 0
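    # l and h track the running row range of the output arrays filled by each minibatch.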
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
        sentence_embeddings = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=None).detach().cpu()
h = l + sentence_embeddings.shape[0]
contextualized_embeddings[l:h] = sentence_embeddings
labels[l:h] = label_ids.cpu()
l = h
print(contextualized_embeddings.shape)
print(labels.shape)
d = {'embeddings':contextualized_embeddings,'labels':labels,'synsets':synsets}
np.save(os.path.join(args.output_dir,args.file_name), d)
if __name__ == "__main__":
main()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
assert args.train_data_dir != None, "train_data_dir can not be None"
if args.do_eval:
assert args.eval_data_dir != None, "eval_data_dir can not be None"
os.makedirs(args.output_dir, exist_ok=True)
# prepare dataloaders
processors = {
"WSD":WSD_sent_Processor
}
output_modes = {
"WSD": "classification"
}
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
# training set
train_examples = None
num_train_optimization_steps = None
train_examples = processor.get_train_examples(args.train_data_dir)
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
model = BertForSequenceClassification.from_pretrained(args.bert_model,
cache_dir=cache_dir,
num_labels=num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
# load data
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
#if args.local_rank == -1:
# train_sampler = RandomSampler(train_data)
#else:
# train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, batch_size=args.train_batch_size,shuffle=False)
model.eval()
N = len(train_examples)
contextualized_embeddings = np.zeros((N,768))
labels = np.zeros(N)
synsets = np.array([t.sense_key for t in train_examples])
l = 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
sentence_embeddings = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=None).cpu()
h = l + sentence_embeddings.shape[0]
contextualized_embeddings[l:h] = sentence_embeddings
labels[l:h] = label_ids.cpu()
l = h
print(contextualized_embeddings.shape)
print(labels.shape)
d = {'embeddings':contextualized_embeddings,'labels':labels,'synsets':synsets}
np.save(os.path.join(args.output_dir,args.file_name), d)
if __name__ == "__main__":
main() | en | 0.809242 | # coding=utf-8 BERT finetuning runner. Look up a synset given the information from SemCor ### Assuming it is the same WN version (e.g. 3.0) A single training/test example for simple sequence classification. Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. Base class for data converters for sequence classification data sets. Gets a collection of `InputExample`s for the train set. Gets a collection of `InputExample`s for the dev set. Gets a collection of `InputExample`s for the test set. Gets the list of labels for this data set. Reads a tab separated value file. Processor for the WSD data set. See base class. See base class. See base class. Creates examples for the training and dev sets. ### ###if set_type == 'train' and i >=100: break ############### ###if set_type == 'dev' and i>=100: break ################## ##print(i,str(line[-1])) ###sense_key = sc2ss(str(line[-1])) ######1000 A single set of features of data. Loads a data file into a list of `InputBatch`s. # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" # Account for [CLS] and [SEP] with "- 2" # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. # Zero-pad up to the sequence length. Truncates a sequence pair in place to the maximum length. # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. ## Required parameters a path or url to a pretrained model archive containing: 'bert_config.json' a configuration file for the model 'pytorch_model.bin' a PyTorch dump of a BertForPreTraining instance ## Other parameters # Initializes the distributed backend which will take care of sychronizing nodes/GPUs # prepare dataloaders # training set # Prepare model # Prepare optimizer # load data #if args.local_rank == -1: # train_sampler = RandomSampler(train_data) #else: # train_sampler = DistributedSampler(train_data) | 2.331173 | 2 |
cli/backend_cloud_formation.py | cprecup/pnda-cli | 3 | 6631231 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# This software is licensed to you under the terms of the Apache License, Version 2.0
# (the "License").
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# The code, technical concepts, and all information contained herein, are the property of
# Cisco Technology, Inc.and/or its affiliated entities, under various laws including copyright,
# international treaties, patent, and/or contract.
# Any use of the material herein must be in accordance with the terms of the License.
# All rights not expressly granted by the License are reserved.
# Unless required by applicable law or agreed to separately in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied.
#
# Purpose: Backend implementation for creating PNDA on Amazon Web Services EC2
import sys
import json
import time
import traceback
import ssl
import boto.cloudformation
import boto.ec2
from backend_base import BaseBackend
import pnda_cli_utils as utils
utils.init_logging()
CONSOLE = utils.CONSOLE_LOGGER
LOG = utils.FILE_LOGGER
LOG_FILE_NAME = utils.LOG_FILE_NAME
class CloudFormationBackend(BaseBackend):
'''
Deployment specific implementation for AWS Cloud Formation
'''
def __init__(self, pnda_env, cluster, no_config_check, flavor, keyname, branch, dry_run):
self._dry_run = dry_run
self._exclude_cfn_params = ['AWS_SECRET_ACCESS_KEY', 'AWS_AVAILABILITY_ZONE', 'AWS_REGION', 'AWS_ACCESS_KEY_ID']
super(CloudFormationBackend, self).__init__(
pnda_env, cluster, no_config_check, flavor, self._keyfile_from_keyname(keyname), branch)
def check_target_specific_config(self):
'''
Check AWS specific configuration has been entered correctly
'''
self._check_aws_connection()
name = self._keyname_from_keyfile(self._keyfile)
self._check_keypair(name)
def load_node_config(self):
'''
Load a node config descriptor from a config.json file in the cloud-formation flavor specific directory
'''
node_config_file = file('cloud-formation/%s/config.json' % self._flavor)
config = json.load(node_config_file)
node_config_file.close()
return config
def fill_instance_map(self):
'''
Use the AWS Ec2 API to generate a list of the target instances
'''
CONSOLE.debug('Checking details of created instances')
region = self._pnda_env['aws_parameters']['AWS_REGION']
ec2 = boto.ec2.connect_to_region(region)
reservations = self._retry(ec2.get_all_reservations)
instance_map = {}
for reservation in reservations:
for instance in reservation.instances:
if 'pnda_cluster' in instance.tags and instance.tags['pnda_cluster'] == self._cluster and instance.state == 'running':
CONSOLE.debug(instance.private_ip_address + ' ' + instance.tags['Name'])
instance_map[instance.tags['Name']] = {
"bootstrapped": False,
"public_dns": instance.public_dns_name,
"ip_address": instance.ip_address,
"private_ip_address":instance.private_ip_address,
"name": instance.tags['Name'],
"node_idx": instance.tags['node_idx'],
"node_type": instance.tags['node_type']
}
return instance_map
def pre_install_pnda(self, node_counts):
'''
Use the AWS Cloud Formation API to launch a stack that PNDA can be installed on
The cloud formation stack is defined in json files in the flavor specific cloud-formation directory
'''
template_data = self._generate_template_file(
self._flavor, node_counts['datanodes'], node_counts['opentsdb_nodes'], node_counts['kafka_nodes'], node_counts['zk_nodes'])
region = self._pnda_env['aws_parameters']['AWS_REGION']
aws_availability_zone = self._pnda_env['aws_parameters']['AWS_AVAILABILITY_ZONE']
cf_parameters = [('keyName', self._keyname_from_keyfile(self._keyfile)), ('pndaCluster', self._cluster), ('awsAvailabilityZone', aws_availability_zone)]
for parameter in self._pnda_env['aws_parameters']:
if parameter not in self._exclude_cfn_params:
cf_parameters.append((parameter, self._pnda_env['aws_parameters'][parameter]))
self._save_cf_resources('create_%s' % utils.MILLI_TIME(), self._cluster, cf_parameters, template_data)
if self._dry_run:
CONSOLE.info('Dry run mode completed')
sys.exit(0)
CONSOLE.info('Creating Cloud Formation stack')
conn = boto.cloudformation.connect_to_region(region)
stack_status = 'CREATING'
stack_status_new = None
conn.create_stack(self._cluster,
template_body=template_data,
parameters=cf_parameters)
while stack_status in ['CREATE_IN_PROGRESS', 'CREATING']:
time.sleep(5)
if stack_status != stack_status_new:
if stack_status_new is not None:
stack_status = stack_status_new
CONSOLE.info('Stack is: %s', stack_status)
else:
CONSOLE.debug('Stack is: %s', stack_status)
stacks = self._retry(conn.describe_stacks, self._cluster)
if stacks:
stack_status_new = stacks[0].stack_status
if stack_status != 'CREATE_COMPLETE':
CONSOLE.error('Stack did not come up, status is: %s', stack_status)
self._fetch_stack_events(conn, self._cluster)
sys.exit(1)
self.clear_instance_map_cache()
def pre_expand_pnda(self, node_counts):
'''
Use the AWS Cloud Formation API to launch a stack that PNDA can be installed on
The cloud formation stack is defined in json files in the flavor specific cloud-formation directory
'''
template_data = self._generate_template_file(
self._flavor, node_counts['datanodes'], node_counts['opentsdb_nodes'], node_counts['kafka_nodes'], node_counts['zk_nodes'])
region = self._pnda_env['aws_parameters']['AWS_REGION']
cf_parameters = [('keyName', self._keyname_from_keyfile(self._keyfile)), ('pndaCluster', self._cluster)]
for parameter in self._pnda_env['aws_parameters']:
if parameter not in self._exclude_cfn_params:
cf_parameters.append((parameter, self._pnda_env['aws_parameters'][parameter]))
self._save_cf_resources('expand_%s' % utils.MILLI_TIME(), self._cluster, cf_parameters, template_data)
if self._dry_run:
CONSOLE.info('Dry run mode completed')
sys.exit(0)
CONSOLE.info('Updating Cloud Formation stack')
conn = boto.cloudformation.connect_to_region(region)
stack_status = 'UPDATING'
stack_status_new = None
self._retry(conn.update_stack, self._cluster,
template_body=template_data,
parameters=cf_parameters)
while stack_status in ['UPDATE_IN_PROGRESS', 'UPDATING', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS']:
time.sleep(5)
if stack_status != stack_status_new:
if stack_status_new is not None:
stack_status = stack_status_new
CONSOLE.info('Stack is: %s', stack_status)
else:
CONSOLE.debug('Stack is: %s', stack_status)
stacks = self._retry(conn.describe_stacks, self._cluster)
if stacks:
stack_status_new = stacks[0].stack_status
if stack_status != 'UPDATE_COMPLETE':
CONSOLE.error('Stack did not come up, status is: %s', stack_status)
self._fetch_stack_events(conn, self._cluster)
sys.exit(1)
self.clear_instance_map_cache()
def pre_destroy_pnda(self):
'''
Use the AWS Cloud Formation API to delete the cloud formation stack that PNDA was installed on
'''
CONSOLE.info('Deleting Cloud Formation stack')
region = self._pnda_env['aws_parameters']['AWS_REGION']
conn = boto.cloudformation.connect_to_region(region)
stack_status = 'DELETING'
stack_status_new = None
self._retry(conn.delete_stack, self._cluster)
while stack_status in ['DELETE_IN_PROGRESS', 'DELETING']:
time.sleep(5)
if stack_status != stack_status_new:
if stack_status_new is not None:
stack_status = stack_status_new
CONSOLE.info('Stack is: %s', stack_status)
else:
CONSOLE.debug('Stack is: %s', stack_status)
try:
stacks = self._retry(conn.describe_stacks, self._cluster)
except:
stacks = []
if stacks:
stack_status_new = stacks[0].stack_status
else:
stack_status_new = 'DELETE_COMPLETE'
def _retry(self, do_func, *args, **kwargs):
ret = None
for _ in xrange(3):
try:
ret = do_func(*args, **kwargs)
break
except ssl.SSLError, exception:
LOG.warning(exception)
return ret
def _fetch_stack_events(self, cfn_cnxn, stack_name):
page_token = True
while page_token is not None:
event_page = cfn_cnxn.describe_stack_events(stack_name, page_token)
for event in event_page:
resource_id = event.logical_resource_id
status = event.resource_status
reason = event.resource_status_reason
message = "%s: %s%s" % (resource_id, status, '' if reason is None else ' - %s' % reason)
if status in ['CREATE_FAILED', 'UPDATE_FAILED'] and reason != 'Resource creation cancelled':
CONSOLE.error(message)
else:
LOG.debug(message)
page_token = event_page.next_token
def _save_cf_resources(self, context, cluster_name, params, template):
params_file = 'cli/logs/%s_%s_cloud-formation-parameters.json' % (cluster_name, context)
CONSOLE.info('Writing Cloud Formation parameters for %s to %s', cluster_name, params_file)
with open(params_file, 'w') as outfile:
json.dump(params, outfile, sort_keys=True, indent=4)
template_file = 'cli/logs/%s_%s_cloud-formation-template.json' % (cluster_name, context)
CONSOLE.info('Writing Cloud Formation template for %s to %s', cluster_name, template_file)
with open(template_file, 'w') as outfile:
json.dump(json.loads(template), outfile, sort_keys=True, indent=4)
def _generate_instance_templates(self, template_data, instance_name, instance_count):
if instance_name in template_data['Resources']:
instance_def = json.dumps(template_data['Resources'].pop(instance_name))
for instance_index in range(0, instance_count):
instance_def_n = instance_def.replace('$node_idx$', str(instance_index))
template_data['Resources']['%s%s' % (instance_name, instance_index)] = json.loads(instance_def_n)
def _generate_template_file(self, flavor, datanodes, opentsdbs, kafkas, zookeepers):
common_filepath = 'cloud-formation/cf-common.json'
with open(common_filepath, 'r') as template_file:
template_data = json.loads(template_file.read())
flavor_filepath = 'cloud-formation/%s/cf-flavor.json' % flavor
with open(flavor_filepath, 'r') as template_file:
flavor_data = json.loads(template_file.read())
for element in flavor_data:
if element not in template_data:
template_data[element] = flavor_data[element]
else:
for child in flavor_data[element]:
template_data[element][child] = flavor_data[element][child]
self._generate_instance_templates(template_data, 'instanceCdhDn', datanodes)
self._generate_instance_templates(template_data, 'instanceOpenTsdb', opentsdbs)
self._generate_instance_templates(template_data, 'instanceKafka', kafkas)
self._generate_instance_templates(template_data, 'instanceZookeeper', zookeepers)
return json.dumps(template_data)
def _check_keypair(self, keyname):
try:
region = self._pnda_env['aws_parameters']['AWS_REGION']
ec2 = boto.ec2.connect_to_region(region)
stored_key = ec2.get_key_pair(keyname)
if stored_key is None:
raise Exception("Key not found %s" % keyname)
CONSOLE.info('Keyfile.......... OK')
except:
CONSOLE.info('Keyfile.......... ERROR')
CONSOLE.error('Failed to find key %s in ec2.', keyname)
CONSOLE.error(traceback.format_exc())
sys.exit(1)
def _check_aws_connection(self):
region = self._pnda_env['aws_parameters']['AWS_REGION']
valid_regions = [valid_region.name for valid_region in boto.ec2.regions()]
if region not in valid_regions:
CONSOLE.info('AWS connection... ERROR')
CONSOLE.error('Failed to connect to cloud formation API, ec2 region "%s" was not valid. Valid options are %s', region, json.dumps(valid_regions))
sys.exit(1)
conn = boto.cloudformation.connect_to_region(region)
if conn is None:
CONSOLE.info('AWS connection... ERROR')
CONSOLE.error('Failed to connect to cloud formation API, verify aws_parameters settings in "pnda_env.yaml" and try again.')
sys.exit(1)
try:
conn.list_stacks()
CONSOLE.info('AWS connection... OK')
except:
CONSOLE.info('AWS connection... ERROR')
CONSOLE.error('Failed to query cloud formation API, verify aws_parameters settings in "pnda_env.yaml" and try again.')
CONSOLE.error(traceback.format_exc())
sys.exit(1)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# This software is licensed to you under the terms of the Apache License, Version 2.0
# (the "License").
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# The code, technical concepts, and all information contained herein, are the property of
# Cisco Technology, Inc.and/or its affiliated entities, under various laws including copyright,
# international treaties, patent, and/or contract.
# Any use of the material herein must be in accordance with the terms of the License.
# All rights not expressly granted by the License are reserved.
# Unless required by applicable law or agreed to separately in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied.
#
# Purpose: Backend implementation for creating PNDA on Amazon Web Services EC2
import sys
import json
import time
import traceback
import ssl
import boto.cloudformation
import boto.ec2
from backend_base import BaseBackend
import pnda_cli_utils as utils
utils.init_logging()
CONSOLE = utils.CONSOLE_LOGGER
LOG = utils.FILE_LOGGER
LOG_FILE_NAME = utils.LOG_FILE_NAME
class CloudFormationBackend(BaseBackend):
'''
Deployment specific implementation for AWS Cloud Formation
'''
def __init__(self, pnda_env, cluster, no_config_check, flavor, keyname, branch, dry_run):
self._dry_run = dry_run
self._exclude_cfn_params = ['AWS_SECRET_ACCESS_KEY', 'AWS_AVAILABILITY_ZONE', 'AWS_REGION', 'AWS_ACCESS_KEY_ID']
super(CloudFormationBackend, self).__init__(
pnda_env, cluster, no_config_check, flavor, self._keyfile_from_keyname(keyname), branch)
def check_target_specific_config(self):
'''
Check AWS specific configuration has been entered correctly
'''
self._check_aws_connection()
name = self._keyname_from_keyfile(self._keyfile)
self._check_keypair(name)
def load_node_config(self):
'''
Load a node config descriptor from a config.json file in the cloud-formation flavor specific directory
'''
node_config_file = file('cloud-formation/%s/config.json' % self._flavor)
config = json.load(node_config_file)
node_config_file.close()
return config
def fill_instance_map(self):
'''
Use the AWS Ec2 API to generate a list of the target instances
'''
CONSOLE.debug('Checking details of created instances')
region = self._pnda_env['aws_parameters']['AWS_REGION']
ec2 = boto.ec2.connect_to_region(region)
reservations = self._retry(ec2.get_all_reservations)
instance_map = {}
for reservation in reservations:
for instance in reservation.instances:
if 'pnda_cluster' in instance.tags and instance.tags['pnda_cluster'] == self._cluster and instance.state == 'running':
CONSOLE.debug(instance.private_ip_address + ' ' + instance.tags['Name'])
instance_map[instance.tags['Name']] = {
"bootstrapped": False,
"public_dns": instance.public_dns_name,
"ip_address": instance.ip_address,
"private_ip_address":instance.private_ip_address,
"name": instance.tags['Name'],
"node_idx": instance.tags['node_idx'],
"node_type": instance.tags['node_type']
}
return instance_map
def pre_install_pnda(self, node_counts):
'''
Use the AWS Cloud Formation API to launch a stack that PNDA can be installed on
The cloud formation stack is defined in json files in the flavor specific cloud-formation directory
'''
template_data = self._generate_template_file(
self._flavor, node_counts['datanodes'], node_counts['opentsdb_nodes'], node_counts['kafka_nodes'], node_counts['zk_nodes'])
region = self._pnda_env['aws_parameters']['AWS_REGION']
aws_availability_zone = self._pnda_env['aws_parameters']['AWS_AVAILABILITY_ZONE']
cf_parameters = [('keyName', self._keyname_from_keyfile(self._keyfile)), ('pndaCluster', self._cluster), ('awsAvailabilityZone', aws_availability_zone)]
for parameter in self._pnda_env['aws_parameters']:
if parameter not in self._exclude_cfn_params:
cf_parameters.append((parameter, self._pnda_env['aws_parameters'][parameter]))
self._save_cf_resources('create_%s' % utils.MILLI_TIME(), self._cluster, cf_parameters, template_data)
if self._dry_run:
CONSOLE.info('Dry run mode completed')
sys.exit(0)
CONSOLE.info('Creating Cloud Formation stack')
conn = boto.cloudformation.connect_to_region(region)
stack_status = 'CREATING'
stack_status_new = None
conn.create_stack(self._cluster,
template_body=template_data,
parameters=cf_parameters)
while stack_status in ['CREATE_IN_PROGRESS', 'CREATING']:
time.sleep(5)
if stack_status != stack_status_new:
if stack_status_new is not None:
stack_status = stack_status_new
CONSOLE.info('Stack is: %s', stack_status)
else:
CONSOLE.debug('Stack is: %s', stack_status)
stacks = self._retry(conn.describe_stacks, self._cluster)
if stacks:
stack_status_new = stacks[0].stack_status
if stack_status != 'CREATE_COMPLETE':
CONSOLE.error('Stack did not come up, status is: %s', stack_status)
self._fetch_stack_events(conn, self._cluster)
sys.exit(1)
self.clear_instance_map_cache()
def pre_expand_pnda(self, node_counts):
'''
Use the AWS Cloud Formation API to launch a stack that PNDA can be installed on
The cloud formation stack is defined in json files in the flavor specific cloud-formation directory
'''
template_data = self._generate_template_file(
self._flavor, node_counts['datanodes'], node_counts['opentsdb_nodes'], node_counts['kafka_nodes'], node_counts['zk_nodes'])
region = self._pnda_env['aws_parameters']['AWS_REGION']
cf_parameters = [('keyName', self._keyname_from_keyfile(self._keyfile)), ('pndaCluster', self._cluster)]
for parameter in self._pnda_env['aws_parameters']:
if parameter not in self._exclude_cfn_params:
cf_parameters.append((parameter, self._pnda_env['aws_parameters'][parameter]))
self._save_cf_resources('expand_%s' % utils.MILLI_TIME(), self._cluster, cf_parameters, template_data)
if self._dry_run:
CONSOLE.info('Dry run mode completed')
sys.exit(0)
CONSOLE.info('Updating Cloud Formation stack')
conn = boto.cloudformation.connect_to_region(region)
stack_status = 'UPDATING'
stack_status_new = None
self._retry(conn.update_stack, self._cluster,
template_body=template_data,
parameters=cf_parameters)
while stack_status in ['UPDATE_IN_PROGRESS', 'UPDATING', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS']:
time.sleep(5)
if stack_status != stack_status_new:
if stack_status_new is not None:
stack_status = stack_status_new
CONSOLE.info('Stack is: %s', stack_status)
else:
CONSOLE.debug('Stack is: %s', stack_status)
stacks = self._retry(conn.describe_stacks, self._cluster)
if stacks:
stack_status_new = stacks[0].stack_status
if stack_status != 'UPDATE_COMPLETE':
CONSOLE.error('Stack did not come up, status is: %s', stack_status)
self._fetch_stack_events(conn, self._cluster)
sys.exit(1)
self.clear_instance_map_cache()
def pre_destroy_pnda(self):
'''
Use the AWS Cloud Formation API to delete the cloud formation stack that PNDA was installed on
'''
CONSOLE.info('Deleting Cloud Formation stack')
region = self._pnda_env['aws_parameters']['AWS_REGION']
conn = boto.cloudformation.connect_to_region(region)
stack_status = 'DELETING'
stack_status_new = None
self._retry(conn.delete_stack, self._cluster)
while stack_status in ['DELETE_IN_PROGRESS', 'DELETING']:
time.sleep(5)
if stack_status != stack_status_new:
if stack_status_new is not None:
stack_status = stack_status_new
CONSOLE.info('Stack is: %s', stack_status)
else:
CONSOLE.debug('Stack is: %s', stack_status)
try:
stacks = self._retry(conn.describe_stacks, self._cluster)
except:
stacks = []
if stacks:
stack_status_new = stacks[0].stack_status
else:
stack_status_new = 'DELETE_COMPLETE'
def _retry(self, do_func, *args, **kwargs):
ret = None
for _ in xrange(3):
try:
ret = do_func(*args, **kwargs)
break
except ssl.SSLError, exception:
LOG.warning(exception)
return ret
def _fetch_stack_events(self, cfn_cnxn, stack_name):
page_token = True
while page_token is not None:
event_page = cfn_cnxn.describe_stack_events(stack_name, page_token)
for event in event_page:
resource_id = event.logical_resource_id
status = event.resource_status
reason = event.resource_status_reason
message = "%s: %s%s" % (resource_id, status, '' if reason is None else ' - %s' % reason)
if status in ['CREATE_FAILED', 'UPDATE_FAILED'] and reason != 'Resource creation cancelled':
CONSOLE.error(message)
else:
LOG.debug(message)
page_token = event_page.next_token
def _save_cf_resources(self, context, cluster_name, params, template):
params_file = 'cli/logs/%s_%s_cloud-formation-parameters.json' % (cluster_name, context)
CONSOLE.info('Writing Cloud Formation parameters for %s to %s', cluster_name, params_file)
with open(params_file, 'w') as outfile:
json.dump(params, outfile, sort_keys=True, indent=4)
template_file = 'cli/logs/%s_%s_cloud-formation-template.json' % (cluster_name, context)
CONSOLE.info('Writing Cloud Formation template for %s to %s', cluster_name, template_file)
with open(template_file, 'w') as outfile:
json.dump(json.loads(template), outfile, sort_keys=True, indent=4)
def _generate_instance_templates(self, template_data, instance_name, instance_count):
if instance_name in template_data['Resources']:
instance_def = json.dumps(template_data['Resources'].pop(instance_name))
for instance_index in range(0, instance_count):
instance_def_n = instance_def.replace('$node_idx$', str(instance_index))
template_data['Resources']['%s%s' % (instance_name, instance_index)] = json.loads(instance_def_n)
def _generate_template_file(self, flavor, datanodes, opentsdbs, kafkas, zookeepers):
common_filepath = 'cloud-formation/cf-common.json'
with open(common_filepath, 'r') as template_file:
template_data = json.loads(template_file.read())
flavor_filepath = 'cloud-formation/%s/cf-flavor.json' % flavor
with open(flavor_filepath, 'r') as template_file:
flavor_data = json.loads(template_file.read())
for element in flavor_data:
if element not in template_data:
template_data[element] = flavor_data[element]
else:
for child in flavor_data[element]:
template_data[element][child] = flavor_data[element][child]
self._generate_instance_templates(template_data, 'instanceCdhDn', datanodes)
self._generate_instance_templates(template_data, 'instanceOpenTsdb', opentsdbs)
self._generate_instance_templates(template_data, 'instanceKafka', kafkas)
self._generate_instance_templates(template_data, 'instanceZookeeper', zookeepers)
return json.dumps(template_data)
def _check_keypair(self, keyname):
try:
region = self._pnda_env['aws_parameters']['AWS_REGION']
ec2 = boto.ec2.connect_to_region(region)
stored_key = ec2.get_key_pair(keyname)
if stored_key is None:
raise Exception("Key not found %s" % keyname)
CONSOLE.info('Keyfile.......... OK')
except:
CONSOLE.info('Keyfile.......... ERROR')
CONSOLE.error('Failed to find key %s in ec2.', keyname)
CONSOLE.error(traceback.format_exc())
sys.exit(1)
def _check_aws_connection(self):
region = self._pnda_env['aws_parameters']['AWS_REGION']
valid_regions = [valid_region.name for valid_region in boto.ec2.regions()]
if region not in valid_regions:
CONSOLE.info('AWS connection... ERROR')
CONSOLE.error('Failed to connect to cloud formation API, ec2 region "%s" was not valid. Valid options are %s', region, json.dumps(valid_regions))
sys.exit(1)
conn = boto.cloudformation.connect_to_region(region)
if conn is None:
CONSOLE.info('AWS connection... ERROR')
CONSOLE.error('Failed to connect to cloud formation API, verify aws_parameters settings in "pnda_env.yaml" and try again.')
sys.exit(1)
try:
conn.list_stacks()
CONSOLE.info('AWS connection... OK')
except:
CONSOLE.info('AWS connection... ERROR')
CONSOLE.error('Failed to query cloud formation API, verify aws_parameters settings in "pnda_env.yaml" and try again.')
CONSOLE.error(traceback.format_exc())
sys.exit(1) | en | 0.866943 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2016 Cisco and/or its affiliates. # This software is licensed to you under the terms of the Apache License, Version 2.0 # (the "License"). # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # The code, technical concepts, and all information contained herein, are the property of # Cisco Technology, Inc.and/or its affiliated entities, under various laws including copyright, # international treaties, patent, and/or contract. # Any use of the material herein must be in accordance with the terms of the License. # All rights not expressly granted by the License are reserved. # Unless required by applicable law or agreed to separately in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. # # Purpose: Backend implementation for creating PNDA on Amazon Web Services EC2 Deployment specific implementation for AWS Cloud Formation Check AWS specific configuration has been entered correctly Load a node config descriptor from a config.json file in the cloud-formation flavor specific directory Use the AWS Ec2 API to generate a list of the target instances Use the AWS Cloud Formation API to launch a stack that PNDA can be installed on The cloud formation stack is defined in json files in the flavor specific cloud-formation directory Use the AWS Cloud Formation API to launch a stack that PNDA can be installed on The cloud formation stack is defined in json files in the flavor specific cloud-formation directory Use the AWS Cloud Formation API to delete the cloud formation stack that PNDA was installed on | 1.897178 | 2 |
Apps/Recommendations/Python/checkColdRecomFeatureMatch.py | jfindlay/Azure-MachineLearning-DataScience | 390 | 6631232 | #
# In this script we check the returned scoring items when the seed item is cold
# In terms of checking, we check if there is any features with the same value.
# In this version, only one seed item is supported.
# list of input files:
# 1. catalog file
# 2. trainining file
# 3. seed file
# 4. the scoring file using cold item support
# Also some file format parameters are provided.
# Another important parameter: cold_upper_bound
# It specifies the largest number of occurrences in
# training is still considered as C2 cold item. If the
# occurrence is C2+1, then it is considered as warm.
#========== Parameter for PT dataset =========
f_prefix = 'PT3'
f_catalog = 'catalog.csv'
f_train = 'train-sorted.csv'
f_seed = 'seed_as_train.csv'
f_recom = 'scores-sar-cold_reversed.tsv'
f_output = 'list_of_recom_no_feature_match.csv'
f_catalog_header = True
f_seed_header = False
f_seed_sep = ','
f_recom_sep = '\t'
f_recom_beginning_comment = True
cold_upper_bound = 2
#========== Parameter for PT dataset =========
# update file names based on f_prefix. Users need to change them
# accordingly based on your own file organization.
f_train = f_prefix + '/' + f_train
f_catalog = f_prefix + '/' + f_catalog
f_seed = f_prefix + '/' + f_seed
f_recom = f_prefix + '/data/' + f_recom
f_output = f_prefix + '/data/' + f_output
#=============================================================================
# The rest should be be changed in running for different datasets.
# Read the catalog file
print('Read the catalog file')
fin_catalog = open(f_catalog)
line = fin_catalog.readline()
D_catalog = {}
if f_catalog_header:
# extract feature name
fnames = line.strip().split(',')[2:]
line = fin_catalog.readline()
else:
# use default feature name
f_num = len(line.strip().split(',')) - 2
fnames = ['f_' + str(i) for i in range(f_num)]
while line:
fs = line.strip().split(',')
itemId = fs[0]
if itemId not in D_catalog:
D_catalog[itemId] = {}
# We need to save all feature values for the current item
fs_feature = fs[2:]
fs_feature_mvalue = [v.strip().strip('"').split(';') for v in fs_feature]
for fi in range(len(fs_feature_mvalue)):
if len(fs_feature_mvalue[fi])==1 and len(fs_feature_mvalue[fi][0])==0:
# This is an empty feature value
pass
else:
# We process non-empty feature value only
fi_value_list = fs_feature_mvalue[fi]
D_catalog[itemId][fi] = {}
for fv in fi_value_list:
D_catalog[itemId][fi][fv] = 1
line = fin_catalog.readline()
fin_catalog.close()
# Read the training file
print('Read the training file')
fin_train = open(f_train)
line = fin_train.readline()
D_item_user = {}
while line:
fs = line.strip().split(',')
userId = fs[0]
itemId = fs[1]
if itemId not in D_item_user:
D_item_user[itemId] = {}
D_item_user[itemId][userId] = 1
line = fin_train.readline()
fin_train.close()
# Read the seed file
print('Read the seed file')
fin_seed = open(f_seed)
D_seed = {}
D_item_type = {}
line = fin_seed.readline()
if f_seed_header:
line = fin_seed.readline()
while line:
fs = line.strip().split(f_seed_sep)
userId = fs[0]
itemId = fs[1]
D_seed[userId] = itemId
# Determine the type of the seed item
if itemId not in D_item_type:
itemFreq = 0
if itemId in D_item_user:
itemFreq = len(D_item_user[itemId])
if itemId in D_catalog:
if itemFreq > cold_upper_bound:
itemType = 'W'
elif itemFreq > 0:
itemType = 'C2'
else:
itemType = 'C1'
else:
# M means item missing in the catalog file
itemType = 'M'
D_item_type[itemId] = itemType
line = fin_seed.readline()
fin_seed.close()
# In this function we compute the pairwise similarity of items
# based on their features.
def compareItemFeatures(D_item1, D_item2):
# This function return the number of matched feature values
# for multi-valued feature. If at least one value is matched,
# we will consider it as matched
f1_index = D_item1.keys()
c_count = 0
for fi in f1_index:
if fi in D_item2:
# if both items have this feature
# then we will compare their feature values
for fv in D_item1[fi].keys():
if fv in D_item2[fi]:
c_count += 1
break
return c_count
# Read the recomdation file
print('Read the recommendation file')
# We use D_item_sim to cache item pairwise similarity
D_item_sim = {}
# We use D_item_nomatch to cache all seed items with unmatched items returned
D_item_nomatch = {}
fout = open(f_output, 'w')
fin_recom = open(f_recom)
line = fin_recom.readline()
if f_recom_beginning_comment:
print('Skip the first few lines of comments')
while line[0]=='#':
line = fin_recom.readline()
# Process the valid lines one by one
while line:
fs = line.strip().split(f_recom_sep)
userId = fs[0]
itemId = fs[1]
if userId in D_seed:
seedItemId = D_seed[userId]
seedItemType = D_item_type[seedItemId]
if seedItemType=='C1' or seedItemType=='C2':
# compare item features
if itemId <= seedItemId:
itemA = itemId
itemB = seedItemId
else:
itemA = seedItemId
itemB = itemId
if itemA not in D_item_sim:
D_item_sim[itemA] = {}
if itemB not in D_item_sim[itemA]:
D_itemA_ft = D_catalog[itemA]
D_itemB_ft = D_catalog[itemB]
D_item_sim[itemA][itemB] = compareItemFeatures(D_itemA_ft, D_itemB_ft)
# logical check
simAB = D_item_sim[itemA][itemB]
if simAB==0:
# the case we need to investigate
fout.write('userId,' + userId + '\n')
fout.write('seedItemId,' + seedItemId + '\n')
fout.write('recomItemId,' + itemId + '\n')
D_item_nomatch[seedItemId] = D_item_nomatch.get(seedItemId, 0) + 1
line = fin_recom.readline()
fin_recom.close()
fout.close()
# For all items in the catalog, determine their types, and summarize number of
# items of different types.
for itemId in D_catalog:
if itemId not in D_item_type:
itemFreq = 0
if itemId in D_item_user:
itemFreq = len(D_item_user[itemId])
if itemFreq > cold_upper_bound:
itemType = 'W'
elif itemFreq > 0:
itemType = 'C2'
else:
itemType = 'C1'
D_item_type[itemId] = itemType
all_item_type_list = list(D_item_type.values())
n_item_warm = all_item_type_list.count('W')
n_item_C1 = all_item_type_list.count('C1')
n_item_C2 = all_item_type_list.count('C2')
# Summarize some statistics in the end
n_item_total = len(D_catalog)
n_seed_nomatch = len(D_item_nomatch)
percent_nomatch = float(n_seed_nomatch) / n_item_total
print('the total number of items in catalog is %d'%n_item_total)
print('the total number of seed items which generate recom items with no feature match is %d'%n_seed_nomatch)
print('the percentage of seed items which generate recom items with no feature match is %f'%percent_nomatch)
print('the total number of warm item is %d'%n_item_warm)
print('the percentage of warm item is %f'%(float(n_item_warm)/n_item_total))
print('the total number of C1 item is %d'%n_item_C1)
print('the percentage of C1 item is %f'%(float(n_item_C1)/n_item_total))
print('the total number of C2 item is %d'%n_item_C2)
print('the percentage of C2 item is %f'%(float(n_item_C2)/n_item_total))
| #
# In this script we check the returned scoring items when the seed item is cold
# In terms of checking, we check if there is any features with the same value.
# In this version, only one seed item is supported.
# list of input files:
# 1. catalog file
# 2. trainining file
# 3. seed file
# 4. the scoring file using cold item support
# Also some file format parameters are provided.
# Another important parameter: cold_upper_bound
# It specifies the largest number of occurrences in
# training is still considered as C2 cold item. If the
# occurrence is C2+1, then it is considered as warm.
#========== Parameter for PT dataset =========
f_prefix = 'PT3'
f_catalog = 'catalog.csv'
f_train = 'train-sorted.csv'
f_seed = 'seed_as_train.csv'
f_recom = 'scores-sar-cold_reversed.tsv'
f_output = 'list_of_recom_no_feature_match.csv'
f_catalog_header = True
f_seed_header = False
f_seed_sep = ','
f_recom_sep = '\t'
f_recom_beginning_comment = True
cold_upper_bound = 2
#========== Parameter for PT dataset =========
# update file names based on f_prefix. Users need to change them
# accordingly based on your own file organization.
f_train = f_prefix + '/' + f_train
f_catalog = f_prefix + '/' + f_catalog
f_seed = f_prefix + '/' + f_seed
f_recom = f_prefix + '/data/' + f_recom
f_output = f_prefix + '/data/' + f_output
#=============================================================================
# The rest should be be changed in running for different datasets.
# Read the catalog file
print('Read the catalog file')
fin_catalog = open(f_catalog)
line = fin_catalog.readline()
D_catalog = {}
if f_catalog_header:
# extract feature name
fnames = line.strip().split(',')[2:]
line = fin_catalog.readline()
else:
# use default feature name
f_num = len(line.strip().split(',')) - 2
fnames = ['f_' + str(i) for i in range(f_num)]
while line:
fs = line.strip().split(',')
itemId = fs[0]
if itemId not in D_catalog:
D_catalog[itemId] = {}
# We need to save all feature values for the current item
fs_feature = fs[2:]
fs_feature_mvalue = [v.strip().strip('"').split(';') for v in fs_feature]
for fi in range(len(fs_feature_mvalue)):
if len(fs_feature_mvalue[fi])==1 and len(fs_feature_mvalue[fi][0])==0:
# This is an empty feature value
pass
else:
# We process non-empty feature value only
fi_value_list = fs_feature_mvalue[fi]
D_catalog[itemId][fi] = {}
for fv in fi_value_list:
D_catalog[itemId][fi][fv] = 1
line = fin_catalog.readline()
fin_catalog.close()
# Read the training file
print('Read the training file')
fin_train = open(f_train)
line = fin_train.readline()
D_item_user = {}
while line:
fs = line.strip().split(',')
userId = fs[0]
itemId = fs[1]
if itemId not in D_item_user:
D_item_user[itemId] = {}
D_item_user[itemId][userId] = 1
line = fin_train.readline()
fin_train.close()
# Read the seed file
print('Read the seed file')
fin_seed = open(f_seed)
D_seed = {}
D_item_type = {}
line = fin_seed.readline()
if f_seed_header:
line = fin_seed.readline()
while line:
fs = line.strip().split(f_seed_sep)
userId = fs[0]
itemId = fs[1]
D_seed[userId] = itemId
# Determine the type of the seed item
if itemId not in D_item_type:
itemFreq = 0
if itemId in D_item_user:
itemFreq = len(D_item_user[itemId])
if itemId in D_catalog:
if itemFreq > cold_upper_bound:
itemType = 'W'
elif itemFreq > 0:
itemType = 'C2'
else:
itemType = 'C1'
else:
# M means item missing in the catalog file
itemType = 'M'
D_item_type[itemId] = itemType
line = fin_seed.readline()
fin_seed.close()
# In this function we compute the pairwise similarity of items
# based on their features.
def compareItemFeatures(D_item1, D_item2):
# This function return the number of matched feature values
# for multi-valued feature. If at least one value is matched,
# we will consider it as matched
f1_index = D_item1.keys()
c_count = 0
for fi in f1_index:
if fi in D_item2:
# if both items have this feature
# then we will compare their feature values
for fv in D_item1[fi].keys():
if fv in D_item2[fi]:
c_count += 1
break
return c_count
# Read the recomdation file
print('Read the recommendation file')
# We use D_item_sim to cache item pairwise similarity
D_item_sim = {}
# We use D_item_nomatch to cache all seed items with unmatched items returned
D_item_nomatch = {}
fout = open(f_output, 'w')
fin_recom = open(f_recom)
line = fin_recom.readline()
if f_recom_beginning_comment:
print('Skip the first few lines of comments')
while line[0]=='#':
line = fin_recom.readline()
# Process the valid lines one by one
while line:
fs = line.strip().split(f_recom_sep)
userId = fs[0]
itemId = fs[1]
if userId in D_seed:
seedItemId = D_seed[userId]
seedItemType = D_item_type[seedItemId]
if seedItemType=='C1' or seedItemType=='C2':
# compare item features
if itemId <= seedItemId:
itemA = itemId
itemB = seedItemId
else:
itemA = seedItemId
itemB = itemId
if itemA not in D_item_sim:
D_item_sim[itemA] = {}
if itemB not in D_item_sim[itemA]:
D_itemA_ft = D_catalog[itemA]
D_itemB_ft = D_catalog[itemB]
D_item_sim[itemA][itemB] = compareItemFeatures(D_itemA_ft, D_itemB_ft)
# logical check
simAB = D_item_sim[itemA][itemB]
if simAB==0:
# the case we need to investigate
fout.write('userId,' + userId + '\n')
fout.write('seedItemId,' + seedItemId + '\n')
fout.write('recomItemId,' + itemId + '\n')
D_item_nomatch[seedItemId] = D_item_nomatch.get(seedItemId, 0) + 1
line = fin_recom.readline()
fin_recom.close()
fout.close()
# For all items in the catalog, determine their types, and summarize number of
# items of different types.
for itemId in D_catalog:
if itemId not in D_item_type:
itemFreq = 0
if itemId in D_item_user:
itemFreq = len(D_item_user[itemId])
if itemFreq > cold_upper_bound:
itemType = 'W'
elif itemFreq > 0:
itemType = 'C2'
else:
itemType = 'C1'
D_item_type[itemId] = itemType
all_item_type_list = list(D_item_type.values())
n_item_warm = all_item_type_list.count('W')
n_item_C1 = all_item_type_list.count('C1')
n_item_C2 = all_item_type_list.count('C2')
# Summarize some statistics in the end
n_item_total = len(D_catalog)
n_seed_nomatch = len(D_item_nomatch)
percent_nomatch = float(n_seed_nomatch) / n_item_total
print('the total number of items in catalog is %d'%n_item_total)
print('the total number of seed items which generate recom items with no feature match is %d'%n_seed_nomatch)
print('the percentage of seed items which generate recom items with no feature match is %f'%percent_nomatch)
print('the total number of warm item is %d'%n_item_warm)
print('the percentage of warm item is %f'%(float(n_item_warm)/n_item_total))
print('the total number of C1 item is %d'%n_item_C1)
print('the percentage of C1 item is %f'%(float(n_item_C1)/n_item_total))
print('the total number of C2 item is %d'%n_item_C2)
print('the percentage of C2 item is %f'%(float(n_item_C2)/n_item_total))
| en | 0.829907 | # # In this script we check the returned scoring items when the seed item is cold # In terms of checking, we check if there is any features with the same value. # In this version, only one seed item is supported. # list of input files: # 1. catalog file # 2. trainining file # 3. seed file # 4. the scoring file using cold item support # Also some file format parameters are provided. # Another important parameter: cold_upper_bound # It specifies the largest number of occurrences in # training is still considered as C2 cold item. If the # occurrence is C2+1, then it is considered as warm. #========== Parameter for PT dataset ========= #========== Parameter for PT dataset ========= # update file names based on f_prefix. Users need to change them # accordingly based on your own file organization. #============================================================================= # The rest should be be changed in running for different datasets. # Read the catalog file # extract feature name # use default feature name # We need to save all feature values for the current item # This is an empty feature value # We process non-empty feature value only # Read the training file # Read the seed file # Determine the type of the seed item # M means item missing in the catalog file # In this function we compute the pairwise similarity of items # based on their features. # This function return the number of matched feature values # for multi-valued feature. If at least one value is matched, # we will consider it as matched # if both items have this feature # then we will compare their feature values # Read the recomdation file # We use D_item_sim to cache item pairwise similarity # We use D_item_nomatch to cache all seed items with unmatched items returned # Process the valid lines one by one # compare item features # logical check # the case we need to investigate # For all items in the catalog, determine their types, and summarize number of # items of different types. # Summarize some statistics in the end | 2.611547 | 3 |
shopyo/api/tests/conftest.py | ChaseKnowlden/shopyo | 235 | 6631233 | """
file: api/tests/conftest.py
All pytest fixtures local only to the api/tests are placed here
"""
import pytest
import os
import shutil
import tempfile
@pytest.fixture
def cleandir():
old_cwd = os.getcwd()
newpath = tempfile.mkdtemp()
os.chdir(newpath)
yield
os.chdir(old_cwd)
shutil.rmtree(newpath)
@pytest.fixture
def restore_cwd():
old = os.getcwd()
yield
os.chdir(old)
@pytest.fixture
def fake_foo_proj(tmp_path):
"""creates a fake shopyo like directory structure as shown below
foo/
foo/
modules/
bar/
static/
bar.css
baz/
static/
baz.css
box__bizhelp/
demo/
demo.py
box__default/
foo/
static/
foo.css
foozoo/
foozoo.py
zoo/
static/
zoo.css
static/
Parameters
----------
tmp_path : pathlib.Path
built in pytest fixture which will provide a temporary directory unique
to the test invocation, created in the base temporary directory.
"""
# create the tmp_path/foo/foo
project_path = tmp_path / "foo" / "foo"
project_path.mkdir(parents=True)
# create the static and modules inside foo/foo
static_path = project_path / "static"
module_path = project_path / "modules"
static_path.mkdir()
module_path.mkdir()
# create the dummy boxes and modules
demo_path = module_path / "box__bizhelp/demo/demo.py"
foo_path = module_path / "box__default/foo/static/foo.css"
zoo_path = module_path / "box__default/zoo/static/zoo.css"
foozoo_path = module_path / "box__default/foozoo/foozoo.py"
bar_path = module_path / "bar/static/bar.css"
baz_path = module_path / "baz/model/baz.py"
demo_path.parent.mkdir(parents=True)
foo_path.parent.mkdir(parents=True)
zoo_path.parent.mkdir(parents=True)
foozoo_path.parent.mkdir(parents=True)
bar_path.parent.mkdir(parents=True)
baz_path.parent.mkdir(parents=True)
demo_path.write_text("demo")
foo_path.write_text("foo")
zoo_path.write_text("zoo")
foozoo_path.write_text("foozoo")
bar_path.write_text("bar")
baz_path.write_text("baz")
# save cwd and chage to test project directory
old = os.getcwd()
os.chdir(project_path)
yield project_path
# restore old cwd directory
os.chdir(old)
| """
file: api/tests/conftest.py
All pytest fixtures local only to the api/tests are placed here
"""
import pytest
import os
import shutil
import tempfile
@pytest.fixture
def cleandir():
old_cwd = os.getcwd()
newpath = tempfile.mkdtemp()
os.chdir(newpath)
yield
os.chdir(old_cwd)
shutil.rmtree(newpath)
@pytest.fixture
def restore_cwd():
old = os.getcwd()
yield
os.chdir(old)
@pytest.fixture
def fake_foo_proj(tmp_path):
"""creates a fake shopyo like directory structure as shown below
foo/
foo/
modules/
bar/
static/
bar.css
baz/
static/
baz.css
box__bizhelp/
demo/
demo.py
box__default/
foo/
static/
foo.css
foozoo/
foozoo.py
zoo/
static/
zoo.css
static/
Parameters
----------
tmp_path : pathlib.Path
built in pytest fixture which will provide a temporary directory unique
to the test invocation, created in the base temporary directory.
"""
# create the tmp_path/foo/foo
project_path = tmp_path / "foo" / "foo"
project_path.mkdir(parents=True)
# create the static and modules inside foo/foo
static_path = project_path / "static"
module_path = project_path / "modules"
static_path.mkdir()
module_path.mkdir()
# create the dummy boxes and modules
demo_path = module_path / "box__bizhelp/demo/demo.py"
foo_path = module_path / "box__default/foo/static/foo.css"
zoo_path = module_path / "box__default/zoo/static/zoo.css"
foozoo_path = module_path / "box__default/foozoo/foozoo.py"
bar_path = module_path / "bar/static/bar.css"
baz_path = module_path / "baz/model/baz.py"
demo_path.parent.mkdir(parents=True)
foo_path.parent.mkdir(parents=True)
zoo_path.parent.mkdir(parents=True)
foozoo_path.parent.mkdir(parents=True)
bar_path.parent.mkdir(parents=True)
baz_path.parent.mkdir(parents=True)
demo_path.write_text("demo")
foo_path.write_text("foo")
zoo_path.write_text("zoo")
foozoo_path.write_text("foozoo")
bar_path.write_text("bar")
baz_path.write_text("baz")
# save cwd and chage to test project directory
old = os.getcwd()
os.chdir(project_path)
yield project_path
# restore old cwd directory
os.chdir(old)
| en | 0.61302 | file: api/tests/conftest.py All pytest fixtures local only to the api/tests are placed here creates a fake shopyo like directory structure as shown below foo/ foo/ modules/ bar/ static/ bar.css baz/ static/ baz.css box__bizhelp/ demo/ demo.py box__default/ foo/ static/ foo.css foozoo/ foozoo.py zoo/ static/ zoo.css static/ Parameters ---------- tmp_path : pathlib.Path built in pytest fixture which will provide a temporary directory unique to the test invocation, created in the base temporary directory. # create the tmp_path/foo/foo # create the static and modules inside foo/foo # create the dummy boxes and modules # save cwd and chage to test project directory # restore old cwd directory | 2.48114 | 2 |
ch01-arrays-and-strings/q09-string-rotation.py | AdityaSinghShekhawat/ctci-python | 0 | 6631234 | #! /usr/bin/python
"""
String Rotation: Assume you have a method isSubstring which checks if one word is a substring of another. Given two strings, s1 and s2, write code to check if s2 is a rotation of s1 using only one call to isSubstring (e.g., "waterbottle" is a rotation of "erbottlewat").
"""
def string_rotation(s1: str, s2: str):
# We have to use is_substring
# In the rotation, there will be two parts of the string which will switch places.
# e.g. in waterbottle and erbottlewat; wat is first part and erbottle is the second part
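# Every rotation of s1 appears as a contiguous substring of s1 + s1,
# so a single substring check against the doubled string is sufficient.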
return len(s1) == len(s2) and is_substring(s1 + s1, s2)
def is_substring(word: str, probable_substring: str):
return probable_substring in word
if __name__ == "__main__":
import sys
for line in sys.stdin:
str1, str2 = line.split(", ")
str2 = str2[:-1] # This is done to remove the ending \n
print(string_rotation(str1, str2))
| #! /usr/bin/python
"""
String Rotation: Assume you have a method isSubstring which checks if one word is a substring of another. Given two strings, s1 and s2, write code to check if s2 is a rotation of s1 using only one call to isSubstring (e.g., "waterbottle" is a rotation of "erbottlewat").
"""
def string_rotation(s1: str, s2: str):
# We have to use is_substring
# In the rotation, there will be two parts of the string which will switch places.
# e.g. in waterbottle and erbottlewat; wat is first part and erbottle is the second part
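# Every rotation of s1 appears as a contiguous substring of s1 + s1,
# so a single substring check against the doubled string is sufficient.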
return len(s1) == len(s2) and is_substring(s1 + s1, s2)
def is_substring(word: str, probable_substring: str):
return probable_substring in word
if __name__ == "__main__":
import sys
for line in sys.stdin:
str1, str2 = line.split(", ")
str2 = str2[:-1] # This is done to remove the ending \n
print(string_rotation(str1, str2))
| en | 0.831731 | #! /usr/bin/python String Rotation:Assumeyou have a method isSubstring which checks if one word is a substring of another. Given two strings, sl and s2, write code to check if s2 is a rotation of sl using only one call to isSubstring (e.g.,"waterbottle" is a rotation of"erbottlewat"). # We have to use is_substring # In the rotation, there will be two parts of the string which will switch places. # e.g. in waterbottle and erbottlewat; wat is first part and erbottle is the second part # This is done to remove the ending \n | 4.330295 | 4 |
ionical/ionical.py | danyul/ionical | 4 | 6631235 | <filename>ionical/ionical.py
"""Multipurpose ics util - changelogs, CSVs, schedule viewing."""
import csv
import re
import sys
import urllib.request
from collections import OrderedDict, defaultdict
from datetime import date, datetime, time, timedelta # , tzinfo
from pathlib import Path
from typing import DefaultDict, Dict, List, NamedTuple, Optional
from typing import Set, Tuple
from textwrap import dedent
import icalendar # type: ignore
import pytz
import recurring_ical_events # type: ignore
DEF_ICS_DIR = "./"
DEF_TIME_FMT = "%H:%M:%S"
DEF_DATE_FMT = "%Y-%m-%d"
DEF_TIME_GROUP_FMT = ""
DEF_SUMMARY_LINE = "Start: {:12} Time: {:12} {} {}"
CHANGELOG_DEF_DATE_FMT = "%b %d, %Y"
CHANGELOG_DEF_TIME_FMT = " %I%p"
CHANGELOG_DEF_TIME_REPLACEMENTS = {" 0": " ", "AM": "am", "PM": "pm"}
DEF_CHANGE_REPORT_FMT = (
" {label:8} {name:17} {start_str} {summary} [comp {compare_date}]\n"
)
DEF_START_TIME_CAT_DICT = {
"shift": {
"All-Day": False,
"AM": [[0, 12]],
"PM": [[12, 24]],
}
}
class Cal:
"""Cal (or entity) with a schedule specified via .ics format."""
def __init__(
self,
cal_id: str,
name: str,
feed_url: Optional[str] = None,
ics_dir: Optional[str] = DEF_ICS_DIR,
timezone=None,
):
self.cal_id = cal_id
self.name = name
self.ics_dir = ics_dir
self.timezone = timezone
if feed_url is not None:
self.schedule_feed: Optional[ScheduleFeed] = ScheduleFeed(
cal=self, url=feed_url
)
else:
self.schedule_feed = None
self._schedule_history = None
def download_latest_schedule_version(self):
assert self.ics_dir is not None, f"No ics_dir specified for {self}."
assert self.schedule_feed is not None, f"No schedule_feed for {self}."
self.schedule_feed.download_latest_schedule_version(ics_dir=self.ics_dir)
# TODO: for performance, probably no need to get a whole new
# ScheduleHistory (Can instead just add the newly downloaded
# schedule to existing schedule history, if available)
self._schedule_history = None # clear cache to force new load
@property
def schedule_history(self):
assert self.ics_dir is not None, f"No ics_dir specified for {self}."
if self._schedule_history is None:
self._schedule_history = ScheduleHistory.from_files_for_cal(
cal=self,
ics_dir=self.ics_dir,
)
return self._schedule_history
@classmethod
def from_tuple(cls, cal_tuple, ics_dir=DEF_ICS_DIR):
id_, name, url, timezone = cal_tuple
timezone = None if timezone == "" else timezone
return cls(
cal_id=id_,
name=name,
feed_url=url,
ics_dir=ics_dir,
timezone=timezone,
)
def current_schedule_and_version_date(self) -> Tuple["Schedule", date]:
try:
d, ical = self.schedule_history.most_recent_version_date_and_ical()
except IndexError:
print(
dedent(
f"""\
Uh oh! Could not find .ics file for the calendar "{self.name}".\n
Are you specifying the correct directory for your ics files?
(command line option -d)?\n
Did you download the latest ics files (option -g)?\n
For help, type 'ionical -h'. Quitting."""
)
)
sys.exit(1)
schedule = Schedule.from_icalendar(ical, self)
return schedule, d
@property
def current_schedule(self) -> "Schedule":
schedule, _ = self.current_schedule_and_version_date()
return schedule
def __str__(self):
return f"{self.name} ({self.cal_id})"
# TODO More flexible implementation to allow user-specification
# of what should be monitored for changes.
# TODO Better handle offset-naive vis-a-vis offset-aware dts.
class MonitoredEventData:
"""Data to be monitored for changes.
ics files read by the icalendar and
recurring_ical_events packages produce
both datetime.date and datetime.datetime
objects. Those objects get stored within MonitoredEventData
objects *as they were generated* by the icalendar package.
"""
def __init__(self, event_date_or_datetime, summary, cal):
self._date_or_datetime = event_date_or_datetime
self._summary = summary
self.cal = cal
def __eq__(self, other) -> bool:
return all(
(
isinstance(other, MonitoredEventData),
self._date_or_datetime == other._date_or_datetime,
self.cal.cal_id == other.cal.cal_id,
self._summary == other._summary,
)
)
def __hash__(self):
return hash((self._date_or_datetime, self._summary, self.cal.cal_id))
@property
def date_or_datetime(self) -> date:
return self._date_or_datetime
@property
def forced_date(self) -> date:
if isinstance(self._date_or_datetime, datetime):
return self._date_or_datetime.date()
else: # it must be a datetime.date
return self._date_or_datetime
@property
def forced_datetime(self) -> datetime:
if isinstance(self._date_or_datetime, datetime):
return self._date_or_datetime
else: # it must be a datetime.date
return datetime.combine(self._date_or_datetime, datetime.min.time())
@property
def time(self) -> Optional[time]:
if isinstance(self._date_or_datetime, datetime):
return self._date_or_datetime.time()
else: # it must be a datetime.date, so there's no time
return None
@property
def local_time(self):
tz = pytz.timezone(self.cal.timezone)
if isinstance(self._date_or_datetime, datetime):
local_datetime = self._date_or_datetime.astimezone(tz)
return local_datetime.time()
else:
return None
@property
def summary(self):
return self._summary
def start_time_cats(self, cat_class) -> Dict[str, str]:
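# Classify this event's start time into each category type in cat_class
# (e.g. AM/PM), using hour ranges plus the special "missing" (no start
# time) and "default" rules.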
start_time_cats = {}
for cat_type, cat_rules in cat_class.items():
default_group_if_not_specified = "No Group Default Specified"
default_group = default_group_if_not_specified
start_time_cats[cat_type] = default_group
# print(cat_rules)
for cat, ranges_list in cat_rules.items():
if ranges_list == "missing":
if not self.time: # TODO: Make sure no falsy error
start_time_cats[cat_type] = cat
break
continue
if ranges_list == "default":
default_group = cat
break
for _range in ranges_list:
if not self.local_time:
break
start_time = self.local_time
lower_bound_in_hours, upper_bound_in_hours = _range
lower_bound_in_mins = lower_bound_in_hours * 60
upper_bound_in_mins = upper_bound_in_hours * 60
event_time_in_mins = start_time.hour * 60 + start_time.minute
if (lower_bound_in_mins <= event_time_in_mins) and (
event_time_in_mins < upper_bound_in_mins
):
start_time_cats[cat_type] = cat
break # not great, because should really break out of 2 loops
if (
default_group != default_group_if_not_specified
and start_time_cats[cat_type] == default_group_if_not_specified
):
start_time_cats[cat_type] = default_group
return start_time_cats
def display(self, fmt_cfg=None, classification_rules=None):
if fmt_cfg is None:
fmt_cfg = {}
date_fmt = sub_cfg(fmt_cfg, "date_fmt", DEF_DATE_FMT)
time_fmt = sub_cfg(fmt_cfg, "time_fmt", DEF_TIME_FMT)
time_replacements = sub_cfg(fmt_cfg, "time_replacements", None)
schedule_summary_line = sub_cfg(fmt_cfg, "event_summary", None)
grouping_field = sub_cfg(fmt_cfg, "time_group", None)
shift_str_template = sub_cfg(fmt_cfg, "time_group_fmt", None)
start_time_cat_dict = sub_cfg(
classification_rules, "by_start_time", DEF_START_TIME_CAT_DICT
)
if schedule_summary_line is None:
schedule_summary_line = DEF_SUMMARY_LINE
date_str = self.forced_date.strftime(date_fmt)
time_str = self.local_time.strftime(time_fmt) if self.local_time else ""
if time_replacements is not None:
for pre, post in time_replacements.items():
time_str = time_str.replace(pre, post)
if shift_str_template is None:
shift_str_template = DEF_TIME_GROUP_FMT
shift_str = shift_str_template.format(
self.start_time_cats(start_time_cat_dict)[grouping_field]
)
return schedule_summary_line.format(
date_str,
time_str,
shift_str,
self.summary,
)
def __str__(self):
return self.display()
class Schedule:
"""Contain a set of MonitoredEventData objects."""
def __init__(self, cal: Cal):
self.events: Set[MonitoredEventData] = set()
self.cal: Cal = cal
@classmethod
def from_icalendar(
cls,
icalCal: icalendar.cal.Calendar,
cal: Cal,
extra_timedelta_days_for_repeating_events: int = 1,
) -> "Schedule":
"""Initialize a schedule from an .ics file (icalCal).
This is the primary way a Schedule object will be
created in this package.
Because the icalendar package will only return the
first occurrence in a repeating event, we need to also obtain
a set of event data using the recurring_ical_events package,
and combine the two sets.
"""
new_instance: Schedule = cls(cal=cal)
kerr_count = 0
events_by_icalendar_lookup: Set[MonitoredEventData] = set()
for ical_event in icalCal.subcomponents:
try:
med: MonitoredEventData = MonitoredEventData(
event_date_or_datetime=ical_event["DTSTART"].dt,
summary=ical_event["SUMMARY"],
cal=new_instance.cal,
)
events_by_icalendar_lookup.add(med)
except KeyError:
# ignore timezone from ics file (maybe implement later?)
if not isinstance(ical_event, icalendar.cal.Timezone):
kerr_count = kerr_count + 1
# TODO KeyError may represent difficulty reading Google Calendar
# ics format's initial TIMEZONE section in ics file. For at least
# one test case, removing that section solved the
# sole encountered KeyError.
if kerr_count > 0:
msg = (
f"{kerr_count} non-TimeZone KeyErrors encountered reading ical"
+ f' for "{cal.cal_id}".\n'
)
sys.stderr.write(msg)
# Get the earliest and latest dates that are explicitly specified in
# the ics file (i.e., not specified by recurrence).
# These will be used when querying for recurrent events.
min_date = min(
[x.forced_date for x in events_by_icalendar_lookup],
default=None,
)
max_date = max(
[x.forced_date for x in events_by_icalendar_lookup],
default=None,
)
# Search for recurrent events that occur a specified # of days
# beyond the latest explicitly-stated event date.
if min_date is None and max_date is None:
new_instance.events = events_by_icalendar_lookup
return new_instance
if min_date is None or max_date is None:
raise ValueError(f"Problem: min_date={min_date}, max_date={max_date}")
max_date += timedelta(days=extra_timedelta_days_for_repeating_events)
events_by_RIE_lookup: Set[MonitoredEventData] = {
MonitoredEventData(
event_date_or_datetime=ical_event["DTSTART"].dt,
summary=ical_event["SUMMARY"],
cal=new_instance.cal,
)
for ical_event in recurring_ical_events.of(icalCal).between(
(min_date.year, min_date.month, min_date.day),
(max_date.year, max_date.month, max_date.day),
)
}
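# Union the explicitly listed events with the recurrence expansions;
# MonitoredEventData is hashable, so duplicates collapse automatically.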
merged_events: Set[MonitoredEventData] = (
events_by_RIE_lookup | events_by_icalendar_lookup
)
new_instance.events = merged_events
return new_instance
def filtered_events(
self,
earliest_date: date = None,
latest_date: date = None,
summary_filters: Optional[List[str]] = None,
) -> List[MonitoredEventData]:
"""Get MonitoredEventData objects filtered by summary and date."""
def meets_filter_criteria(event: MonitoredEventData) -> bool:
return not any(
(
summary_filters
and not any(f in event.summary for f in summary_filters),
earliest_date and event.forced_date < earliest_date,
latest_date and event.forced_date > latest_date,
)
)
if summary_filters is None:
summary_filters = []
return [
event
for event in sorted(self.events, key=lambda x: (x.forced_date, x.summary))
if meets_filter_criteria(event)
]
def display(
self,
earliest_date: date = None,
latest_date: date = None,
summary_filters: Optional[List[str]] = None,
version_date: Optional[date] = None,
fmt_cfg=None,
classification_rules=None,
) -> str:
if summary_filters is None:
summary_filters = []
tz = pytz.timezone(self.cal.timezone)
header = f"\n\nSchedule for {self.cal.name} ({tz})"
if version_date:
header += f" [version {version_date}]:"
header += "\n\n"
body = "\n".join(
[
event.display(fmt_cfg, classification_rules)
for event in self.filtered_events(
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
)
]
)
return header + body
def __str__(self):
return self.display()
class ScheduleFeed:
"""Holder for a Cal's .ics URL."""
downloaded_ics_default_filename_pattern = re.compile(
r"""
^(?P<cal_id>.*) # cal_id at the start (any string)
__ # double _ delimiter
(?P<ymd> # to capture concatenated year/month/day
(?P<year>[0-9]{4}) # 4 digit year
(?P<month>[0-9]{2}) # 2 digit month
(?P<day>[0-9]{2}) # 2 digit day of month
) # end capturing of <ymd>
\.ics # suffix
""",
re.VERBOSE,
)
def __init__(self, cal: Cal, url: str):
self.cal = cal
self.url = url
def ics_filename_for_today(self):
f = f"{self.cal.cal_id}__{date.today().strftime('%Y%m%d')}.ics"
return f
def download_latest_schedule_version(self, ics_dir) -> None:
"""Save the current .ics file version of the Cal's schedule."""
try:
req = urllib.request.Request(self.url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as ics_http_response:
ics_text = ics_http_response.read().decode()
except urllib.error.HTTPError as e:
raise Exception(f"Got an HTTP error: url={self.url}. e={e}")
except Exception as e:
print(f"Excepted url={self.url} e={e}")
raise e
with open(
file=Path(ics_dir) / self.ics_filename_for_today(),
mode="w",
encoding="utf-8",
newline="",
) as ics_file:
ics_file.write(ics_text)
# TODO: consider making SC full class
# if we do that, then switch to direct reference to Cal object
# (rather than indirect lookup via Cal.cal_id)
# ? Pros vs Cons ?
class ScheduleChange(NamedTuple):
"""Data to be displayed on a change log report."""
reference_date: date
comparison_date: date
cal_id: str
event_summary: str
event_start: datetime # TODO: ???? clarify naive/local/aware issues
change_type: str # either "a" for addition, or "r" for removal
class ScheduleHistory:
"""Container for multiple versions of .ics file data."""
def __init__(self, cal):
self.cal: Cal = cal
self.sched_versions_by_date: OrderedDict[
date, icalendar.cal.Calendar
] = OrderedDict([])
@classmethod
def from_files_for_cal(cls, cal: Cal, ics_dir, file_pat=None) -> "ScheduleHistory":
"""Instantiate by reading in .ics files for a Cal.
Determination of which ics files correspond to
Cal is made by matching Cal.cal_id to
the id embedded in the filenames, as specified
by the regex found in ScheduleFeed class.
"""
if file_pat is None:
file_pat = ScheduleFeed.downloaded_ics_default_filename_pattern
new_hx = cls(cal)
d = Path(ics_dir)
files_matches = [
(f, file_pat.match(f.name))
for f in d.iterdir()
if (
file_pat.match(f.name)
and file_pat.match(f.name).group("cal_id") == str(cal.cal_id)
)
]
for f, m in sorted(files_matches, key=lambda x: (x[1].group("ymd"))):
yr, mo, day = m.group("year"), m.group("month"), m.group("day")
vers_date = date(int(yr), int(mo), int(day))
new_hx.sched_versions_by_date[vers_date] = cls.get_icalendar_cal(f)
return new_hx
def get_changes_for_date(self, version_date) -> List[ScheduleChange]:
"""Get a cal's schedule changes for a given date.
Get the ScheduleChanges for the Cal referenced by
this ScheduleHistory object, comparing the version
of calendar events for the date given in the
parameter version_date with the next older schedule
for that cal.
"""
i = list(self.sched_versions_by_date.keys()).index(version_date)
ref_date, ref_vers = list(self.sched_versions_by_date.items())[i]
comp_date, comp_vers = list(self.sched_versions_by_date.items())[i - 1]
reference_schedule = Schedule.from_icalendar(
icalCal=ref_vers,
cal=self.cal,
)
comparison_schedule = Schedule.from_icalendar(
icalCal=comp_vers,
cal=self.cal,
)
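# Set differences between versions: events only in the newer (reference)
# version are additions; events only in the older version are removals.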
additions = reference_schedule.events - comparison_schedule.events
removals = comparison_schedule.events - reference_schedule.events
pid = self.cal.cal_id
a = [
ScheduleChange(ref_date, comp_date, pid, x.summary, x.forced_datetime, "a")
for x in additions
]
r = [
ScheduleChange(ref_date, comp_date, pid, x.summary, x.forced_datetime, "r")
for x in removals
]
return a + r
# TODO: consider directly referencing Cal object from ScheduleChange?
# (rather than indirect lookup via Cal.cal_id)
def change_log(self, num_changelogs=None) -> Dict[date, List[ScheduleChange]]:
"""Get a list of ScheduleChanges from multiple version dates.
Compare each schedule version with the immediately preceding
version (except for the very oldest version, for which there
will be nothing available for comparison.) For each schedule
version date, provide a list of the changes.
"""
length = len(list(self.sched_versions_by_date))
if num_changelogs is None:
change_slice = slice(1, length)
else:
change_slice = slice(max(1, length - num_changelogs), length)
return {
date_: self.get_changes_for_date(date_)
for date_ in list(self.sched_versions_by_date.keys())[change_slice]
}
# TODO implement user option for which versions to analyze?
# TODO allow user to specify sorting/grouping
# TODO consider putting in its own class
@classmethod
def change_log_report_for_cals(
cls,
cals: List[Cal],
earliest_date: Optional[date] = None,
latest_date: Optional[date] = None,
summary_filters: Optional[List[str]] = None,
num_changelogs=None,
changelog_action_dict=None,
fmt_cfg=None,
) -> str:
"""Return a filtered/sorted list of changes.
Return a history of changes for multiple
dates/cals, filtering events by a user-specifiable
list of search terms (matched to an event's
summary field), and a user-specifiable date
range.
If no filters are provided, then
no search filter is applied.
"""
# fmt_cfg = {} if fmt_cfg is None else fmt_cfg
date_fmt = sub_cfg(fmt_cfg, "date_fmt", CHANGELOG_DEF_DATE_FMT)
time_fmt = sub_cfg(fmt_cfg, "time_fmt", CHANGELOG_DEF_TIME_FMT)
time_replacements = sub_cfg(
fmt_cfg, "time_replacement", CHANGELOG_DEF_TIME_REPLACEMENTS
)
change_report_record_template = sub_cfg(
fmt_cfg, "change_report", DEF_CHANGE_REPORT_FMT
)
def cal_by_id(cal_id: str) -> Cal:
for p in cals:
if p.cal_id == cal_id:
return p
raise KeyError(f"Did not find id {cal_id}.")
def meets_filter_criteria(c: ScheduleChange) -> bool:
return not any(
(
summary_filters
and not any(f in c.event_summary for f in summary_filters),
earliest_date and c.event_start.date() < earliest_date,
latest_date and c.event_start.date() > latest_date,
)
)
def local_format_dt(
datetime_: datetime,
cal: Cal,
date_fmt: str = CHANGELOG_DEF_DATE_FMT,
time_fmt=CHANGELOG_DEF_TIME_FMT,
time_replacements=None,
) -> str:
if time_replacements is None:
time_replacements = CHANGELOG_DEF_TIME_REPLACEMENTS
tz_datetime = datetime_.astimezone(pytz.timezone(cal.timezone))
date_str = tz_datetime.date().strftime(date_fmt)
time_str = tz_datetime.time().strftime(time_fmt)
if time_replacements is not None:
for pre, post in time_replacements.items():
time_str = time_str.replace(pre, post)
return date_str + time_str
if summary_filters is None:
summary_filters = []
if changelog_action_dict is None:
changelog_action_dict = {"a": "ADD:", "r": "REMOVE:"}
changes_by_ver_date: DefaultDict[date, List[ScheduleChange]] = defaultdict(list)
for p in cals:
for date_, changes in p.schedule_history.change_log(
num_changelogs=num_changelogs,
).items():
changes_by_ver_date[date_] = changes_by_ver_date[date_] + (
[c for c in changes if meets_filter_criteria(c)]
)
report = "\n" # ""
cbvd = sorted(changes_by_ver_date.items(), key=lambda x: x[0])
for version_date, changes in cbvd:
report += f"\n\nUpdates for sched vers dated {str(version_date)}:"
if len(changes) == 0:
report += " NO CHANGES"
report += "\n\n"
for c in sorted(
changes,
key=lambda x: (
x.event_start.year,
x.event_start.month,
x.event_start.day,
cal_by_id(x.cal_id).name,
x.event_summary,
),
):
cal = cal_by_id(c.cal_id)
event_start_str = local_format_dt(
datetime_=c.event_start,
cal=cal,
date_fmt=date_fmt,
time_fmt=time_fmt,
time_replacements=time_replacements,
)
report += change_report_record_template.format(
name=cal.name,
label=changelog_action_dict[c.change_type],
start_str=event_start_str,
summary=c.event_summary,
compare_date=c.comparison_date,
)
return report
def most_recent_version_date_and_ical(
self,
) -> Tuple[date, icalendar.cal.Calendar]:
"""Return most recent available schedule version/version date."""
last_version_index = len(self.sched_versions_by_date) - 1
return list(self.sched_versions_by_date.items())[last_version_index]
@classmethod
def get_icalendar_cal(cls, filepathname) -> icalendar.cal.Calendar:
with open(filepathname, "r", encoding="utf-8") as file_:
c = icalendar.Calendar.from_ical(file_.read())
return c
class ScheduleWriter:
def __init__(
self,
cals: List[Cal],
earliest_date: Optional[date] = None,
latest_date: Optional[date] = None,
summary_filters: Optional[List[str]] = None,
):
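# Pre-filter each calendar's current schedule, then derive the overall date
# range from the events themselves when explicit bounds are not given.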
self.summary_filters = summary_filters
self.cals = cals
self.events_by_cal_id: Dict[str, List[MonitoredEventData]] = {
cal.cal_id: cal.current_schedule.filtered_events(
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
)
for cal in cals
}
event_dates = [
event.forced_date
for cal_id, events in self.events_by_cal_id.items()
for event in events
]
self.earliest_date = earliest_date if earliest_date else min(event_dates)
self.latest_date = latest_date if latest_date else max(event_dates)
def csv_write(
self,
csv_file,
csv_dialect: str = "excel",
include_empty_dates: bool = False,
conversion_table: Dict[str, str] = None,
classification_rules=None,
csv_cfg=None,
):
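# Writes a grid: a header row of calendar ids, then one row per date whose
# cells hold each calendar's event summaries grouped by start-time category.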
start_time_cat_dict = sub_cfg(
classification_rules, "by_start_time", None
) # DEF_START_TIME_CAT_DICT
if start_time_cat_dict is None:
print("Quitting- can't find by_start_time confg info.\n")
sys.exit(1)
# https://stackoverflow.com/questions/1060279/iterating-through-a-range-of-dates-in-python
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
conversion_table = {} if conversion_table is None else conversion_table
def convert_if_lookup_found(summary):
return conversion_table[summary] if summary in conversion_table else summary
cat_type = sub_cfg(csv_cfg, "grouping")
if cat_type is None:
print("Quitting- can't find grouping confg info.\n")
sys.exit(1)
all_day_field_name = sub_cfg(csv_cfg, "all_day_category", None)
plists_by_date = OrderedDict([])
for date_ in daterange(self.earliest_date, self.latest_date):
plist = list("" for _ in range(len(self.cals)))
for cal in self.cals:
events = self.events_by_cal_id[cal.cal_id]
index_ = self.cals.index(cal)
cat_range_names = start_time_cat_dict[
cat_type
].keys() # csv_cfg["output"][ "order" ]
event_date_groups = {}
for range_name in cat_range_names:
event_date_groups[range_name] = next(
(
x
for x in events
if x.forced_date == date_
and x.start_time_cats(start_time_cat_dict)[cat_type]
== range_name
),
None,
)
shown_options = sub_cfg(csv_cfg, "order")
if shown_options is None:
print("Quitting- can't find 'order' confg info.\n")
sys.exit(1)
csv_exp_str = sub_cfg(csv_cfg, "format")
if csv_exp_str is None:
print("Quitting- can't find 'format' confg info.\n")
sys.exit(1)
not_found_str = sub_cfg(csv_cfg, "text_if_not_present", "None")
text = (
csv_exp_str.format(
*[
convert_if_lookup_found(
event_date_groups[c].summary # type: ignore
)
if event_date_groups[c]
else not_found_str
for c in shown_options
]
)
if any([event_date_groups[c] for c in shown_options])
else ""
)
# The workaround below handles the case where an all-day event must fill in the other shift columns
all_day_spec_case = sub_cfg(
csv_cfg, "all_day_behavior_workaround", False
)
if all_day_spec_case:
if all_day_field_name is None:
print(
"You opted for the all-day "
"workaround but no all-day category found in config."
)
all_day_spec_case = False
if all_day_spec_case and event_date_groups[all_day_field_name]:
if not any([event_date_groups[c] for c in shown_options]):
special_event = convert_if_lookup_found(
event_date_groups[all_day_field_name].summary # type: ignore
)
text = csv_exp_str.format(
*([special_event] * len(shown_options))
)
else:
text = csv_exp_str.format(
*[
convert_if_lookup_found(
event_date_groups[c].summary # type: ignore
)
if event_date_groups[c]
else convert_if_lookup_found(
event_date_groups[ # type: ignore
all_day_field_name
].summary
)
for c in shown_options
]
)
plist[index_] = text
if set(plist) != {""} or include_empty_dates:
plists_by_date[date_] = plist
with open(csv_file, "w", newline="", encoding="utf-8") as f:
writer = csv.writer(f, dialect=csv_dialect)
writer.writerow([""] + [p.cal_id for p in self.cals])
for date_, plist in plists_by_date.items():
writer.writerow([date_] + plist)
def sub_cfg(
cfg: Optional[Dict],
sub_key: str,
default_val=None,
noisy: bool = False,
success_msg: str = "Located config sub_key: {0}. Value: {1}.",
no_sub_key_msg: str = "Could not locate config sub_key '{0}'. "
"Setting {0} to default value: {1}.",
no_cfg_msg: str = "No config dict to seek sub_key '{0}'. "
"Setting {0} to default value: {1}.",
):
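# Convenience accessor: return cfg[sub_key] when present, otherwise fall
# back to default_val (optionally reporting what happened when noisy=True).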
if cfg is None:
if noisy:
print(no_cfg_msg.format(sub_key, default_val))
return default_val
else:
try:
if noisy:
print(success_msg.format(sub_key, cfg[sub_key]))
return cfg[sub_key]
except KeyError:
if noisy:
print(no_sub_key_msg.format(sub_key, default_val))
return default_val
def main(
cals_data: List[Tuple[str, str, str, str]],
cals_filter: Optional[List[str]] = None,
ics_dir=DEF_ICS_DIR,
download_option: bool = False,
show_schedule: bool = False,
show_changelog: bool = False,
csv_export_file: str = None,
earliest_date: Optional[date] = None,
latest_date: Optional[date] = None,
summary_filters: Optional[List[str]] = None,
num_changelogs=None, # (for changelogs)
cfg=None,
verbose=0,
) -> None:
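# High-level flow: build Cal objects, optionally download fresh .ics files,
# then emit whichever outputs were requested (changelog, schedule view, CSV).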
output = ""
classification_rules = sub_cfg(cfg, "event_classifications")
fmt_cfg = sub_cfg(cfg, "formatting")
all_cals = [
Cal.from_tuple(cal_tuple=cal_tuple, ics_dir=ics_dir) for cal_tuple in cals_data
]
if cals_filter:
chosen_cals = [p for p in all_cals if p.cal_id in cals_filter]
else:
chosen_cals = all_cals
if download_option:
for p in chosen_cals:
p.download_latest_schedule_version()
if show_changelog:
report = ScheduleHistory.change_log_report_for_cals(
cals=chosen_cals,
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
num_changelogs=num_changelogs,
fmt_cfg=sub_cfg(fmt_cfg, "changelog"),
)
output += report
if show_schedule:
for cal in chosen_cals:
schedule, version_date = cal.current_schedule_and_version_date()
schedule_display = schedule.display(
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
version_date=version_date,
fmt_cfg=sub_cfg(fmt_cfg, "schedule_view"),
classification_rules=classification_rules,
)
output += schedule_display
if csv_export_file:
csv_cfg = sub_cfg(cfg, "csv")
csv_substitutions = sub_cfg(csv_cfg, "substitutions", {})
writer = ScheduleWriter(
cals=chosen_cals,
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
)
empty = sub_cfg(csv_cfg, "include_empty_dates", verbose, False)
writer.csv_write(
conversion_table=csv_substitutions,
csv_file=csv_export_file,
include_empty_dates=empty,
classification_rules=classification_rules,
csv_cfg=csv_cfg,
)
print(output, end="")
| <filename>ionical/ionical.py
"""Multipurpose ics util - changelogs, CSVs, schedule viewing."""
import csv
import re
import sys
import urllib.request
from collections import OrderedDict, defaultdict
from datetime import date, datetime, time, timedelta # , tzinfo
from pathlib import Path
from typing import DefaultDict, Dict, List, NamedTuple, Optional
from typing import Set, Tuple
from textwrap import dedent
import icalendar # type: ignore
import pytz
import recurring_ical_events # type: ignore
DEF_ICS_DIR = "./"
DEF_TIME_FMT = "%H:%M:%S"
DEF_DATE_FMT = "%Y-%m-%d"
DEF_TIME_GROUP_FMT = ""
DEF_SUMMARY_LINE = "Start: {:12} Time: {:12} {} {}"
CHANGELOG_DEF_DATE_FMT = "%b %d, %Y"
CHANGELOG_DEF_TIME_FMT = " %I%p"
CHANGELOG_DEF_TIME_REPLACEMENTS = {" 0": " ", "AM": "am", "PM": "pm"}
DEF_CHANGE_REPORT_FMT = (
" {label:8} {name:17} {start_str} {summary} [comp {compare_date}]\n"
)
DEF_START_TIME_CAT_DICT = {
"shift": {
"All-Day": False,
"AM": [[0, 12]],
"PM": [[12, 24]],
}
}
class Cal:
"""Cal (or entity) with a schedule specified via .ics format."""
def __init__(
self,
cal_id: str,
name: str,
feed_url: Optional[str] = None,
ics_dir: Optional[str] = DEF_ICS_DIR,
timezone=None,
):
self.cal_id = cal_id
self.name = name
self.ics_dir = ics_dir
self.timezone = timezone
if feed_url is not None:
self.schedule_feed: Optional[ScheduleFeed] = ScheduleFeed(
cal=self, url=feed_url
)
else:
self.schedule_feed = None
self._schedule_history = None
def download_latest_schedule_version(self):
assert self.ics_dir is not None, f"No ics_dir specified for {self}."
assert self.schedule_feed is not None, f"No schedule_feed for {self}."
self.schedule_feed.download_latest_schedule_version(ics_dir=self.ics_dir)
# TODO: for performance, probably no need to get a whole new
# ScheduleHistory (Can instead just add the newly downloaded
# schedule to existing schedule history, if available)
self._schedule_history = None # clear cache to force new load
@property
def schedule_history(self):
assert self.ics_dir is not None, f"No ics_dir specified for {self}."
if self._schedule_history is None:
self._schedule_history = ScheduleHistory.from_files_for_cal(
cal=self,
ics_dir=self.ics_dir,
)
return self._schedule_history
@classmethod
def from_tuple(cls, cal_tuple, ics_dir=DEF_ICS_DIR):
id_, name, url, timezone = cal_tuple
timezone = None if timezone == "" else timezone
return cls(
cal_id=id_,
name=name,
feed_url=url,
ics_dir=ics_dir,
timezone=timezone,
)
def current_schedule_and_version_date(self) -> Tuple["Schedule", date]:
try:
d, ical = self.schedule_history.most_recent_version_date_and_ical()
except IndexError:
print(
dedent(
f"""\
Uh oh! Could not find .ics file for the calendar "{self.name}".\n
Are you specifying the correct directory for your ics files?
(command line option -d)?\n
Did you download the latest ics files (option -g)?\n
For help, type 'ionical -h'. Quitting."""
)
)
sys.exit(1)
schedule = Schedule.from_icalendar(ical, self)
return schedule, d
@property
def current_schedule(self) -> "Schedule":
schedule, _ = self.current_schedule_and_version_date()
return schedule
def __str__(self):
return f"{self.name} ({self.cal_id})"
# TODO More flexible implementation to allow user-specification
# of what should be monitored for changes.
# TODO Better handle offset-naive vis-a-vis offset-aware dts.
class MonitoredEventData:
"""Data to be monitored for changes.
ics files read by the icalendar and
recurring_ical_events packages produce
both datetime.date and datetime.datetime
objects. Those objects get stored within MonitoredEventData
objects *as they were generated* by the icalendar package.
"""
def __init__(self, event_date_or_datetime, summary, cal):
self._date_or_datetime = event_date_or_datetime
self._summary = summary
self.cal = cal
def __eq__(self, other) -> bool:
return all(
(
isinstance(other, MonitoredEventData),
self._date_or_datetime == other._date_or_datetime,
self.cal.cal_id == other.cal.cal_id,
self._summary == other._summary,
)
)
def __hash__(self):
return hash((self._date_or_datetime, self._summary, self.cal.cal_id))
@property
def date_or_datetime(self) -> date:
return self._date_or_datetime
@property
def forced_date(self) -> date:
if isinstance(self._date_or_datetime, datetime):
return self._date_or_datetime.date()
else: # it must be a datetime.date
return self._date_or_datetime
@property
def forced_datetime(self) -> datetime:
if isinstance(self._date_or_datetime, datetime):
return self._date_or_datetime
else: # it must be a datetime.date
return datetime.combine(self._date_or_datetime, datetime.min.time())
@property
def time(self) -> Optional[time]:
if isinstance(self._date_or_datetime, datetime):
return self._date_or_datetime.time()
else: # it must be a datetime.date, so there's no time
return None
@property
def local_time(self):
tz = pytz.timezone(self.cal.timezone)
if isinstance(self._date_or_datetime, datetime):
local_datetime = self._date_or_datetime.astimezone(tz)
return local_datetime.time()
else:
return None
@property
def summary(self):
return self._summary
def start_time_cats(self, cat_class) -> Dict[str, str]:
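# Classify this event's start time into each category type in cat_class
# (e.g. AM/PM), using hour ranges plus the special "missing" (no start
# time) and "default" rules.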
start_time_cats = {}
for cat_type, cat_rules in cat_class.items():
default_group_if_not_specified = "No Group Default Specified"
default_group = default_group_if_not_specified
start_time_cats[cat_type] = default_group
# print(cat_rules)
for cat, ranges_list in cat_rules.items():
if ranges_list == "missing":
if not self.time: # TODO: Make sure no falsy error
start_time_cats[cat_type] = cat
break
continue
if ranges_list == "default":
default_group = cat
break
for _range in ranges_list:
if not self.local_time:
break
start_time = self.local_time
lower_bound_in_hours, upper_bound_in_hours = _range
lower_bound_in_mins = lower_bound_in_hours * 60
upper_bound_in_mins = upper_bound_in_hours * 60
event_time_in_mins = start_time.hour * 60 + start_time.minute
if (lower_bound_in_mins <= event_time_in_mins) and (
event_time_in_mins < upper_bound_in_mins
):
start_time_cats[cat_type] = cat
break # not great, because should really break out of 2 loops
if (
default_group != default_group_if_not_specified
and start_time_cats[cat_type] == default_group_if_not_specified
):
start_time_cats[cat_type] = default_group
return start_time_cats
def display(self, fmt_cfg=None, classification_rules=None):
if fmt_cfg is None:
fmt_cfg = {}
date_fmt = sub_cfg(fmt_cfg, "date_fmt", DEF_DATE_FMT)
time_fmt = sub_cfg(fmt_cfg, "time_fmt", DEF_TIME_FMT)
time_replacements = sub_cfg(fmt_cfg, "time_replacements", None)
schedule_summary_line = sub_cfg(fmt_cfg, "event_summary", None)
grouping_field = sub_cfg(fmt_cfg, "time_group", None)
shift_str_template = sub_cfg(fmt_cfg, "time_group_fmt", None)
start_time_cat_dict = sub_cfg(
classification_rules, "by_start_time", DEF_START_TIME_CAT_DICT
)
if schedule_summary_line is None:
schedule_summary_line = DEF_SUMMARY_LINE
date_str = self.forced_date.strftime(date_fmt)
time_str = self.local_time.strftime(time_fmt) if self.local_time else ""
if time_replacements is not None:
for pre, post in time_replacements.items():
time_str = time_str.replace(pre, post)
if shift_str_template is None:
shift_str_template = DEF_TIME_GROUP_FMT
shift_str = shift_str_template.format(
self.start_time_cats(start_time_cat_dict)[grouping_field]
)
return schedule_summary_line.format(
date_str,
time_str,
shift_str,
self.summary,
)
def __str__(self):
return self.display()
class Schedule:
"""Contain a set of MonitoredEventData objects."""
def __init__(self, cal: Cal):
self.events: Set[MonitoredEventData] = set()
self.cal: Cal = cal
@classmethod
def from_icalendar(
cls,
icalCal: icalendar.cal.Calendar,
cal: Cal,
extra_timedelta_days_for_repeating_events: int = 1,
) -> "Schedule":
"""Initialize a schedule from an .ics file (icalCal).
This is the primary way a Schedule object will be
created in this package.
Because the icalendar package will only return the
first occurrence in a repeating event, we need to also obtain
a set of event data using the recurring_ical_events package,
and combine the two sets.
"""
new_instance: Schedule = cls(cal=cal)
kerr_count = 0
events_by_icalendar_lookup: Set[MonitoredEventData] = set()
for ical_event in icalCal.subcomponents:
try:
med: MonitoredEventData = MonitoredEventData(
event_date_or_datetime=ical_event["DTSTART"].dt,
summary=ical_event["SUMMARY"],
cal=new_instance.cal,
)
events_by_icalendar_lookup.add(med)
except KeyError:
# ignore timezone from ics file (maybe implement later?)
if not isinstance(ical_event, icalendar.cal.Timezone):
kerr_count = kerr_count + 1
# TODO KeyError may represent difficulty reading Google Calendar
# ics format's initial TIMEZONE section in ics file. For at least
# one test case, removing that section solved the
# sole encountered KeyError.
if kerr_count > 0:
msg = (
f"{kerr_count} non-TimeZone KeyErrors encountered reading ical"
+ f' for "{cal.cal_id}".\n'
)
sys.stderr.write(msg)
# Get the earliest and latest dates that are explicitly specified in
# the ics file (i.e., not specified by recurrence).
# These will be used when querying for recurrent events.
min_date = min(
[x.forced_date for x in events_by_icalendar_lookup],
default=None,
)
max_date = max(
[x.forced_date for x in events_by_icalendar_lookup],
default=None,
)
# Search for recurrent events that occur a specified # of days
# beyond the latest explicitly-stated event date.
if min_date is None and max_date is None:
new_instance.events = events_by_icalendar_lookup
return new_instance
if min_date is None or max_date is None:
raise ValueError(f"Problem: min_date={min_date}, max_date={max_date}")
max_date += timedelta(days=extra_timedelta_days_for_repeating_events)
events_by_RIE_lookup: Set[MonitoredEventData] = {
MonitoredEventData(
event_date_or_datetime=ical_event["DTSTART"].dt,
summary=ical_event["SUMMARY"],
cal=new_instance.cal,
)
for ical_event in recurring_ical_events.of(icalCal).between(
(min_date.year, min_date.month, min_date.day),
(max_date.year, max_date.month, max_date.day),
)
}
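# Union the explicitly listed events with the recurrence expansions;
# MonitoredEventData is hashable, so duplicates collapse automatically.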
merged_events: Set[MonitoredEventData] = (
events_by_RIE_lookup | events_by_icalendar_lookup
)
new_instance.events = merged_events
return new_instance
def filtered_events(
self,
earliest_date: date = None,
latest_date: date = None,
summary_filters: Optional[List[str]] = None,
) -> List[MonitoredEventData]:
"""Get MonitoredEventData objects filtered by summary and date."""
def meets_filter_criteria(event: MonitoredEventData) -> bool:
return not any(
(
summary_filters
and not any(f in event.summary for f in summary_filters),
earliest_date and event.forced_date < earliest_date,
latest_date and event.forced_date > latest_date,
)
)
if summary_filters is None:
summary_filters = []
return [
event
for event in sorted(self.events, key=lambda x: (x.forced_date, x.summary))
if meets_filter_criteria(event)
]
def display(
self,
earliest_date: date = None,
latest_date: date = None,
summary_filters: Optional[List[str]] = None,
version_date: Optional[date] = None,
fmt_cfg=None,
classification_rules=None,
) -> str:
if summary_filters is None:
summary_filters = []
tz = pytz.timezone(self.cal.timezone)
header = f"\n\nSchedule for {self.cal.name} ({tz})"
if version_date:
header += f" [version {version_date}]:"
header += "\n\n"
body = "\n".join(
[
event.display(fmt_cfg, classification_rules)
for event in self.filtered_events(
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
)
]
)
return header + body
def __str__(self):
return self.display()
class ScheduleFeed:
"""Holder for a Cal's .ics URL."""
downloaded_ics_default_filename_pattern = re.compile(
r"""
^(?P<cal_id>.*) # cal_id at the start (any string)
__ # double _ delimiter
(?P<ymd> # to capture concatenated year/month/day
(?P<year>[0-9]{4}) # 4 digit year
(?P<month>[0-9]{2}) # 2 digit month
(?P<day>[0-9]{2}) # 2 digit day of month
) # end capturing of <ymd>
\.ics # suffix
""",
re.VERBOSE,
)
def __init__(self, cal: Cal, url: str):
self.cal = cal
self.url = url
def ics_filename_for_today(self):
f = f"{self.cal.cal_id}__{date.today().strftime('%Y%m%d')}.ics"
return f
def download_latest_schedule_version(self, ics_dir) -> None:
"""Save the current .ics file version of the Cal's schedule."""
try:
req = urllib.request.Request(self.url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as ics_http_response:
ics_text = ics_http_response.read().decode()
except urllib.error.HTTPError as e:
raise Exception(f"Got an HTTP error: url={self.url}. e={e}")
except Exception as e:
print(f"Excepted url={self.url} e={e}")
raise e
with open(
file=Path(ics_dir) / self.ics_filename_for_today(),
mode="w",
encoding="utf-8",
newline="",
) as ics_file:
ics_file.write(ics_text)
# TODO: consider making SC full class
# if we do that, then switch to direct reference to Cal object
# (rather than indirect lookup via Cal.cal_id)
# ? Pros vs Cons ?
class ScheduleChange(NamedTuple):
"""Data to be displayed on a change log report."""
reference_date: date
comparison_date: date
cal_id: str
event_summary: str
event_start: datetime # TODO: ???? clarify naive/local/aware issues
change_type: str # either "a" for addition, or "r" for removal
class ScheduleHistory:
"""Container for multiple versions of .ics file data."""
def __init__(self, cal):
self.cal: Cal = cal
self.sched_versions_by_date: OrderedDict[
date, icalendar.cal.Calendar
] = OrderedDict([])
@classmethod
def from_files_for_cal(cls, cal: Cal, ics_dir, file_pat=None) -> "ScheduleHistory":
"""Instantiate by reading in .ics files for a Cal.
Determination of which ics files correspond to
Cal is made by matching Cal.cal_id to
the id embedded in the filenames, as specified
by the regex found in ScheduleFeed class.
"""
if file_pat is None:
file_pat = ScheduleFeed.downloaded_ics_default_filename_pattern
new_hx = cls(cal)
d = Path(ics_dir)
files_matches = [
(f, file_pat.match(f.name))
for f in d.iterdir()
if (
file_pat.match(f.name)
and file_pat.match(f.name).group("cal_id") == str(cal.cal_id)
)
]
for f, m in sorted(files_matches, key=lambda x: (x[1].group("ymd"))):
yr, mo, day = m.group("year"), m.group("month"), m.group("day")
vers_date = date(int(yr), int(mo), int(day))
new_hx.sched_versions_by_date[vers_date] = cls.get_icalendar_cal(f)
return new_hx
def get_changes_for_date(self, version_date) -> List[ScheduleChange]:
"""Get a cal's schedule changes for a given date.
Get the ScheduleChanges for the Cal referenced by
this ScheduleHistory object, comparing the version
of calendar events for the date given in the
parameter version_date with the next older schedule
for that cal.
"""
i = list(self.sched_versions_by_date.keys()).index(version_date)
ref_date, ref_vers = list(self.sched_versions_by_date.items())[i]
comp_date, comp_vers = list(self.sched_versions_by_date.items())[i - 1]
reference_schedule = Schedule.from_icalendar(
icalCal=ref_vers,
cal=self.cal,
)
comparison_schedule = Schedule.from_icalendar(
icalCal=comp_vers,
cal=self.cal,
)
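# Set differences between versions: events only in the newer (reference)
# version are additions; events only in the older version are removals.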
additions = reference_schedule.events - comparison_schedule.events
removals = comparison_schedule.events - reference_schedule.events
pid = self.cal.cal_id
a = [
ScheduleChange(ref_date, comp_date, pid, x.summary, x.forced_datetime, "a")
for x in additions
]
r = [
ScheduleChange(ref_date, comp_date, pid, x.summary, x.forced_datetime, "r")
for x in removals
]
return a + r
# TODO: consider directly referencing Cal object from ScheduleChange?
# (rather than indirect lookup via Cal.cal_id)
def change_log(self, num_changelogs=None) -> Dict[date, List[ScheduleChange]]:
"""Get a list of ScheduleChanges from multiple version dates.
Compare each schedule version with the immediately preceding
version (except for the very oldest version, for which there
will be nothing available for comparison.) For each schedule
version date, provide a list of the changes.
"""
length = len(list(self.sched_versions_by_date))
if num_changelogs is None:
change_slice = slice(1, length)
else:
change_slice = slice(max(1, length - num_changelogs), length)
return {
date_: self.get_changes_for_date(date_)
for date_ in list(self.sched_versions_by_date.keys())[change_slice]
}
# TODO implement user option for which versions to analyze?
# TODO allow user to specify sorting/grouping
# TODO consider putting in its own class
@classmethod
def change_log_report_for_cals(
cls,
cals: List[Cal],
earliest_date: Optional[date] = None,
latest_date: Optional[date] = None,
summary_filters: Optional[List[str]] = None,
num_changelogs=None,
changelog_action_dict=None,
fmt_cfg=None,
) -> str:
"""Return a filtered/sorted list of changes.
Return a history of changes for multiple
dates/cals, filtering events by a user-specifiable
list of search terms (matched to an event's
summary field), and a user-specifiable date
range.
If no filters are provided, then
no search filter is applied.
"""
# fmt_cfg = {} if fmt_cfg is None else fmt_cfg
date_fmt = sub_cfg(fmt_cfg, "date_fmt", CHANGELOG_DEF_DATE_FMT)
time_fmt = sub_cfg(fmt_cfg, "time_fmt", CHANGELOG_DEF_TIME_FMT)
time_replacements = sub_cfg(
fmt_cfg, "time_replacement", CHANGELOG_DEF_TIME_REPLACEMENTS
)
change_report_record_template = sub_cfg(
fmt_cfg, "change_report", DEF_CHANGE_REPORT_FMT
)
def cal_by_id(cal_id: str) -> Cal:
for p in cals:
if p.cal_id == cal_id:
return p
raise KeyError(f"Did not find id {cal_id}.")
def meets_filter_criteria(c: ScheduleChange) -> bool:
return not any(
(
summary_filters
and not any(f in c.event_summary for f in summary_filters),
earliest_date and c.event_start.date() < earliest_date,
latest_date and c.event_start.date() > latest_date,
)
)
def local_format_dt(
datetime_: datetime,
cal: Cal,
date_fmt: str = CHANGELOG_DEF_DATE_FMT,
time_fmt=CHANGELOG_DEF_TIME_FMT,
time_replacements=None,
) -> str:
if time_replacements is None:
time_replacements = CHANGELOG_DEF_TIME_REPLACEMENTS
tz_datetime = datetime_.astimezone(pytz.timezone(cal.timezone))
date_str = tz_datetime.date().strftime(date_fmt)
time_str = tz_datetime.time().strftime(time_fmt)
if time_replacements is not None:
for pre, post in time_replacements.items():
time_str = time_str.replace(pre, post)
return date_str + time_str
if summary_filters is None:
summary_filters = []
if changelog_action_dict is None:
changelog_action_dict = {"a": "ADD:", "r": "REMOVE:"}
changes_by_ver_date: DefaultDict[date, List[ScheduleChange]] = defaultdict(list)
for p in cals:
for date_, changes in p.schedule_history.change_log(
num_changelogs=num_changelogs,
).items():
changes_by_ver_date[date_] = changes_by_ver_date[date_] + (
[c for c in changes if meets_filter_criteria(c)]
)
report = "\n" # ""
cbvd = sorted(changes_by_ver_date.items(), key=lambda x: x[0])
for version_date, changes in cbvd:
report += f"\n\nUpdates for sched vers dated {str(version_date)}:"
if len(changes) == 0:
report += " NO CHANGES"
report += "\n\n"
for c in sorted(
changes,
key=lambda x: (
x.event_start.year,
x.event_start.month,
x.event_start.day,
cal_by_id(x.cal_id).name,
x.event_summary,
),
):
cal = cal_by_id(c.cal_id)
event_start_str = local_format_dt(
datetime_=c.event_start,
cal=cal,
date_fmt=date_fmt,
time_fmt=time_fmt,
time_replacements=time_replacements,
)
report += change_report_record_template.format(
name=cal.name,
label=changelog_action_dict[c.change_type],
start_str=event_start_str,
summary=c.event_summary,
compare_date=c.comparison_date,
)
return report
def most_recent_version_date_and_ical(
self,
) -> Tuple[date, icalendar.cal.Calendar]:
"""Return most recent available schedule version/version date."""
last_version_index = len(self.sched_versions_by_date) - 1
return list(self.sched_versions_by_date.items())[last_version_index]
@classmethod
def get_icalendar_cal(cls, filepathname) -> icalendar.cal.Calendar:
with open(filepathname, "r", encoding="utf-8") as file_:
c = icalendar.Calendar.from_ical(file_.read())
return c
class ScheduleWriter:
def __init__(
self,
cals: List[Cal],
earliest_date: Optional[date] = None,
latest_date: Optional[date] = None,
summary_filters: Optional[List[str]] = None,
):
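# Pre-filter each calendar's current schedule, then derive the overall date
# range from the events themselves when explicit bounds are not given.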
self.summary_filters = summary_filters
self.cals = cals
self.events_by_cal_id: Dict[str, List[MonitoredEventData]] = {
cal.cal_id: cal.current_schedule.filtered_events(
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
)
for cal in cals
}
event_dates = [
event.forced_date
for cal_id, events in self.events_by_cal_id.items()
for event in events
]
self.earliest_date = earliest_date if earliest_date else min(event_dates)
self.latest_date = latest_date if latest_date else max(event_dates)
def csv_write(
self,
csv_file,
csv_dialect: str = "excel",
include_empty_dates: bool = False,
conversion_table: Dict[str, str] = None,
classification_rules=None,
csv_cfg=None,
):
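# Writes a grid: a header row of calendar ids, then one row per date whose
# cells hold each calendar's event summaries grouped by start-time category.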
start_time_cat_dict = sub_cfg(
classification_rules, "by_start_time", None
) # DEF_START_TIME_CAT_DICT
if start_time_cat_dict is None:
print("Quitting- can't find by_start_time confg info.\n")
sys.exit(1)
# https://stackoverflow.com/questions/1060279/iterating-through-a-range-of-dates-in-python
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
conversion_table = {} if conversion_table is None else conversion_table
def convert_if_lookup_found(summary):
return conversion_table[summary] if summary in conversion_table else summary
cat_type = sub_cfg(csv_cfg, "grouping")
if cat_type is None:
print("Quitting- can't find grouping confg info.\n")
sys.exit(1)
all_day_field_name = sub_cfg(csv_cfg, "all_day_category", None)
plists_by_date = OrderedDict([])
for date_ in daterange(self.earliest_date, self.latest_date):
plist = list("" for _ in range(len(self.cals)))
for cal in self.cals:
events = self.events_by_cal_id[cal.cal_id]
index_ = self.cals.index(cal)
cat_range_names = start_time_cat_dict[
cat_type
].keys() # csv_cfg["output"][ "order" ]
event_date_groups = {}
for range_name in cat_range_names:
event_date_groups[range_name] = next(
(
x
for x in events
if x.forced_date == date_
and x.start_time_cats(start_time_cat_dict)[cat_type]
== range_name
),
None,
)
shown_options = sub_cfg(csv_cfg, "order")
if shown_options is None:
print("Quitting- can't find 'order' confg info.\n")
sys.exit(1)
csv_exp_str = sub_cfg(csv_cfg, "format")
if csv_exp_str is None:
print("Quitting- can't find 'format' confg info.\n")
sys.exit(1)
not_found_str = sub_cfg(csv_cfg, "text_if_not_present", "None")
text = (
csv_exp_str.format(
*[
convert_if_lookup_found(
event_date_groups[c].summary # type: ignore
)
if event_date_groups[c]
else not_found_str
for c in shown_options
]
)
if any([event_date_groups[c] for c in shown_options])
else ""
)
# The workaround below handles the case where an all-day event must fill in the other shift columns
all_day_spec_case = sub_cfg(
csv_cfg, "all_day_behavior_workaround", False
)
if all_day_spec_case:
if all_day_field_name is None:
print(
"You opted for the all-day "
"workaround but no all-day category found in config."
)
all_day_spec_case = False
if all_day_spec_case and event_date_groups[all_day_field_name]:
if not any([event_date_groups[c] for c in shown_options]):
special_event = convert_if_lookup_found(
event_date_groups[all_day_field_name].summary # type: ignore
)
text = csv_exp_str.format(
*([special_event] * len(shown_options))
)
else:
text = csv_exp_str.format(
*[
convert_if_lookup_found(
event_date_groups[c].summary # type: ignore
)
if event_date_groups[c]
else convert_if_lookup_found(
event_date_groups[ # type: ignore
all_day_field_name
].summary
)
for c in shown_options
]
)
plist[index_] = text
if set(plist) != {""} or include_empty_dates:
plists_by_date[date_] = plist
with open(csv_file, "w", newline="", encoding="utf-8") as f:
writer = csv.writer(f, dialect=csv_dialect)
writer.writerow([""] + [p.cal_id for p in self.cals])
for date_, plist in plists_by_date.items():
writer.writerow([date_] + plist)
def sub_cfg(
cfg: Optional[Dict],
sub_key: str,
default_val=None,
noisy: bool = False,
success_msg: str = "Located config sub_key: {0}. Value: {1}.",
no_sub_key_msg: str = "Could not locate config sub_key '{0}'. "
"Setting {0} to default value: {1}.",
no_cfg_msg: str = "No config dict to seek sub_key '{0}'. "
"Setting {0} to default value: {1}.",
):
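# Convenience accessor: return cfg[sub_key] when present, otherwise fall
# back to default_val (optionally reporting what happened when noisy=True).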
if cfg is None:
if noisy:
print(no_cfg_msg.format(sub_key, default_val))
return default_val
else:
try:
if noisy:
print(success_msg.format(sub_key, cfg[sub_key]))
return cfg[sub_key]
except KeyError:
if noisy:
print(no_sub_key_msg.format(sub_key, default_val))
return default_val
def main(
cals_data: List[Tuple[str, str, str, str]],
cals_filter: Optional[List[str]] = None,
ics_dir=DEF_ICS_DIR,
download_option: bool = False,
show_schedule: bool = False,
show_changelog: bool = False,
csv_export_file: str = None,
earliest_date: Optional[date] = None,
latest_date: Optional[date] = None,
summary_filters: Optional[List[str]] = None,
num_changelogs=None, # (for changelogs)
cfg=None,
verbose=0,
) -> None:
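# High-level flow: build Cal objects, optionally download fresh .ics files,
# then emit whichever outputs were requested (changelog, schedule view, CSV).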
output = ""
classification_rules = sub_cfg(cfg, "event_classifications")
fmt_cfg = sub_cfg(cfg, "formatting")
all_cals = [
Cal.from_tuple(cal_tuple=cal_tuple, ics_dir=ics_dir) for cal_tuple in cals_data
]
if cals_filter:
chosen_cals = [p for p in all_cals if p.cal_id in cals_filter]
else:
chosen_cals = all_cals
if download_option:
for p in chosen_cals:
p.download_latest_schedule_version()
if show_changelog:
report = ScheduleHistory.change_log_report_for_cals(
cals=chosen_cals,
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
num_changelogs=num_changelogs,
fmt_cfg=sub_cfg(fmt_cfg, "changelog"),
)
output += report
if show_schedule:
for cal in chosen_cals:
schedule, version_date = cal.current_schedule_and_version_date()
schedule_display = schedule.display(
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
version_date=version_date,
fmt_cfg=sub_cfg(fmt_cfg, "schedule_view"),
classification_rules=classification_rules,
)
output += schedule_display
if csv_export_file:
csv_cfg = sub_cfg(cfg, "csv")
csv_substitutions = sub_cfg(csv_cfg, "substitutions", {})
writer = ScheduleWriter(
cals=chosen_cals,
earliest_date=earliest_date,
latest_date=latest_date,
summary_filters=summary_filters,
)
empty = sub_cfg(csv_cfg, "include_empty_dates", verbose, False)
writer.csv_write(
conversion_table=csv_substitutions,
csv_file=csv_export_file,
include_empty_dates=empty,
classification_rules=classification_rules,
csv_cfg=csv_cfg,
)
print(output, end="")
| en | 0.78227 | Multipurpose ics util - changelogs, CSVs, schedule viewing. # , tzinfo # type: ignore # type: ignore Cal (or entity) with a schedule specified via .ics format. # TODO: for performance, probably no need to get a whole new # ScheduleHistory (Can instead just add the newly downloaded # schedule to existing schedule history, if available) # clear cache to force new load \ Uh oh! Could not find .ics file for the calendar "{self.name}".\n Are you specifying the correct directory for your ics files? (command line option -d)?\n Did you download the latest ics files (option -g)?\n For help, type 'ionical -h'. Quitting. # TODO More flexible implementation to allow user-specification # of what should be monitored for changes. # TODO Better handle offset-naive vis-a-vis offset-aware dts. Data to be monitored for changes. ics files read by the icalendar and recurreng_ical_events packages produce both datetime.date and datetime.datetime objects. Those objects get stored within MonitoredEventData objects *as they were generated* by the icalendar package. # it must be a datettime.date # it must be a datettime.date # it must be a datetime.date, so there's no time # print(cat_rules) # TODO: Make sure no falsy error # not great, because should really break out of 2 loops Contain a set of MonitoredEventData objects. Initialize a schedule from an .ics file (icalCal). This is the primary way a Schedule object will be created in this package. Because the icalendar package will only return the first occurence in a repeating event, need to also obtain a set of event data using the recurring_ics_events package, and combine the two sets. # ignore timezone from ics file (maybe implement later?) # TODO KeyError may represent difficulty reading Google Calendar # ics format's iniital TIMEZONE section in ics file. For at least # one test case, removing that section solved the # sole encountered KeyError. # Get the earliest and laetst dates that are explicitly specified in # the ics file (ie, not specified by recurrence). # These will be used when querying for recurrent events. # Search for recurrent events that occur a specified # of days # beyond the latest explicitly-stated event date. Get MonitoredEventData objects filtered by summary and date. Holder for a Cal's .ics URL. ^(?P<cal_id>.*) # cal_id at the start (any string) __ # double _ delimiter (?P<ymd> # to capture concatenated year/month/day (?P<year>[0-9]{4}) # 4 digit year (?P<month>[0-9]{2}) # 2 digit month (?P<day>[0-9]{2}) # 2 digit day of month ) # end capturing of <ymd> \.ics # suffix Save the current .ics file version of the Cal's schedule. # TODO: consider making SC full class # if we do that, then switch to direct reference to Cal object # (rather than indirect lookup via Cal.cal_id) # ? Pros vs Cons ? Data to be displayed on a change log report. # TODO: ???? clarify naive/local/aware issues # either "a" for addition, or "r" for removal Container for multiple versions of .ics file data. Instantiate by reading in .ics files for a Cal. Determination of which ics files correspond to Cal is made by matching Cal.cal_id to the id embedded in the filenames, as specified by the regex found in ScheduleFeed class. Get a cal's schedule changes for a given date. Get the ScheduleChanges for the Cal referenced by this ScheduleHistory object, comparing the version of calendar events for the date given in the parameter version_date with the next older schedule for that cal. # TODO: consider directly referencing Cal object from ScheduleChange? 
# (rather than indirect lookup via Cal.cal_id) Get a list of ScheduleChanges from multiple version dates. Compare each schedule version with the immediately preceding version (except for the very oldest version, for which there will be nothing available for comparison.) For each schedule version date, provide a list of the changes. # TODO implement user option for which versions to analyze? # TODO allow user to specify sorting/grouping # TODO consider putting in its own class Return a filtered/sorted list of changes. Return a history of changes for multiple dates/cals, filtering events by a user-specifiable list of search terms (matched to an event's summary field), and a user-specifiable date range. If no filters are provided, then no search filter is applied. # fmt_cfg = {} if fmt_cfg is None else fmt_cfg # "" Return most recent available schedule version/version date. # DEF_START_TIME_CAT_DICT # https://stackoverflow.com/questions/1060279/iterating-through-a-range-of-dates-in-python # csv_cfg["output"][ "order" ] # type: ignore # below hack addresses scenario when all-day events need to fill in other shifts # type: ignore # type: ignore # type: ignore # (for changelogs) | 2.520388 | 3 |
mvlearn/compose/merge.py | idc9/mvlearn | 0 | 6631236 | """Merging utilities."""
# Copyright 2019 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Authors: <NAME>
import numpy as np
from abc import abstractmethod
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
from ..utils.utils import check_Xs
class BaseMerger(TransformerMixin):
"""A base class for merging multiview datasets into single view datasets.
The .transform function should return a single dataset.
Parameters
----------
Attributes
----------
See Also
--------
"""
def __init__(self):
pass # pragma: no cover
@abstractmethod
def fit(self, Xs, y=None):
r"""Fit model to multiview data.
Parameters
----------
Xs: list of array-likes
- Xs shape: (n_views,)
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
self: returns an instance of self.
"""
return self # pragma: no cover
@abstractmethod
def transform(self, Xs, y=None):
r"""Merge multiview data into a single dataset
Parameters
----------
Xs: list of array-likes
- Xs shape: (n_views,)
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The singleview output
"""
pass # pragma: no cover
def fit_transform(self, Xs, y=None):
r"""Fit to the data and merge
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The singleview output
"""
return self.fit(Xs, y).transform(Xs)
@abstractmethod
def inverse_transform(self, X):
r"""Take a single view dataset and split it into multiple views.
Parameters
----------
X : numpy.ndarray, shape (n_samples, n_total_features)
The input dataset
Returns
-------
Xs : list of numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
"""
pass # pragma: no cover
class ConcatMerger(BaseMerger):
r"""A transformer that stacks features of multiview datasets.
Take a multiview dataset and transform it into a single view dataset
by stacking features.
Attributes
----------
n_features_ : list of ints
The number of features in each view of the input dataset
n_total_features_ : int
The number of features in the dataset, equal to the sum of n_features_
n_views_ : int
The number of views in the dataset
See Also
--------
AverageMerger
"""
def __init__(self):
pass
def fit(self, Xs, y=None):
r"""Fit to the data.
Stores the number of features in each view
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
self : object
Transformer instance.
"""
Xs, n_views, n_samples, n_features = check_Xs(
Xs, return_dimensions=True
)
self.n_features_ = n_features
self.n_total_features_ = sum(self.n_features_)
self.n_views_ = n_views
return self
def transform(self, Xs, y=None):
r"""Merge the data by stacking its features.
The multiple views are transformed into a single view dataset by
stacking (i.e. concatenating) the features.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_total_features)
The stacked data, containing all the stacked features.
"""
Xs = check_Xs(Xs)
return np.hstack(Xs)
def inverse_transform(self, X):
r"""Take a single view dataset and split it into multiple views.
The input dimension must match the fitted dimension of the multiview
dataset.
Parameters
----------
X : numpy.ndarray, shape (n_samples, n_total_features)
The input dataset
Returns
-------
Xs : list of numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
The multiview dataset obtained by splitting features of X
"""
check_is_fitted(self)
n_feature = X.shape[1]
if n_feature != self.n_total_features_:
raise ValueError(
"The number of features in the input array ({}) does not match"
" the total number of features in the multiview dataset"
" ({})".format(n_feature, self.n_total_features_)
)
return np.split(X, np.cumsum(self.n_features_)[:-1], axis=1)
class AverageMerger(BaseMerger):
r"""A transformer that computes the mean of multiview datasets
Take a multiview dataset and transform it into a single view dataset
by averaging across views
Attributes
----------
n_feature_ : int
The number of features in each view of the input dataset.
Must be the same for every view.
n_views_ : int
The number of views in the dataset
See Also
--------
ConcatMerger
"""
def __init__(self):
pass
def fit(self, Xs, y=None):
r"""Fit to the data.
Stores the number of features in each view, and checks that
each view has the same number of features.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
self : object
Transformer instance.
"""
Xs = check_Xs(Xs)
n_features_ = [X.shape[1] for X in Xs]
if len(set(n_features_)) > 1:
raise ValueError(
"The number of features in each dataset should be the same."
)
self.n_feature_ = n_features_[0]
self.n_views_ = len(n_features_)
return self
def transform(self, Xs, y=None):
r"""Merge the views by averaging
Transform the multiview dataset into a single view by averaging
the views
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The average of the views.
"""
Xs = check_Xs(Xs)
return np.mean(Xs, axis=0)
| """Merging utilities."""
# Copyright 2019 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Authors: <NAME>
import numpy as np
from abc import abstractmethod
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
from ..utils.utils import check_Xs
class BaseMerger(TransformerMixin):
"""A base class for merging multiview datasets into single view datasets.
The .transform function should return a single dataset.
Parameters
----------
Attributes
----------
See Also
--------
"""
def __init__(self):
pass # pragma: no cover
@abstractmethod
def fit(self, Xs, y=None):
r"""Fit model to multiview data.
Parameters
----------
Xs: list of array-likes
- Xs shape: (n_views,)
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
self: returns an instance of self.
"""
return self # pragma: no cover
@abstractmethod
def transform(self, Xs, y=None):
r"""Merge multiview data into a single dataset
Parameters
----------
Xs: list of array-likes
- Xs shape: (n_views,)
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The singleview output
"""
pass # pragma: no cover
def fit_transform(self, Xs, y=None):
r"""Fit to the data and merge
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y : array, shape (n_samples,), optional
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The singleview output
"""
return self.fit(Xs, y).transform(Xs)
@abstractmethod
def inverse_transform(self, X):
r"""Take a single view dataset and split it into multiple views.
Parameters
----------
X : numpy.ndarray, shape (n_samples, n_total_features)
The input dataset
Returns
-------
Xs : list of numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
"""
pass # pragma: no cover
class ConcatMerger(BaseMerger):
r"""A transformer that stacks features of multiview datasets.
Take a multiview dataset and transform it into a single view dataset
by stacking features.
Attributes
----------
n_features_ : list of ints
The number of features in each view of the input dataset
n_total_features_ : int
The number of features in the dataset, equal to the sum of n_features_
n_views_ : int
The number of views in the dataset
See Also
--------
AverageMerger
"""
def __init__(self):
pass
def fit(self, Xs, y=None):
r"""Fit to the data.
Stores the number of features in each view
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
self : object
Transformer instance.
"""
Xs, n_views, n_samples, n_features = check_Xs(
Xs, return_dimensions=True
)
self.n_features_ = n_features
self.n_total_features_ = sum(self.n_features_)
self.n_views_ = n_views
return self
def transform(self, Xs, y=None):
r"""Merge the data by stacking its features.
The multiple views are transformed into a single view dataset by
stacking (i.e. concatenating) the features.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_total_features)
The stacked data, containing all the stacked features.
"""
Xs = check_Xs(Xs)
return np.hstack(Xs)
def inverse_transform(self, X):
r"""Take a single view dataset and split it into multiple views.
The input dimension must match the fitted dimension of the multiview
dataset.
Parameters
----------
X : numpy.ndarray, shape (n_samples, n_total_features)
The input dataset
Returns
-------
Xs : list of numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
The multiview dataset obtained by splitting features of X
"""
check_is_fitted(self)
n_feature = X.shape[1]
if n_feature != self.n_total_features_:
raise ValueError(
"The number of features in the input array ({}) does not match"
" the total number of features in the multiview dataset"
" ({})".format(n_feature, self.n_total_features_)
)
return np.split(X, np.cumsum(self.n_features_)[:-1], axis=1)
class AverageMerger(BaseMerger):
r"""A transformer that computes the mean of multiview datasets
Take a multiview dataset and transform it into a single view dataset
by averaging across views
Attributes
----------
n_feature_ : int
The number of features in each view of the input dataset.
Must be the same for every view.
n_views_ : int
The number of views in the dataset
See Also
--------
ConcatMerger
"""
def __init__(self):
pass
def fit(self, Xs, y=None):
r"""Fit to the data.
Stores the number of features in each view, and checks that
each view has the same number of features.
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
self : object
Transformer instance.
"""
Xs = check_Xs(Xs)
n_features_ = [X.shape[1] for X in Xs]
if len(set(n_features_)) > 1:
raise ValueError(
"The number of features in each dataset should be the same."
)
self.n_feature_ = n_features_[0]
self.n_views_ = len(n_features_)
return self
def transform(self, Xs, y=None):
r"""Merge the views by averaging
Transform the multiview dataset into a single view by averaging
the views
Parameters
----------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_features_i)
y
Ignored
Returns
-------
X_transformed : numpy.ndarray of shape (n_samples, n_features)
The average of the views.
"""
Xs = check_Xs(Xs)
return np.mean(Xs, axis=0)
| en | 0.715219 | Merging utilities. # Copyright 2019 NeuroData (http://neurodata.io) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Authors: <NAME> A base class for merging multiview datasets into single view datasets. The .transform function should return a single dataset. Parameters ---------- Attributes ---------- See Also -------- # pragma: no cover Fit model to multiview data. Parameters ---------- Xs: list of array-likes - Xs shape: (n_views,) - Xs[i] shape: (n_samples, n_features_i) y : array, shape (n_samples,), optional Returns ------- self: returns an instance of self. # pragma: no cover Merge multiview data into a single dataset Parameters ---------- Xs: list of array-likes - Xs shape: (n_views,) - Xs[i] shape: (n_samples, n_features_i) y : array, shape (n_samples,), optional Returns ------- X_transformed : numpy.ndarray of shape (n_samples, n_features) The singleview output # pragma: no cover Fit to the data and merge Parameters ---------- Xs : list of array-likes or numpy.ndarray - Xs length: n_views - Xs[i] shape: (n_samples, n_features_i) y : array, shape (n_samples,), optional Returns ------- X_transformed : numpy.ndarray of shape (n_samples, n_features) The singleview output Take a single view dataset and split it into multiple views. Parameters ---------- X : numpy.ndarray, shape (n_total_features, n_samples) The input dataset Returns ------- Xs : list of numpy.ndarray - Xs length: n_views - Xs[i] shape: (n_samples, n_features_i) # pragma: no cover A transformer that stacks features of multiview datasets. Take a multiview dataset and transform it in a single view dataset by stacking features. Attributes ---------- n_features_ : list of ints The number of features in each view of the input dataset n_total_features_ : int The number of features in the dataset, equal to the sum of n_features_ n_views_ : int The number of views in the dataset See Also -------- AverageMerger Fit to the data. Stores the number of features in each view Parameters ---------- Xs : list of array-likes or numpy.ndarray - Xs length: n_views - Xs[i] shape: (n_samples, n_features_i) y Ignored Returns ------- self : object Transformer instance. Merge the data by stacking its features. The multiple views are transformed into a single view dataset by stacking (i.e. concatenating) the features. Parameters ---------- Xs : list of array-likes or numpy.ndarray - Xs length: n_views - Xs[i] shape: (n_samples, n_features_i) y Ignored Returns ------- X_transformed : numpy.ndarray of shape (n_total_features, n_samples) The stacked data, containing all the stacked features. Take a single view dataset and split it into multiple views. The input dimension must match the fitted dimension of the multiview dataset. 
Parameters ---------- X : numpy.ndarray, shape (n_total_features, n_samples) The input dataset Returns ------- Xs : list of numpy.ndarray - Xs length: n_views - Xs[i] shape: (n_samples, n_features_i) The multiview dataset obtained by splitting features of X A transformer that computes the mean of multiview datasets Take a multiview dataset and transform it in a single view dataset by averaging across views Attributes ---------- n_feature_ : list of ints The number of feature in each view of the input dataset Must be the same for each dataset. n_views_ : int The number of views in the dataset See Also -------- ConcatMerger Fit to the data. Stores the number of features in each view, and checks that each view has the same number of features. Parameters ---------- Xs : list of array-likes or numpy.ndarray - Xs length: n_views - Xs[i] shape: (n_samples, n_features_i) y Ignored Returns ------- self : object Transformer instance. Merge the views by averaging Transform the multiview dataset into a single view by averaging the views Parameters ---------- Xs : list of array-likes or numpy.ndarray - Xs length: n_views - Xs[i] shape: (n_samples, n_features_i) y Ignored Returns ------- X_transformed : numpy.ndarray of shape (n_total_features, n_samples) The average of the views. | 2.485856 | 2 |
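A minimal usage sketch for the two mergers defined above (illustrative only; the import path and array shapes are assumptions, not taken from the mvlearn source):

import numpy as np
from mvlearn.compose import ConcatMerger, AverageMerger  # assumed public import path

# Two views of the same 4 samples, with 3 and 2 features respectively.
Xs = [np.random.rand(4, 3), np.random.rand(4, 2)]

concat = ConcatMerger().fit(Xs)
X_single = concat.transform(Xs)               # shape (4, 5): features stacked side by side
Xs_back = concat.inverse_transform(X_single)  # split back into views of shape (4, 3) and (4, 2)

# AverageMerger requires every view to have the same number of features.
Xs_same = [np.random.rand(4, 3), np.random.rand(4, 3)]
X_mean = AverageMerger().fit_transform(Xs_same)  # shape (4, 3): element-wise mean across views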
xeasy_ml/xes_ml_arch/src/ml/prediction_ml.py | jiayanduo456/xeasy-ml | 10 | 6631237 | # -*-coding:utf-8-*-
# @version: 0.0.1
# License: MIT
from . import base_ml
import traceback
from ..ml_utils import runstatus
class PredictionML(base_ml.BaseML):
"""
This basic class encapsulates the functions of the prediction part, and you can call the method
of the class to make predictions on the test set.
Parameters
--------
conf : configparser.ConfigParser, default = None
Configuration file for prediction of the test data set.
Examples
--------
>>> from xes_ml_arch.src.ml import prediction_ml
>>> import configparser
>>> import pandas as pd
>>> conf = configparser.ConfigParser()
>>> conf.read("myconfig.conf")
>>> pml = prediction_ml.PredictionML(conf=conf)
>>> data = pd.read_csv("my_data.csv")
>>> pml.set_data(data)
>>> pml.start()
"""
def __init__(self, conf=None,xeasy_log_path = None):
self._test_data = None
super(PredictionML, self).__init__(config=conf, xeasy_log_path = xeasy_log_path)
def start(self):
"""
Start predict data handle.
"""
self.managerlogger.logger.info("start ml predict...")
if runstatus.RunStatus.SUCC == self._predict_handle():
self.managerlogger.logger.info("finished ml predict!")
else:
self.managerlogger.logger.error("ml predict failed!")
def _init_model(self):
"""
Load the trained model.
Returns
-------
:return: bool
True : Succ
False : failed
"""
if not super(PredictionML, self)._init_model():
return False
# load model
if runstatus.RunStatus.FAILED == self._model.load_model():
self.managerlogger.logger.error("load model error")
return False
self.managerlogger.logger.info("successfly load model to predict: %s" % self._model.MODEL_ID)
return True
def _predict_handle(self):
'''
Model predict handle.
Returns
-------
:return: bool
True : Succ
False : failed
'''
try:
self._feature_processor.test_data = self._data
if runstatus.RunStatus.FAILED == self._feature_processor.execute():
self.managerlogger.logger.error("predict feature processor error")
return False
self.managerlogger.logger.info("successfly predict model: %s" % self._model.MODEL_ID)
# get predict result
if runstatus.RunStatus.FAILED == self._get_result():
self.managerlogger.logger.error("predict get result error")
return False
self.managerlogger.logger.info("successfly get result of predict : %s" % self._model.MODEL_ID)
# store result to file
if runstatus.RunStatus.FAILED == self._store_predict_result():
self.managerlogger.logger.error("store predict result error")
return False
self.managerlogger.logger.info("successfly store result of predict : %s" % self._model.MODEL_ID)
return True
except:
self.managerlogger.logger.debug(traceback.format_exc())
self.managerlogger.logger.error("predict handle error")
return False | # -*-coding:utf-8-*-
# @version: 0.0.1
# License: MIT
from . import base_ml
import traceback
from ..ml_utils import runstatus
class PredictionML(base_ml.BaseML):
"""
This basic class encapsulates the functions of the prediction part, and you can call the method
of the class to make predictions on the test set.
Parameters
--------
conf : configparser.ConfigParser, default = None
Configuration file for prediction of the test data set.
Examples
--------
>>> from xes_ml_arch.src.ml import prediction_ml
>>> import configparser
>>> import pandas as pd
>>> conf = configparser.ConfigParser()
>>> conf.read("myconfig.conf")
>>> pml = prediction_ml.PredictionML(conf=conf)
>>> data = pd.read_csv("my_data.csv")
>>> pml.set_data(data)
>>> pml.start()
"""
def __init__(self, conf=None,xeasy_log_path = None):
self._test_data = None
super(PredictionML, self).__init__(config=conf, xeasy_log_path = xeasy_log_path)
def start(self):
"""
Start predict data handle.
"""
self.managerlogger.logger.info("start ml predict...")
if runstatus.RunStatus.SUCC == self._predict_handle():
self.managerlogger.logger.info("finished ml predict!")
else:
self.managerlogger.logger.error("ml predict failed!")
def _init_model(self):
"""
Load the trained model.
Returns
-------
:return: bool
True : Succ
False : failed
"""
if not super(PredictionML, self)._init_model():
return False
# load model
if runstatus.RunStatus.FAILED == self._model.load_model():
self.managerlogger.logger.error("load model error")
return False
self.managerlogger.logger.info("successfly load model to predict: %s" % self._model.MODEL_ID)
return True
def _predict_handle(self):
'''
Model predict handle.
Returns
-------
:return: bool
True : Succ
False : failed
'''
try:
self._feature_processor.test_data = self._data
if runstatus.RunStatus.FAILED == self._feature_processor.execute():
self.managerlogger.logger.error("predict feature processor error")
return False
self.managerlogger.logger.info("successfly predict model: %s" % self._model.MODEL_ID)
# get predict result
if runstatus.RunStatus.FAILED == self._get_result():
self.managerlogger.logger.error("predict get result error")
return False
self.managerlogger.logger.info("successfly get result of predict : %s" % self._model.MODEL_ID)
# store result to file
if runstatus.RunStatus.FAILED == self._store_predict_result():
self.managerlogger.logger.error("store predict result error")
return False
self.managerlogger.logger.info("successfly store result of predict : %s" % self._model.MODEL_ID)
return True
except:
self.managerlogger.logger.debug(traceback.format_exc())
self.managerlogger.logger.error("predict handle error")
return False | en | 0.494464 | # -*-coding:utf-8-*- # @version: 0.0.1 # License: MIT This basic class encapsulates the functions of the prediction part, and you can call the method of the class to make predictions on the test set. Parameters -------- conf : configparser.ConfigParser, default = None Configuration file for prediction of the test data set. Examples -------- >>> from xes_ml_arch.src.ml import prediction_ml >>> import configparser >>> import pandas as pd >>> conf = configparser.ConfigParser() >>> conf.read("myconfig.conf") >>> pml = prediction_ml.PredictionML(conf=conf) >>> data = pd.read_csv("my_data.csv") >>> pml.set_data(data) >>> pml.start() Start predict data handle. Load the trained model. Returns ------- :return: bool True : Succ False : failed # load model Model predict handle. Returns ------- :return: bool True : Succ False : failed # get predict result # store result to file | 3.025934 | 3 |
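The _predict_handle method above chains several steps that each report a run status and abort on the first failure. A stripped-down sketch of that control pattern (the names below are illustrative and are not part of the xeasy-ml API):

import logging

class Status:
    SUCC, FAILED = "succ", "failed"   # illustrative stand-in for runstatus.RunStatus

def run_steps(steps, logger=logging.getLogger(__name__)):
    """Run (name, callable) pairs in order; stop at the first FAILED status."""
    for name, step in steps:
        if step() == Status.FAILED:
            logger.error("%s failed", name)
            return False
        logger.info("%s finished", name)
    return True

# e.g. run_steps([("feature processing", process), ("prediction", predict), ("store result", store)])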
comspy/user/Server_Chinese.py | SunnyLi1106/Comspy | 1 | 6631238 | <gh_stars>1-10
#!/usr/bin/python3
# 文件名:server.py
# 导入 socket、sys 模块
import socket
import sys
import threading
def server(Maximum_Number_Connections):
# 创建 socket 对象
serversocket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
# 获取本地主机名
host = socket.gethostname()
port = int(input("请输入通信端口号(0~65535):"))
# 绑定端口号
serversocket.bind((host, port))
# 设置最大连接数,超过后排队
serversocket.listen(Maximum_Number_Connections)
while True:
# 建立客户端连接
try:
clientsocket,addr = serversocket.accept()
print("连接成功\n地址: %s" % str(addr))
msg="连接成功\n地址: %s" % str(addr) + "\r"
clientsocket.send(msg.encode('utf-8'))
while True:
msg = input("输入内容:") + "\r"
clientsocket.send(msg.encode('utf-8'))
if msg == "QUIT\r":
print("你中断了本次通信")
clientsocket.close()
exit()
except ConnectionResetError:
print("报出错误ConnectionResetError,可能是远程主机强制关闭现有连接")
exit()
except ConnectionRefusedError:
print("报出错误ConnectionRefusedError,可能是目标计算机积极拒绝")
exit()
except ConnectionAbortedError:
print("报出错误ConnectionAbortedError,可能是主机中的软件中止了一个已建立的连接")
exit()
except BrokenPipeError:
print("报出错误BrokenPipeError")
exit()
def start(MaxNumCon=5):
threading.Thread(target=server, args=(MaxNumCon,)).start()
| #!/usr/bin/python3
# 文件名:server.py
# 导入 socket、sys 模块
import socket
import sys
import threading
def server(Maximum_Number_Connections):
# 创建 socket 对象
serversocket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
# 获取本地主机名
host = socket.gethostname()
port = int(input("请输入通信端口号(0~65535):"))
# 绑定端口号
serversocket.bind((host, port))
# 设置最大连接数,超过后排队
serversocket.listen(Maximum_Number_Connections)
while True:
# 建立客户端连接
try:
clientsocket,addr = serversocket.accept()
print("连接成功\n地址: %s" % str(addr))
msg="连接成功\n地址: %s" % str(addr) + "\r"
clientsocket.send(msg.encode('utf-8'))
while True:
msg = input("输入内容:") + "\r"
clientsocket.send(msg.encode('utf-8'))
if msg == "QUIT\r":
print("你中断了本次通信")
clientsocket.close()
exit()
except ConnectionResetError:
print("报出错误ConnectionResetError,可能是远程主机强制关闭现有连接")
exit()
except ConnectionRefusedError:
print("报出错误ConnectionRefusedError,可能是目标计算机积极拒绝")
exit()
except ConnectionAbortedError:
print("报出错误ConnectionAbortedError,可能是主机中的软件中止了一个已建立的连接")
exit()
except BrokenPipeError:
print("报出错误BrokenPipeError")
exit()
def start(MaxNumCon=5):
threading.Thread(target=server, args=(MaxNumCon,)).start() | zh | 0.88468 | #!/usr/bin/python3 # 文件名:server.py # 导入 socket、sys 模块 # 创建 socket 对象 # 获取本地主机名 # 绑定端口号 # 设置最大连接数,超过后排队 # 建立客户端连接 | 3.408534 | 3
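The server above frames each UTF-8 message with a trailing '\r' and ends a session by sending 'QUIT\r'. A minimal client sketch that speaks this framing (host and port are placeholders and must match what the server binds to):

import socket

def client(host="localhost", port=8000):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    buf = ""
    while True:
        data = sock.recv(1024)
        if not data:                      # server closed the connection
            break
        buf += data.decode("utf-8")
        while "\r" in buf:                # one message per '\r'-terminated chunk
            msg, buf = buf.split("\r", 1)
            if msg == "QUIT":
                sock.close()
                return
            print(msg)
    sock.close()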
examples/temp.seq.py | carbonscott/pyrotein | 1 | 6631239 | <filename>examples/temp.seq.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## import sys
## sys.path.insert(0, '/home/scott/Dropbox/codes/pyrotein')
import pyrotein as pr
import os
fl_aln = 'seq.align.fasta'
seq_dict = pr.fasta.read(fl_aln)
tally_dict = pr.fasta.tally_resn_in_seqs(seq_dict)
super_seq = pr.fasta.infer_super_seq(tally_dict)
seq_to_resi_dict = pr.fasta.seq_to_resi(super_seq, 1)
ref = super_seq
pdb = '1f88'
chain = 'A'
entry = f"{pdb}_{chain}"
tar = seq_dict[entry]
seq_diff = pr.fasta.diff_seq(tar, ref)
nterm, cterm = 1, 322
ref_simp = pr.fasta.strip_null(ref)
seq_to_resi_dict = pr.fasta.seq_to_resi(ref_simp, 1)
nseqi = pr.fasta.get_lseqi(tar)
cseqi = pr.fasta.get_rseqi(tar)
tar_simp = tar[nseqi : nseqi + len(ref_simp)]
seq_simp_dict = pr.fasta.seq_to_resi(ref_simp, 1)
seq_simp_diff = pr.fasta.diff_seq(tar_simp, ref_simp)
seq_non_null_list = pr.fasta.seqi_non_null(seq_simp_diff)
# Read coordinates from a PDB file...
fl_pdb = f"{pdb}.pdb"
drc = 'pdb'
pdb_path = os.path.join(drc, fl_pdb)
atoms_pdb = pr.atom.read(pdb_path)
# Create a lookup table for this pdb...
atom_dict = pr.atom.create_lookup_table(atoms_pdb)
| <filename>examples/temp.seq.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## import sys
## sys.path.insert(0, '/home/scott/Dropbox/codes/pyrotein')
import pyrotein as pr
import os
fl_aln = 'seq.align.fasta'
seq_dict = pr.fasta.read(fl_aln)
tally_dict = pr.fasta.tally_resn_in_seqs(seq_dict)
super_seq = pr.fasta.infer_super_seq(tally_dict)
seq_to_resi_dict = pr.fasta.seq_to_resi(super_seq, 1)
ref = super_seq
pdb = '1f88'
chain = 'A'
entry = f"{pdb}_{chain}"
tar = seq_dict[entry]
seq_diff = pr.fasta.diff_seq(tar, ref)
nterm, cterm = 1, 322
ref_simp = pr.fasta.strip_null(ref)
seq_to_resi_dict = pr.fasta.seq_to_resi(ref_simp, 1)
nseqi = pr.fasta.get_lseqi(tar)
cseqi = pr.fasta.get_rseqi(tar)
tar_simp = tar[nseqi : nseqi + len(ref_simp)]
seq_simp_dict = pr.fasta.seq_to_resi(ref_simp, 1)
seq_simp_diff = pr.fasta.diff_seq(tar_simp, ref_simp)
seq_non_null_list = pr.fasta.seqi_non_null(seq_simp_diff)
# Read coordinates from a PDB file...
fl_pdb = f"{pdb}.pdb"
drc = 'pdb'
pdb_path = os.path.join(drc, fl_pdb)
atoms_pdb = pr.atom.read(pdb_path)
# Create a lookup table for this pdb...
atom_dict = pr.atom.create_lookup_table(atoms_pdb)
| en | 0.420313 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- ## import sys ## sys.path.insert(0, '/home/scott/Dropbox/codes/pyrotein') # Read coordinates from a PDB file... # Create a lookup table for this pdb... | 2.22007 | 2 |
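For orientation only, a rough pure-Python sketch of the consensus ("super sequence") idea the script above builds on. This illustrates the concept and is not pyrotein's actual implementation:

from collections import Counter

def infer_consensus(aligned_seqs):
    """Pick the most common residue at each alignment column."""
    length = len(next(iter(aligned_seqs.values())))
    consensus = []
    for i in range(length):
        counts = Counter(seq[i] for seq in aligned_seqs.values())
        consensus.append(counts.most_common(1)[0][0])
    return "".join(consensus)

# e.g. infer_consensus({"a": "MT-K", "b": "MTAK", "c": "MTAK"}) -> "MTAK"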
napari_allencell_segmenter/_tests/core/state_test.py | neuromusic/napari-allencell-segmenter | 8 | 6631240 | import pytest
from unittest import mock
from unittest.mock import MagicMock, create_autospec
from napari_allencell_segmenter.core.state import State, SegmenterModel
class TestRouter:
def setup_method(self):
self._state = State()
def test_segmenter_model(self):
# Assert
assert self._state.segmenter_model is not None
assert type(self._state.segmenter_model) == SegmenterModel
| import pytest
from unittest import mock
from unittest.mock import MagicMock, create_autospec
from napari_allencell_segmenter.core.state import State, SegmenterModel
class TestRouter:
def setup_method(self):
self._state = State()
def test_segmenter_model(self):
# Assert
assert self._state.segmenter_model is not None
assert type(self._state.segmenter_model) == SegmenterModel
| none | 1 | 2.396544 | 2 |
|
seqlib/interval.py | kepbod/seqlib | 2 | 6631241 | <filename>seqlib/interval.py
'''
interval.py - Deal with intervals.
author: <NAME> <<EMAIL>>
version: 1.0
'''
# copied and modified from https://github.com/kepbod/interval
import sys
import copy
class Interval(object):
'''
Class: Interval
Maintainer: <NAME>
Version: 1.0
Usage: a = Interval(list)
(nested list: [[x,x,f1...],[x,x,f2...]...] / [[x,x],[x,x]...] or
simple list: [x,x,f1...] / [x,x])
Notes: all the intervals in the list will become mutually exclusive and
be sorted after instantiation.
For example: input: [[1, 10, 'a'], [17, 22, 'b'], [7, 12, 'c'],
[20, 25, 'd'], [30, 35, 'e']]
output: [[1, 12, 'a', 'c'], [17, 25, 'b', 'd'], [30, 35, 'e']]
Attributes: interval
Functions: c = a + b or a += b
c = b + a
c = a * b or a *= b
c = b * a
c = a - b or a -= b
c = b - a
a[n] or a[n:m]
[x, x] in a or [[x, x], [x, x]] not in a
a.complement(sta, end)
a.extractwith(b)
a.extractwithout(b)
mapto(interval, index) -> interval
overlapwith(index, interval) -> index
'''
def __init__(self, interval, instance_flag=0):
self.interval = [[int(i[0]), int(i[1])] + i[2:]
for i in Interval.__convert(interval)]
if not self.interval:
return
if not instance_flag:
self.interval.sort()
tmp = []
a = self.interval[0]
for b in self.interval[1:]:
if a[1] <= b[0]:
tmp.append(a)
a = b
else:
a[1] = b[1] if b[1] > a[1] else a[1]
a.extend(b[2:])
tmp.append(a)
self.interval = tmp
def __len__(self):
'''
Usage: len(c)
length of interval c
'''
return len(self.interval)
def __add__(self, interval):
'''
Usage: c = a + b or a += b
extract union intervals, 'a' should be instance.
'''
tmp = copy.deepcopy(self.interval)
if isinstance(interval, Interval):
tmp.extend(interval.interval)
else:
tmp.extend(Interval.__convert(interval))
return Interval(tmp)
def __radd__(self, interval):
'''
Usage: c = b + a
extract union intervals, 'a' should be instance.
'''
return self.__add__(interval)
def __mul__(self, interval, real_flag=1):
'''
Usage: c = a * b or a *= b
extract intersection intervals, 'a' should be instance.
'''
tmp = []
tmp1 = self.interval
if isinstance(interval, Interval):
tmp2 = interval.interval
else:
tmp2 = Interval(interval).interval
if not tmp1 or not tmp2:
return Interval([])
a, b = tmp1[0], tmp2[0]
i, j = 1, 1
while True:
sta = a[0] if a[0] > b[0] else b[0]
end = a[1] if a[1] < b[1] else b[1]
if sta < end:
if real_flag:
tmp.append([sta, end] + a[2:] + b[2:])
else:
tmp.append(copy.copy(a))
if a[1] == end:
if i == len(tmp1):
break
a = tmp1[i]
i += 1
if b[1] == end:
if j == len(tmp2):
break
b = tmp2[j]
j += 1
return Interval(tmp, 1)
def __rmul__(self, interval):
'''
Usage: c = b * a
extract intersection intervals, 'a' should be instance.
'''
return self.__mul__(interval)
def __sub__(self, interval, real_flag=1):
'''
Usage: c = a - b or a -= b
extract difference intervals, 'a' should be instance.
'''
if not self.interval:
return Interval([])
if isinstance(interval, Interval):
tmp = copy.deepcopy(interval)
else:
tmp = Interval(interval)
if not tmp:
return copy.deepcopy(self)
if self.interval[0][0] < tmp.interval[0][0]:
sta = self.interval[0][0]
else:
sta = tmp.interval[0][0]
if self.interval[-1][1] > tmp.interval[-1][1]:
end = self.interval[-1][1]
else:
end = tmp.interval[-1][1]
tmp.complement(sta, end)
return self.__mul__(tmp, real_flag)
def __rsub__(self, interval):
'''
Usage: c = b - a
extract difference intervals, 'a' should be instance.
'''
if isinstance(interval, Interval):
tmp = copy.deepcopy(interval)
else:
tmp = Interval(interval)
if not self.interval:
return tmp
if not tmp:
return Interval([])
if self.interval[0][0] < tmp.interval[0][0]:
sta = self.interval[0][0]
else:
sta = tmp.interval[0][0]
if self.interval[-1][1] > tmp.interval[-1][1]:
end = self.interval[-1][1]
else:
end = tmp.interval[-1][1]
tmp_a = copy.deepcopy(self)
tmp_a.complement(sta, end)
return Interval.__mul__(tmp, tmp_a)
def __getitem__(self, index):
'''
Usage: a[n] or a[n:m]
intercept index and slice on interval objects.
'''
return self.interval[index]
def __repr__(self):
'''
print objects.
'''
return repr(self.interval)
def __contains__(self, interval):
'''
Usage: [x, x] in a or [[x, x], [x, x]] not in a
judge whether interval is in a or not, 'a' should be instance.
'''
tmp = self.__mul__(interval).interval
if tmp:
return True
else:
return False
def complement(self, sta='#', end='#'):
'''
Usage: a.complement(sta, end)
complement of 'a'.
'''
tmp = []
if sta != '#' and sta < self.interval[0][0]:
tmp.append([sta, self.interval[0][0]])
a = self.interval[0][1]
for item in self.interval[1:]:
b = item[0]
if a != b:
tmp.append([a, b])
a = item[1]
if end != '#' and end > a:
tmp.append([a, end])
self.interval = tmp
def extractwith(self, interval):
'''
Usage: a.extractwith(b)
extract intervals in 'b'.
'''
self.interval = self.__mul__(interval, 0).interval
def extractwithout(self, interval):
'''
Usage: a.extractwithout(b)
extract intervals not in 'b'.
'''
self.interval = self.__sub__(interval, 0).interval
@staticmethod
def split(interval, x, y, flag):
'''
split(interval, x, y, flag) -> interval
split interval based on x and y.
'''
x, y = int(x), int(y)
assert x <= y, '{} is greater than {}'.format(x, y)
lst = Interval(Interval.__init(interval))
if flag == 'left':
return lst.__mul__([0, x]).interval
elif flag == 'middle':
return lst.__mul__([x, y]).interval
elif flag == 'right':
return lst.__mul__([y, lst.interval[-1][1]]).interval
else:
sys.exit('flag should be "left", "middle", "right"')
@staticmethod
def mapto(interval, index):
'''
mapto(interval, index) -> interval
Map interval onto index.
'''
tmp1 = Interval.__init(interval)
tmp2 = Interval.__init(index)
return Interval.__map(tmp2, tmp1, flag=1)
@staticmethod
def overlapwith(index, interval):
'''
overlapwith(index, interval) -> index
Overlap index with interval.
'''
tmp1 = Interval.__init(index)
tmp2 = Interval.__init(interval)
return Interval.__map(tmp1, tmp2, flag=0)
@staticmethod
def __convert(interval):
assert type(interval) is list, 'the type is {}'.format(type(interval))
if not interval:
return interval
if type(interval[0]) is list:
return interval
else:
return [interval]
@staticmethod
def __init(interval):
mapping = [[int(i[0]), int(i[1])] + i[2:]
for i in Interval.__convert(interval)]
mapping.sort()
return mapping
@staticmethod
def __map(index, interval, flag):
mapped_fragment = []
tmp_fragment = []
if not interval:
if flag:
return mapped_fragment
else:
return index
for dex in index:
dex_info = dex[2:]
while True:
try:
fragment = interval.pop(0)
except IndexError:
if tmp_fragment:
interval.extend(tmp_fragment)
tmp_fragment = []
continue
else:
if flag:
return mapped_fragment
else:
return index
if fragment[0] >= dex[1]:
interval.insert(0, fragment)
interval[0:0] = tmp_fragment
tmp_fragment = []
break
elif dex[0] < fragment[1] and dex[1] > fragment[0]:
dex += fragment[2:]
sta = dex[0] if dex[0] > fragment[0] else fragment[0]
end = dex[1] if dex[1] < fragment[1] else fragment[1]
new_fragment = [sta, end] + fragment[2:] + dex_info
mapped_fragment.append(new_fragment)
if fragment[1] > dex[1]:
tmp_fragment.append([dex[1],
fragment[1]] + fragment[2:])
else:
if flag:
return mapped_fragment
else:
return index
| <filename>seqlib/interval.py
'''
interval.py - Deal with intervals.
author: <NAME> <<EMAIL>>
version: 1.0
'''
# copied and modified from https://github.com/kepbod/interval
import sys
import copy
class Interval(object):
'''
Class: Interval
Maintainer: <NAME>
Version: 1.0
Usage: a = Interval(list)
(nested list: [[x,x,f1...],[x,x,f2...]...] / [[x,x],[x,x]...] or
simple list: [x,x,f1...] / [x,x])
Notes: all the intervals in the list will become mutually exclusive and
be sorted after instantiation.
For example: input: [[1, 10, 'a'], [17, 22, 'b'], [7, 12, 'c'],
[20, 25, 'd'], [30, 35, 'e']]
output: [[1, 12, 'a', 'c'], [17, 25, 'b', 'd'], [30, 35, 'e']]
Attributes: interval
Functions: c = a + b or a += b
c = b + a
c = a * b or a *= b
c = b * a
c = a - b or a -= b
c = b - a
a[n] or a[n:m]
[x, x] in a or [[x, x], [x, x]] not in a
a.complement(sta, end)
a.extractwith(b)
a.extractwithout(b)
mapto(interval, index) -> interval
overlapwith(index, interval) -> index
'''
def __init__(self, interval, instance_flag=0):
self.interval = [[int(i[0]), int(i[1])] + i[2:]
for i in Interval.__convert(interval)]
if not self.interval:
return
if not instance_flag:
self.interval.sort()
tmp = []
a = self.interval[0]
for b in self.interval[1:]:
if a[1] <= b[0]:
tmp.append(a)
a = b
else:
a[1] = b[1] if b[1] > a[1] else a[1]
a.extend(b[2:])
tmp.append(a)
self.interval = tmp
def __len__(self):
'''
Usage: len(c)
length of interval c
'''
return len(self.interval)
def __add__(self, interval):
'''
Usage: c = a + b or a += b
extract union intervals, 'a' should be instance.
'''
tmp = copy.deepcopy(self.interval)
if isinstance(interval, Interval):
tmp.extend(interval.interval)
else:
tmp.extend(Interval.__convert(interval))
return Interval(tmp)
def __radd__(self, interval):
'''
Usage: c = b + a
extract union intervals, 'a' should be instance.
'''
return self.__add__(interval)
def __mul__(self, interval, real_flag=1):
'''
Usage: c = a * b or a *= b
extract intersection intervals, 'a' should be instance.
'''
tmp = []
tmp1 = self.interval
if isinstance(interval, Interval):
tmp2 = interval.interval
else:
tmp2 = Interval(interval).interval
if not tmp1 or not tmp2:
return Interval([])
a, b = tmp1[0], tmp2[0]
i, j = 1, 1
while True:
sta = a[0] if a[0] > b[0] else b[0]
end = a[1] if a[1] < b[1] else b[1]
if sta < end:
if real_flag:
tmp.append([sta, end] + a[2:] + b[2:])
else:
tmp.append(copy.copy(a))
if a[1] == end:
if i == len(tmp1):
break
a = tmp1[i]
i += 1
if b[1] == end:
if j == len(tmp2):
break
b = tmp2[j]
j += 1
return Interval(tmp, 1)
def __rmul__(self, interval):
'''
Usage: c = b * a
extract intersection intervals, 'a' should be instance.
'''
return self.__mul__(interval)
def __sub__(self, interval, real_flag=1):
'''
Usage: c = a - b or a -= b
extract difference intervals, 'a' should be instance.
'''
if not self.interval:
return Interval([])
if isinstance(interval, Interval):
tmp = copy.deepcopy(interval)
else:
tmp = Interval(interval)
if not tmp:
return copy.deepcopy(self)
if self.interval[0][0] < tmp.interval[0][0]:
sta = self.interval[0][0]
else:
sta = tmp.interval[0][0]
if self.interval[-1][1] > tmp.interval[-1][1]:
end = self.interval[-1][1]
else:
end = tmp.interval[-1][1]
tmp.complement(sta, end)
return self.__mul__(tmp, real_flag)
def __rsub__(self, interval):
'''
Usage: c = b - a
extract difference intervals, 'a' should be instance.
'''
if isinstance(interval, Interval):
tmp = copy.deepcopy(interval)
else:
tmp = Interval(interval)
if not self.interval:
return tmp
if not tmp:
return Interval([])
if self.interval[0][0] < tmp.interval[0][0]:
sta = self.interval[0][0]
else:
sta = tmp.interval[0][0]
if self.interval[-1][1] > tmp.interval[-1][1]:
end = self.interval[-1][1]
else:
end = tmp.interval[-1][1]
tmp_a = copy.deepcopy(self)
tmp_a.complement(sta, end)
return Interval.__mul__(tmp, tmp_a)
def __getitem__(self, index):
'''
Usage: a[n] or a[n:m]
intercept index and slice on interval objects.
'''
return self.interval[index]
def __repr__(self):
'''
print objects.
'''
return repr(self.interval)
def __contains__(self, interval):
'''
Usage: [x, x] in a or [[x, x], [x, x]] not in a
judge whether interval is in a or not, 'a' should be instance.
'''
tmp = self.__mul__(interval).interval
if tmp:
return True
else:
return False
def complement(self, sta='#', end='#'):
'''
Usage: a.complement(sta, end)
complement of 'a'.
'''
tmp = []
if sta != '#' and sta < self.interval[0][0]:
tmp.append([sta, self.interval[0][0]])
a = self.interval[0][1]
for item in self.interval[1:]:
b = item[0]
if a != b:
tmp.append([a, b])
a = item[1]
if end != '#' and end > a:
tmp.append([a, end])
self.interval = tmp
def extractwith(self, interval):
'''
Usage: a.extractwith(b)
extract intervals in 'b'.
'''
self.interval = self.__mul__(interval, 0).interval
def extractwithout(self, interval):
'''
Usage: a.extractwithout(b)
extract intervals not in 'b'.
'''
self.interval = self.__sub__(interval, 0).interval
@staticmethod
def split(interval, x, y, flag):
'''
split(interval, x, y, flag) -> interval
split interval based on x and y.
'''
x, y = int(x), int(y)
assert x <= y, '{} is greater than {}'.format(x, y)
lst = Interval(Interval.__init(interval))
if flag == 'left':
return lst.__mul__([0, x]).interval
elif flag == 'middle':
return lst.__mul__([x, y]).interval
elif flag == 'right':
return lst.__mul__([y, lst.interval[-1][1]]).interval
else:
sys.exit('flag should be "left", "middle", "right"')
@staticmethod
def mapto(interval, index):
'''
mapto(interval, index) -> interval
Map interval onto index.
'''
tmp1 = Interval.__init(interval)
tmp2 = Interval.__init(index)
return Interval.__map(tmp2, tmp1, flag=1)
@staticmethod
def overlapwith(index, interval):
'''
overlapwith(index, interval) -> index
Overlap index with interval.
'''
tmp1 = Interval.__init(index)
tmp2 = Interval.__init(interval)
return Interval.__map(tmp1, tmp2, flag=0)
@staticmethod
def __convert(interval):
assert type(interval) is list, 'the type is {}'.format(type(interval))
if not interval:
return interval
if type(interval[0]) is list:
return interval
else:
return [interval]
@staticmethod
def __init(interval):
mapping = [[int(i[0]), int(i[1])] + i[2:]
for i in Interval.__convert(interval)]
mapping.sort()
return mapping
@staticmethod
def __map(index, interval, flag):
mapped_fragment = []
tmp_fragment = []
if not interval:
if flag:
return mapped_fragment
else:
return index
for dex in index:
dex_info = dex[2:]
while True:
try:
fragment = interval.pop(0)
except IndexError:
if tmp_fragment:
interval.extend(tmp_fragment)
tmp_fragment = []
continue
else:
if flag:
return mapped_fragment
else:
return index
if fragment[0] >= dex[1]:
interval.insert(0, fragment)
interval[0:0] = tmp_fragment
tmp_fragment = []
break
elif dex[0] < fragment[1] and dex[1] > fragment[0]:
dex += fragment[2:]
sta = dex[0] if dex[0] > fragment[0] else fragment[0]
end = dex[1] if dex[1] < fragment[1] else fragment[1]
new_fragment = [sta, end] + fragment[2:] + dex_info
mapped_fragment.append(new_fragment)
if fragment[1] > dex[1]:
tmp_fragment.append([dex[1],
fragment[1]] + fragment[2:])
else:
if flag:
return mapped_fragment
else:
return index
| en | 0.679238 | interval.py - Deal with intervals. author: <NAME> <<EMAIL>> version: 1.0 # copied and modified from https://github.com/kepbod/interval Class: Interval Maintainer: <NAME> Version: 1.0 Usage: a = Interval(list) (nested list: [[x,x,f1...],[x,x,f2...]...] / [[x,x],[x,x]...] or simple list: [x,x,f1...] / [x,x]) Notes: all the intervals in the list will become mutually exclusive and be sorted after instantiation. For example: input: [[1, 10, 'a'], [17, 22, 'b'], [7, 12, 'c'], [20, 25, 'd'], [30, 35, 'e']] output: [[1, 12, 'a', 'c'], [17, 25, 'b', 'd'], [30, 35, 'e']] Attributes: interval Functions: c = a + b or a += b c = b + a c = a * b or a *= b c = b * a c = a - b or a -= b c = b - a a[n] or a[n:m] [x, x] in a or [[x, x], [x, x]] not in a a.complement(sta, end) a.extractwith(b) a.extractwithout(b) mapto(interval, index) -> interval overlapwith(index, interval) -> index Usage: len(c) length of interval c Usage: c = a + b or a += b extract union intervals, 'a' should be instance. Usage: c = b + a extract union intervals, 'a' should be instance. Usage: c = a * b or a *= b extract intersection intervals, 'a' should be instance. Usage: c = b * a extract intersection intervals, 'a' should be instance. Usage: c = a - b or a -= b extract difference intervals, 'a' should be instance. Usage: c = b - a extract difference intervals, 'a' should be instance. Usage: a[n] or a[n:m] intercept index and slice on interval objects. print objects. Usage: [x, x] in a or [[x, x], [x, x]] not in a judge whether interval is in a or not, 'a' should be instance. Usage: a.complement(sta, end) complement of 'a'. Usage: a.extractwith(b) extract intervals in 'b'. Usage: a.extractwithout(b) extract intervals not in 'b'. split(interval, x, y, flag) -> interval split interval based on x and y. mapto(interval, index) -> interval Map interval onto index. overlapwith(index, interval) -> index Overlap index with interval. | 3.555102 | 4 |
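A short usage sketch exercising the documented operations; the expected results follow from the implementation shown above:

a = Interval([[1, 10], [17, 22]])
b = Interval([[7, 12], [20, 25]])

print(a + b)        # union        -> [[1, 12], [17, 25]]
print(a * b)        # intersection -> [[7, 10], [20, 22]]
print(a - b)        # difference   -> [[1, 7], [17, 20]]
print([8, 9] in a)  # membership   -> True

a.complement(0, 30) # in-place: gaps of 'a' within [0, 30]
print(a)            # -> [[0, 1], [10, 17], [22, 30]]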
addon/models.py | flavours/registry-proof-of-concept | 0 | 6631242 | <filename>addon/models.py<gh_stars>0
from addon.fields import NonStrippingTextField
from core.models import UUIDPrimaryKeyMixin
from django.contrib.postgres.fields import JSONField
from django.db import models
from markupfield.fields import MarkupField
from rest_framework.reverse import reverse
class Stack(UUIDPrimaryKeyMixin, models.Model):
identifier = models.SlugField(max_length=30)
def __str__(self):
return f"{self.identifier}"
def get_api_url(self, request=None):
return reverse("stack-detail", args=[self.pk], request=request)
class Addon(UUIDPrimaryKeyMixin, models.Model):
namespace = models.ForeignKey("namespace.Namespace", related_name="addons")
identifier = models.CharField(max_length=255)
description = MarkupField(help_text="in markdown")
def __str__(self):
return f"{self.namespace}/{self.identifier}"
class AddonVersion(UUIDPrimaryKeyMixin, models.Model):
addon = models.ForeignKey("Addon", related_name="addonversions")
identifier = models.CharField(
max_length=255, help_text="`1.0` or `master` or `1.2-beta`"
)
yaml = NonStrippingTextField()
config = JSONField(blank=True, default=dict)
stacks = models.ManyToManyField(
"Stack", help_text="Stacks this tag of the addon supports"
)
def __str__(self):
return f"{self.addon}:{self.identifier}"
| <filename>addon/models.py<gh_stars>0
from addon.fields import NonStrippingTextField
from core.models import UUIDPrimaryKeyMixin
from django.contrib.postgres.fields import JSONField
from django.db import models
from markupfield.fields import MarkupField
from rest_framework.reverse import reverse
class Stack(UUIDPrimaryKeyMixin, models.Model):
identifier = models.SlugField(max_length=30)
def __str__(self):
return f"{self.identifier}"
def get_api_url(self, request=None):
return reverse("stack-detail", args=[self.pk], request=request)
class Addon(UUIDPrimaryKeyMixin, models.Model):
namespace = models.ForeignKey("namespace.Namespace", related_name="addons")
identifier = models.CharField(max_length=255)
description = MarkupField(help_text="in markdown")
def __str__(self):
return f"{self.namespace}/{self.identifier}"
class AddonVersion(UUIDPrimaryKeyMixin, models.Model):
addon = models.ForeignKey("Addon", related_name="addonversions")
identifier = models.CharField(
max_length=255, help_text="`1.0` or `master` or `1.2-beta`"
)
yaml = NonStrippingTextField()
config = JSONField(blank=True, default=dict)
stacks = models.ManyToManyField(
"Stack", help_text="Stacks this tag of the addon supports"
)
def __str__(self):
return f"{self.addon}:{self.identifier}"
| none | 1 | 2.299134 | 2 |
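A hedged sketch of how these models might be populated from a Django shell (values are illustrative; it assumes a namespace.Namespace row already exists and that migrations have been applied):

from addon.models import Stack, Addon, AddonVersion
from namespace.models import Namespace    # assumed location of the Namespace model

ns = Namespace.objects.first()             # any existing namespace
stack = Stack.objects.create(identifier="django")

addon = Addon.objects.create(
    namespace=ns,
    identifier="django-cms",
    description="A CMS addon, described *in markdown*.",
)

version = AddonVersion.objects.create(
    addon=addon,
    identifier="1.0",
    yaml="services:\n  web:\n    image: example/image\n",
    config={"requires": ["python3"]},
)
version.stacks.add(stack)                  # tag the stacks this version supports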
|
Mundo 1 Fundamentos/ex014.py | costa53/curso_em_video_python3 | 1 | 6631243 | <reponame>costa53/curso_em_video_python3
# DESAFIO 014
# Escreva um programa que converta uma temperatura digitada em °C e a converta para °F.
c = float(input('Informe a temperatura em °C: '))
f = (c * 1.8) + 32
print(f'A temperatura de {c}°C corresponde a {f:.1f}°F!')
| # DESAFIO 014
# Escreva um programa que converta uma temperatura digitada em °C e a converta para °F.
c = float(input('Informe a temperatura em °C: '))
f = (c * 1.8) + 32
print(f'A temperatura de {c}°C corresponde a {f:.1f}°F!') | pt | 0.960902 | # DESAFIO 014 # Escreva um programa que converta uma temperatura digitada em °C e a converta para °F. | 4.31714 | 4 |
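The conversion above uses F = C * 1.8 + 32; a quick self-check plus the inverse formula, for reference:

def c_to_f(c):
    return c * 1.8 + 32

def f_to_c(f):
    return (f - 32) / 1.8

assert abs(c_to_f(25) - 77.0) < 1e-9   # 25 °C is 77 °F
assert abs(f_to_c(77.0) - 25.0) < 1e-9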
t3f/ops_test.py | towadroid/t3f | 217 | 6631244 | import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
from t3f.tensor_train import TensorTrain
from t3f.tensor_train_batch import TensorTrainBatch
from t3f import ops
from t3f import shapes
from t3f import initializers
class _TTTensorTest():
def testFullTensor2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(10, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank, 9).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 10, rank), b.reshape(rank, 9, 1))
desired = np.dot(a, b)
tf_tens = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFullTensor3d(self):
np.random.seed(1)
for rank_1 in [1, 2]:
a = np.random.rand(10, rank_1).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank_1, 9, 3).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(3, 8).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 10, rank_1), b, c.reshape((3, 8, 1)))
# Basically do full by hand.
desired = a.dot(b.reshape((rank_1, -1)))
desired = desired.reshape((-1, 3)).dot(c)
desired = desired.reshape(10, 9, 8)
tf_tens = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFlatInnerTTTensbyTTTens(self):
# Inner product between two TT-tensors.
shape_list = ((2, 2),
(2, 3, 4),
(4, 2, 5, 2))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt_1 = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
tt_2 = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
res_actual = ops.flat_inner(tt_1, tt_2)
tt_1_full = tf.reshape(ops.full(tt_1), (1, -1))
tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1))
res_desired = tf.matmul(tt_1_full, tt_2_full)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, np.squeeze(res_desired_val),
rtol=1e-5)
def testFlatInnerTTTensbySparseTens(self):
# Inner product between a TT-tensor and a sparse tensor.
shape_list = ((2, 2),
(2, 3, 4),
(4, 2, 5, 2))
rank_list = (1, 2)
np.random.seed(1)
for shape in shape_list:
for rank in rank_list:
for num_elements in [1, 10]:
tt_1 = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
sparse_flat_indices = np.random.choice(np.prod(shape), num_elements)
sparse_flat_indices = sparse_flat_indices.astype(int)
sparse_indices = np.unravel_index(sparse_flat_indices, shape)
sparse_indices = np.vstack(sparse_indices).transpose()
values = np.random.randn(num_elements)
values = values.astype(self.dtype.as_numpy_dtype)
sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values,
dense_shape=shape)
res_actual = ops.flat_inner(tt_1, sparse_2)
res_actual_val, tt_1_val = self.evaluate([res_actual, ops.full(tt_1)])
res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values)
self.assertAllClose(res_actual_val, res_desired_val)
def testAdd(self):
# Sum two TT-tensors.
tt_a = initializers.random_tensor((2, 1, 3, 4), tt_rank=2,
dtype=self.dtype)
tt_b = initializers.random_tensor((2, 1, 3, 4), tt_rank=[1, 2, 4, 3, 1],
dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_a + tt_b)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiply(self):
# Multiply two TT-tensors.
tt_a = initializers.random_tensor((1, 2, 3, 4), tt_rank=2,
dtype=self.dtype)
tt_b = initializers.random_tensor((1, 2, 3, 4), tt_rank=[1, 1, 4, 3, 1],
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(tt_a * tt_b)
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyByNumber(self):
# Multiply a tensor by a number.
tt = initializers.random_tensor((1, 2, 3), tt_rank=(1, 2, 3, 1),
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt, 4))
res_actual2 = ops.full(4.0 * tt)
res_desired = 4.0 * ops.full(tt)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testFrobeniusNormTens(self):
# Frobenius norm of a TT-tensor.
shape_list = ((2, 2),
(2, 3, 4),
(4, 2, 5, 2))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
norm_sq_actual = ops.frobenius_norm_squared(tt)
norm_actual = ops.frobenius_norm(tt, epsilon=0.0)
vars = [norm_sq_actual, norm_actual, ops.full(tt)]
norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars)
tt_val = tt_val.flatten()
norm_sq_desired_val = tt_val.dot(tt_val)
norm_desired_val = np.linalg.norm(tt_val)
self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val)
self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5,
rtol=1e-5)
def testCastFloat(self):
# Test cast function for float tt-tensors.
tt_x = initializers.random_tensor((2, 3, 2), tt_rank=2)
casted = ops.cast(tt_x, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
self.assertTrue(self.dtype, casted_val.dtype)
def testCastIntFloat(self):
# Tests cast function from int to float for tensors.
np.random.seed(1)
K_1 = np.random.randint(0, high=100, size=(1, 2, 2))
K_2 = np.random.randint(0, high=100, size=(2, 3, 2))
K_3 = np.random.randint(0, high=100, size=(2, 2, 1))
tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1])
casted = ops.cast(tt_int, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
self.assertTrue(self.dtype, casted_val.dtype)
def testCoreRenorm(self):
a = initializers.random_tensor(3 * (10,), tt_rank=7,
dtype=self.dtype)
b = ops.renormalize_tt_cores(a)
var_list = [ops.full(a), ops.full(b)]
af, bf = self.evaluate(var_list)
b_cores = self.evaluate(b.tt_cores)
b_cores_norms = []
for cr in b_cores:
b_cores_norms.append(np.linalg.norm(cr))
self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5)
self.assertAllClose(b_cores_norms, b_cores_norms[0]
* np.ones((len(b_cores))))
class _TTMatrixTest():
def testFullMatrix2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank, 4, 5).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 2, 3, rank), b.reshape((rank, 4, 5, 1)))
# Basically do full by hand.
desired = a.reshape((-1, rank)).dot(b.reshape((rank, -1)))
desired = desired.reshape((2, 3, 4, 5))
desired = desired.transpose((0, 2, 1, 3))
desired = desired.reshape((2 * 4, 3 * 5))
tf_mat = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testFullMatrix3d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank, 4, 5, rank).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(rank, 2, 2).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 2, 3, rank), b.reshape(rank, 4, 5, rank),
c.reshape(rank, 2, 2, 1))
# Basically do full by hand.
desired = a.reshape((-1, rank)).dot(b.reshape((rank, -1)))
desired = desired.reshape((-1, rank)).dot(c.reshape((rank, -1)))
desired = desired.reshape((2, 3, 4, 5, 2, 2))
desired = desired.transpose((0, 2, 4, 1, 3, 5))
desired = desired.reshape((2 * 4 * 2, 3 * 5 * 2))
tf_mat = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testTTMatTimesTTMat(self):
# Multiply a TT-matrix by another TT-matrix.
left_shape = (2, 3, 4)
sum_shape = (4, 3, 5)
right_shape = (4, 4, 4)
tt_mat_1 = initializers.random_matrix((left_shape, sum_shape), tt_rank=3,
dtype=self.dtype)
tt_mat_2 = initializers.random_matrix((sum_shape, right_shape),
dtype=self.dtype)
res_actual = ops.matmul(tt_mat_1, tt_mat_2)
res_actual = ops.full(res_actual)
res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2))
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
# TODO: why so bad accuracy?
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4)
def testTTMatTimesDenseVec(self):
# Multiply a TT-matrix by a dense vector.
inp_shape = (2, 3, 4)
out_shape = (3, 4, 3)
np.random.seed(1)
vec = np.random.rand(np.prod(inp_shape), 1).astype(self.dtype.as_numpy_dtype)
tf_vec = tf.constant(vec)
tf.compat.v1.set_random_seed(1)
tt_mat = initializers.random_matrix((out_shape, inp_shape),
dtype=self.dtype)
res_actual = ops.matmul(tt_mat, tf_vec)
res_desired = tf.matmul(ops.full(tt_mat), tf_vec)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, res_desired_val)
def testDenseMatTimesTTVec(self):
    # Multiply a dense matrix by a TT-vector.
inp_shape = (3, 3, 3, 3)
out_shape = (3, 3, 3, 3)
np.random.seed(1)
mat = np.random.rand(np.prod(out_shape), np.prod(inp_shape))
mat = mat.astype(self.dtype.as_numpy_dtype)
tf_mat = tf.constant(mat)
tf.compat.v1.set_random_seed(1)
tt_vec = initializers.random_matrix((inp_shape, None),
dtype=self.dtype)
res_actual = ops.matmul(tf_mat, tt_vec)
res_desired = tf.matmul(tf_mat, ops.full(tt_vec))
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4)
def testFlatInnerTTMatbyTTMat(self):
# Inner product between two TT-Matrices.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt_1 = initializers.random_matrix(shape, tt_rank=rank,
dtype=self.dtype)
tt_2 = initializers.random_matrix(shape, tt_rank=rank,
dtype=self.dtype)
res_actual = ops.flat_inner(tt_1, tt_2)
tt_1_full = tf.reshape(ops.full(tt_1), (1, -1))
tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1))
res_desired = tf.matmul(tt_1_full, tt_2_full)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, np.squeeze(res_desired_val),
rtol=1e-5, atol=1e-5)
def testFlatInnerTTMatbySparseMat(self):
# Inner product between a TT-matrix and a sparse matrix.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
np.random.seed(1)
for tensor_shape in shape_list:
for rank in rank_list:
for num_elements in [1, 9]:
tt_1 = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
matrix_shape = np.prod(tensor_shape[0]), np.prod(tensor_shape[1])
sparse_flat_indices = np.random.choice(np.prod(matrix_shape), num_elements)
sparse_flat_indices = sparse_flat_indices.astype(int)
sparse_indices = np.unravel_index(sparse_flat_indices, matrix_shape)
sparse_indices = np.vstack(sparse_indices).transpose()
values = np.random.randn(num_elements).astype(self.dtype.as_numpy_dtype)
sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values,
dense_shape=matrix_shape)
res_actual = ops.flat_inner(tt_1, sparse_2)
res_actual_val, tt_1_val = self.evaluate([res_actual, ops.full(tt_1)])
res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values)
self.assertAllClose(res_actual_val, res_desired_val)
def testFrobeniusNormMatrix(self):
# Frobenius norm of a TT-matrix.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
tt = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
norm_sq_actual = ops.frobenius_norm_squared(tt)
norm_actual = ops.frobenius_norm(tt)
vars = [norm_sq_actual, norm_actual, ops.full(tt)]
norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars)
tt_val = tt_val.flatten()
norm_sq_desired_val = tt_val.dot(tt_val)
norm_desired_val = np.linalg.norm(tt_val)
self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val)
self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5,
rtol=1e-5)
def testTranspose(self):
# Transpose a TT-matrix.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
tt = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
res_actual = ops.full(ops.transpose(tt))
res_actual_val, tt_val = self.evaluate([res_actual, ops.full(tt)])
self.assertAllClose(tt_val.transpose(), res_actual_val)
def testBilinearForm(self):
# Test bilinear form.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
A = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
b = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank,
dtype=self.dtype)
c = initializers.random_matrix((tensor_shape[1], None), tt_rank=rank,
dtype=self.dtype)
res_actual = ops.bilinear_form(A, b, c)
vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)]
res_actual_val, A_val, b_val, c_val = self.evaluate(vars)
res_desired = b_val.T.dot(A_val).dot(c_val)
self.assertAllClose(res_actual_val, np.squeeze(res_desired),
atol=1e-5, rtol=1e-5)
def testBilinearFormBatch(self):
# Test bilinear form for batch of tensors.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
A = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
b = initializers.random_matrix_batch((tensor_shape[0], None),
tt_rank=rank, batch_size=5,
dtype=self.dtype)
c = initializers.random_matrix_batch((tensor_shape[1], None),
tt_rank=rank, batch_size=5,
dtype=self.dtype)
res_actual = ops.bilinear_form(A, b, c)
vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)]
res_actual_val, A_val, b_val, c_val = self.evaluate(vars)
res_desired = np.diag(b_val[:, :, 0].dot(A_val).dot(c_val[:, :, 0].T))
self.assertAllClose(res_actual_val, np.squeeze(res_desired),
atol=1e-5, rtol=1e-5)
def testBilinearFormTwoMat(self):
# Test bilinear_form_two_mat.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
A = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
B = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
B = ops.transpose(B)
x = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank,
dtype=self.dtype)
y = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank,
dtype=self.dtype)
res_actual = ops.bilinear_form_two_mat(x, A, B, y)
vars = [res_actual, ops.full(x), ops.full(A), ops.full(B), ops.full(y)]
res_actual_val, x_val, A_val, B_val, y_val = self.evaluate(vars)
res_desired = x_val.T.dot(A_val).dot(B_val).dot(y_val)
self.assertAllClose(res_actual_val, np.squeeze(res_desired),
atol=1e-5, rtol=1e-5)
def testCastFloat(self):
# Test cast function for float tt-matrices and vectors.
tt_mat = initializers.random_matrix(((2, 3), (3, 2)), tt_rank=2)
tt_vec = initializers.random_matrix(((2, 3), None), tt_rank=2)
for tt in [tt_mat, tt_vec]:
casted = ops.cast(tt, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
      self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def testCastIntFloat(self):
# Tests cast function from int to float for matrices.
np.random.seed(1)
K_1 = np.random.randint(0, high=100, size=(1, 2, 2, 2))
K_2 = np.random.randint(0, high=100, size=(2, 3, 3, 2))
K_3 = np.random.randint(0, high=100, size=(2, 2, 2, 1))
tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1])
casted = ops.cast(tt_int, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
class _TTTensorBatchTest():
def testFullTensor2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(3, 10, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank, 9).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 10, rank), b.reshape(3, rank, 9, 1))
desired = np.einsum('oib,obj->oij', a, b)
tf_tens = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFullTensor3d(self):
np.random.seed(1)
for rank_1 in [1, 2]:
a = np.random.rand(3, 10, rank_1).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank_1, 9, 3).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(3, 3, 8).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 10, rank_1), b, c.reshape((3, 3, 8, 1)))
# Basically do full by hand.
desired = np.einsum('oia,oajb,obk->oijk', a, b, c)
tf_tens = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFlatInnerTTTensbyTTTensSameBatchSize(self):
# Inner product between two batch TT-tensors of the same batch_size.
shape_list = ((2, 2),
(2, 3, 4))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt_1 = initializers.random_tensor_batch(shape, tt_rank=rank,
batch_size=2,
dtype=self.dtype)
tt_2 = initializers.random_tensor_batch(shape, tt_rank=rank,
batch_size=2,
dtype=self.dtype)
res_actual = ops.flat_inner(tt_1, tt_2)
tt_1_full = tf.reshape(ops.full(tt_1), (2, 1, -1))
tt_2_full = tf.reshape(ops.full(tt_2), (2, -1, 1))
res_desired = tf.matmul(tt_1_full, tt_2_full)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, np.squeeze(res_desired_val))
def testFlatInnerTTTensbyTTTensBroadcasting(self):
# Inner product between two batch TT-tensors with broadcasting.
tt_1 = initializers.random_tensor_batch((2, 3, 4), batch_size=1,
dtype=self.dtype)
tt_2 = initializers.random_tensor_batch((2, 3, 4), batch_size=3,
dtype=self.dtype)
res_actual_1 = ops.flat_inner(tt_1, tt_2)
res_actual_2 = ops.flat_inner(tt_2, tt_1)
res_desired = tf.einsum('ijk,oijk->o', ops.full(tt_1[0]), ops.full(tt_2))
res = self.evaluate([res_actual_1, res_actual_2, res_desired])
res_actual_1_val, res_actual_2_val, res_desired_val = res
self.assertAllClose(res_actual_1_val, res_desired_val)
self.assertAllClose(res_actual_2_val, res_desired_val)
tt_1 = initializers.random_tensor_batch((2, 3, 4), batch_size=2,
dtype=self.dtype)
with self.assertRaises(ValueError):
# The batch_sizes are different.
ops.flat_inner(tt_1, tt_2)
def testAddSameBatchSize(self):
# Sum two TT-tensors with the same batch size.
tt_a = initializers.random_tensor_batch((2, 1, 4), tt_rank=2, batch_size=3,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((2, 1, 4), tt_rank=[1, 2, 4, 1],
batch_size=3, dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_a + tt_b)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testAddBroadcasting(self):
# Sum two TT-tensors with broadcasting.
tt_a = initializers.random_tensor_batch((2, 1, 4), tt_rank=2, batch_size=1,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((2, 1, 4), tt_rank=[1, 2, 4, 1],
batch_size=3, dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_b + tt_a)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyByNumber(self):
# Multiply batch of tensors by a number.
tt = initializers.random_tensor_batch((1, 2, 3), tt_rank=(1, 2, 3, 1),
batch_size=3, dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt, 4))
res_actual2 = ops.full(4.0 * tt)
res_desired = 4.0 * ops.full(tt)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testFrobeniusNormDifferentiableBatch(self):
tt = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
norm_sq_diff = ops.frobenius_norm_squared(tt, differentiable=True)
variables = [norm_sq_diff, ops.full(tt)]
norm_sq_diff_val, tt_full = self.evaluate(variables)
desired_norm = np.linalg.norm(tt_full.reshape((5, -1)), axis=1)**2
self.assertAllClose(norm_sq_diff_val, desired_norm, atol=1e-5, rtol=1e-5)
def testFrobeniusNormTens(self):
# Frobenius norm of a batch of TT-tensors.
tt = initializers.tensor_batch_with_random_cores((2, 2, 3), batch_size=3,
tt_rank=2,
dtype=self.dtype)
norm_sq_actual = ops.frobenius_norm_squared(tt)
norm_actual = ops.frobenius_norm(tt, epsilon=0.0)
vars = [norm_sq_actual, norm_actual, ops.full(tt)]
norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars)
tt_val = tt_val.reshape((3, -1))
norm_sq_desired_val = np.sum(tt_val * tt_val, axis=1)
norm_desired_val = np.sqrt(norm_sq_desired_val)
self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val)
self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5,
rtol=1e-5)
def testMultiplyBatchByTensor(self):
tt_a = initializers.random_tensor((3, 3, 3), tt_rank=2, dtype=self.dtype)
tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(ops.multiply(tt_b, tt_a))
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyBatchByBatch(self):
tt_a = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(ops.multiply(tt_b, tt_a))
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyBroadcasting(self):
tt_a = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=1,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(ops.multiply(tt_b, tt_a))
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testGatherND(self):
idx = [[0, 0, 0], [0, 1, 2], [0, 1, 0]]
tt = initializers.random_tensor((3, 4, 5), tt_rank=2, dtype=self.dtype)
res_np = ops.gather_nd(tt, idx)
res_desired = tf.gather_nd(ops.full(tt), idx)
to_run = [res_np, res_desired]
res_np_v, des_v = self.evaluate(to_run)
self.assertAllClose(res_np_v, des_v)
def testGatherNDBatch(self):
idx = [[0, 0, 0, 0], [1, 0, 1, 2], [0, 0, 1, 0]]
tt = initializers.random_tensor_batch((3, 4, 5), tt_rank=2, batch_size=2,
dtype=self.dtype)
res_np = ops.gather_nd(tt, idx)
res_desired = tf.gather_nd(ops.full(tt), idx)
to_run = [res_np, res_desired]
res_np_v, des_v = self.evaluate(to_run)
self.assertAllClose(res_np_v, des_v)
def testCoreRenormBatch(self):
a = initializers.random_tensor_batch(3 * (10,), tt_rank=7, batch_size=5,
dtype=self.dtype)
b = ops.renormalize_tt_cores(a)
var_list = [ops.full(a), ops.full(b)]
af, bf = self.evaluate(var_list)
b_cores = self.evaluate(b.tt_cores)
b_cores_norms = []
for cr in b_cores:
b_cores_norms.append(np.linalg.norm(cr))
self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5)
self.assertAllClose(b_cores_norms, b_cores_norms[0]
* np.ones((len(b_cores))))
class _TTMatrixTestBatch():
def testFullMatrix2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(3, 2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank, 4, 5).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 2, 3, rank), b.reshape((3, rank, 4, 5, 1)))
# Basically do full by hand.
desired = np.einsum('oijb,obkl->oijkl', a, b)
desired = desired.reshape((3, 2, 3, 4, 5))
desired = desired.transpose((0, 1, 3, 2, 4))
desired = desired.reshape((3, 2 * 4, 3 * 5))
tf_mat = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testFullMatrix3d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(3, 2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank, 4, 5, rank).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(3, rank, 2, 2).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 2, 3, rank), b.reshape(3, rank, 4, 5, rank),
c.reshape(3, rank, 2, 2, 1))
# Basically do full by hand.
desired = np.einsum('oija,oaklb,obpq->oijklpq', a, b, c)
desired = desired.reshape((3, 2, 3, 4, 5, 2, 2))
desired = desired.transpose((0, 1, 3, 5, 2, 4, 6))
desired = desired.reshape((3, 2 * 4 * 2, 3 * 5 * 2))
tf_mat = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testTTMatTimesTTMatSameBatchSize(self):
# Multiply a batch of TT-matrices by another batch of TT-matrices with the
# same batch sizes.
left_shape = (2, 3)
sum_shape = (4, 3)
right_shape = (4, 4)
tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape),
tt_rank=3, batch_size=3,
dtype=self.dtype)
tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape),
batch_size=3,
dtype=self.dtype)
res_actual = ops.matmul(tt_mat_1, tt_mat_2)
res_actual = ops.full(res_actual)
res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2))
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
# TODO: why so bad accuracy?
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5)
def testTTMatTimesTTMatBroadcasting(self):
# Multiply a batch of TT-matrices by another batch of TT-matrices with
# broadcasting.
left_shape = (2, 3)
sum_shape = (4, 3)
right_shape = (4, 4)
tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape),
tt_rank=3, batch_size=3,
dtype=self.dtype)
tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape),
dtype=self.dtype)
# TT-batch by one element TT-batch
res_actual = ops.matmul(tt_mat_1, tt_mat_2)
res_actual = ops.full(res_actual)
# TT by TT-batch.
res_actual2 = ops.matmul(ops.transpose(tt_mat_2[0]), ops.transpose(tt_mat_1))
res_actual2 = ops.full(ops.transpose(res_actual2))
res_desired = tf.einsum('oij,jk->oik', ops.full(tt_mat_1),
ops.full(tt_mat_2[0]))
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5)
self.assertAllClose(res_actual2_val, res_desired_val, atol=1e-5,
rtol=1e-5)
def testTranspose(self):
# Transpose a batch of TT-matrices.
tt = initializers.random_matrix_batch(((2, 3, 4), (2, 2, 2)),
batch_size=2, dtype=self.dtype)
res_actual = ops.full(ops.transpose(tt))
res_actual_val, tt_val = self.evaluate([res_actual, ops.full(tt)])
self.assertAllClose(tt_val.transpose((0, 2, 1)), res_actual_val)
def testAddSameBatchSize(self):
# Sum two TT-matrices with the same batch size.
tt_a = initializers.random_matrix_batch(((2, 1, 4), None), tt_rank=2,
batch_size=3, dtype=self.dtype)
tt_b = initializers.random_matrix_batch(((2, 1, 4), None),
tt_rank=[1, 2, 4, 1], batch_size=3,
dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_a + tt_b)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testAddBroadcasting(self):
# Sum two TT-matrices with broadcasting.
tt_a = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)), tt_rank=2,
batch_size=3, dtype=self.dtype)
tt_b = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)),
tt_rank=[1, 2, 4, 1], batch_size=1,
dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_b + tt_a)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testCastFloat(self):
# Test cast function for float tt-matrices and vectors.
tt_mat = initializers.random_matrix_batch(((2, 3), (3, 2)), tt_rank=2,
batch_size=3)
casted = ops.cast(tt_mat, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def testCastIntFloat(self):
# Tests cast function from int to float for matrices.
np.random.seed(1)
K_1 = np.random.randint(0, high=100, size=(1, 2, 2, 2))
K_2 = np.random.randint(0, high=100, size=(2, 3, 3, 2))
K_3 = np.random.randint(0, high=100, size=(2, 2, 2, 1))
tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1])
tt_int_batch = shapes.expand_batch_dim(tt_int)
casted = ops.cast(tt_int_batch, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def _random_sparse(shape, non_zeros, dtype):
  """Return a random tf.SparseTensor of the given dense shape with `non_zeros` entries of `dtype`."""
  sparse_flat_indices = np.random.choice(np.prod(shape), non_zeros).astype(int)
  sparse_indices = np.unravel_index(sparse_flat_indices, shape)
  sparse_indices = np.vstack(sparse_indices).transpose()
  values = np.random.randn(non_zeros).astype(dtype)
  sparse = tf.SparseTensor(indices=sparse_indices, values=values,
                           dense_shape=shape)
  return sparse
class TTTensorTestFloat32(tf.test.TestCase, _TTTensorTest):
dtype = tf.float32
class TTTensorTestFloat64(tf.test.TestCase, _TTTensorTest):
dtype = tf.float64
class TTMatrixTestFloat32(tf.test.TestCase, _TTMatrixTest):
dtype = tf.float32
class TTMatrixTestFloat64(tf.test.TestCase, _TTMatrixTest):
dtype = tf.float64
class TTTensorBatchTestFloat32(tf.test.TestCase, _TTTensorBatchTest):
dtype = tf.float32
class TTTensorBatchTestFloat64(tf.test.TestCase, _TTTensorBatchTest):
dtype = tf.float64
class TTMatrixTestBatchFloat32(tf.test.TestCase, _TTMatrixTestBatch):
dtype = tf.float32
class TTMatrixTestBatchFloat64(tf.test.TestCase, _TTMatrixTestBatch):
dtype = tf.float64
if __name__ == "__main__":
tf.test.main()
| import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
from t3f.tensor_train import TensorTrain
from t3f.tensor_train_batch import TensorTrainBatch
from t3f import ops
from t3f import shapes
from t3f import initializers
class _TTTensorTest():
def testFullTensor2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(10, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank, 9).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 10, rank), b.reshape(rank, 9, 1))
desired = np.dot(a, b)
tf_tens = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFullTensor3d(self):
np.random.seed(1)
for rank_1 in [1, 2]:
a = np.random.rand(10, rank_1).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank_1, 9, 3).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(3, 8).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 10, rank_1), b, c.reshape((3, 8, 1)))
# Basically do full by hand.
desired = a.dot(b.reshape((rank_1, -1)))
desired = desired.reshape((-1, 3)).dot(c)
desired = desired.reshape(10, 9, 8)
tf_tens = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFlatInnerTTTensbyTTTens(self):
# Inner product between two TT-tensors.
shape_list = ((2, 2),
(2, 3, 4),
(4, 2, 5, 2))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt_1 = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
tt_2 = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
res_actual = ops.flat_inner(tt_1, tt_2)
tt_1_full = tf.reshape(ops.full(tt_1), (1, -1))
tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1))
res_desired = tf.matmul(tt_1_full, tt_2_full)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, np.squeeze(res_desired_val),
rtol=1e-5)
def testFlatInnerTTTensbySparseTens(self):
# Inner product between a TT-tensor and a sparse tensor.
shape_list = ((2, 2),
(2, 3, 4),
(4, 2, 5, 2))
rank_list = (1, 2)
np.random.seed(1)
for shape in shape_list:
for rank in rank_list:
for num_elements in [1, 10]:
tt_1 = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
sparse_flat_indices = np.random.choice(np.prod(shape), num_elements)
sparse_flat_indices = sparse_flat_indices.astype(int)
sparse_indices = np.unravel_index(sparse_flat_indices, shape)
sparse_indices = np.vstack(sparse_indices).transpose()
values = np.random.randn(num_elements)
values = values.astype(self.dtype.as_numpy_dtype)
sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values,
dense_shape=shape)
res_actual = ops.flat_inner(tt_1, sparse_2)
res_actual_val, tt_1_val = self.evaluate([res_actual, ops.full(tt_1)])
res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values)
self.assertAllClose(res_actual_val, res_desired_val)
def testAdd(self):
# Sum two TT-tensors.
tt_a = initializers.random_tensor((2, 1, 3, 4), tt_rank=2,
dtype=self.dtype)
tt_b = initializers.random_tensor((2, 1, 3, 4), tt_rank=[1, 2, 4, 3, 1],
dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_a + tt_b)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiply(self):
# Multiply two TT-tensors.
tt_a = initializers.random_tensor((1, 2, 3, 4), tt_rank=2,
dtype=self.dtype)
tt_b = initializers.random_tensor((1, 2, 3, 4), tt_rank=[1, 1, 4, 3, 1],
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(tt_a * tt_b)
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyByNumber(self):
# Multiply a tensor by a number.
tt = initializers.random_tensor((1, 2, 3), tt_rank=(1, 2, 3, 1),
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt, 4))
res_actual2 = ops.full(4.0 * tt)
res_desired = 4.0 * ops.full(tt)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testFrobeniusNormTens(self):
# Frobenius norm of a TT-tensor.
shape_list = ((2, 2),
(2, 3, 4),
(4, 2, 5, 2))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt = initializers.random_tensor(shape, tt_rank=rank,
dtype=self.dtype)
norm_sq_actual = ops.frobenius_norm_squared(tt)
norm_actual = ops.frobenius_norm(tt, epsilon=0.0)
vars = [norm_sq_actual, norm_actual, ops.full(tt)]
norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars)
tt_val = tt_val.flatten()
norm_sq_desired_val = tt_val.dot(tt_val)
norm_desired_val = np.linalg.norm(tt_val)
self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val)
self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5,
rtol=1e-5)
def testCastFloat(self):
# Test cast function for float tt-tensors.
tt_x = initializers.random_tensor((2, 3, 2), tt_rank=2)
casted = ops.cast(tt_x, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def testCastIntFloat(self):
# Tests cast function from int to float for tensors.
np.random.seed(1)
K_1 = np.random.randint(0, high=100, size=(1, 2, 2))
K_2 = np.random.randint(0, high=100, size=(2, 3, 2))
K_3 = np.random.randint(0, high=100, size=(2, 2, 1))
tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1])
casted = ops.cast(tt_int, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def testCoreRenorm(self):
a = initializers.random_tensor(3 * (10,), tt_rank=7,
dtype=self.dtype)
b = ops.renormalize_tt_cores(a)
var_list = [ops.full(a), ops.full(b)]
af, bf = self.evaluate(var_list)
b_cores = self.evaluate(b.tt_cores)
b_cores_norms = []
for cr in b_cores:
b_cores_norms.append(np.linalg.norm(cr))
self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5)
self.assertAllClose(b_cores_norms, b_cores_norms[0]
* np.ones((len(b_cores))))
class _TTMatrixTest():
def testFullMatrix2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank, 4, 5).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 2, 3, rank), b.reshape((rank, 4, 5, 1)))
# Basically do full by hand.
desired = a.reshape((-1, rank)).dot(b.reshape((rank, -1)))
desired = desired.reshape((2, 3, 4, 5))
desired = desired.transpose((0, 2, 1, 3))
desired = desired.reshape((2 * 4, 3 * 5))
tf_mat = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testFullMatrix3d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(rank, 4, 5, rank).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(rank, 2, 2).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(1, 2, 3, rank), b.reshape(rank, 4, 5, rank),
c.reshape(rank, 2, 2, 1))
# Basically do full by hand.
desired = a.reshape((-1, rank)).dot(b.reshape((rank, -1)))
desired = desired.reshape((-1, rank)).dot(c.reshape((rank, -1)))
desired = desired.reshape((2, 3, 4, 5, 2, 2))
desired = desired.transpose((0, 2, 4, 1, 3, 5))
desired = desired.reshape((2 * 4 * 2, 3 * 5 * 2))
tf_mat = TensorTrain(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testTTMatTimesTTMat(self):
# Multiply a TT-matrix by another TT-matrix.
left_shape = (2, 3, 4)
sum_shape = (4, 3, 5)
right_shape = (4, 4, 4)
tt_mat_1 = initializers.random_matrix((left_shape, sum_shape), tt_rank=3,
dtype=self.dtype)
tt_mat_2 = initializers.random_matrix((sum_shape, right_shape),
dtype=self.dtype)
res_actual = ops.matmul(tt_mat_1, tt_mat_2)
res_actual = ops.full(res_actual)
res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2))
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
# TODO: why so bad accuracy?
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4)
def testTTMatTimesDenseVec(self):
# Multiply a TT-matrix by a dense vector.
inp_shape = (2, 3, 4)
out_shape = (3, 4, 3)
np.random.seed(1)
vec = np.random.rand(np.prod(inp_shape), 1).astype(self.dtype.as_numpy_dtype)
tf_vec = tf.constant(vec)
tf.compat.v1.set_random_seed(1)
tt_mat = initializers.random_matrix((out_shape, inp_shape),
dtype=self.dtype)
res_actual = ops.matmul(tt_mat, tf_vec)
res_desired = tf.matmul(ops.full(tt_mat), tf_vec)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, res_desired_val)
def testDenseMatTimesTTVec(self):
    # Multiply a dense matrix by a TT-vector.
inp_shape = (3, 3, 3, 3)
out_shape = (3, 3, 3, 3)
np.random.seed(1)
mat = np.random.rand(np.prod(out_shape), np.prod(inp_shape))
mat = mat.astype(self.dtype.as_numpy_dtype)
tf_mat = tf.constant(mat)
tf.compat.v1.set_random_seed(1)
tt_vec = initializers.random_matrix((inp_shape, None),
dtype=self.dtype)
res_actual = ops.matmul(tf_mat, tt_vec)
res_desired = tf.matmul(tf_mat, ops.full(tt_vec))
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4)
def testFlatInnerTTMatbyTTMat(self):
# Inner product between two TT-Matrices.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt_1 = initializers.random_matrix(shape, tt_rank=rank,
dtype=self.dtype)
tt_2 = initializers.random_matrix(shape, tt_rank=rank,
dtype=self.dtype)
res_actual = ops.flat_inner(tt_1, tt_2)
tt_1_full = tf.reshape(ops.full(tt_1), (1, -1))
tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1))
res_desired = tf.matmul(tt_1_full, tt_2_full)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, np.squeeze(res_desired_val),
rtol=1e-5, atol=1e-5)
def testFlatInnerTTMatbySparseMat(self):
# Inner product between a TT-matrix and a sparse matrix.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
np.random.seed(1)
for tensor_shape in shape_list:
for rank in rank_list:
for num_elements in [1, 9]:
tt_1 = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
matrix_shape = np.prod(tensor_shape[0]), np.prod(tensor_shape[1])
sparse_flat_indices = np.random.choice(np.prod(matrix_shape), num_elements)
sparse_flat_indices = sparse_flat_indices.astype(int)
sparse_indices = np.unravel_index(sparse_flat_indices, matrix_shape)
sparse_indices = np.vstack(sparse_indices).transpose()
values = np.random.randn(num_elements).astype(self.dtype.as_numpy_dtype)
sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values,
dense_shape=matrix_shape)
res_actual = ops.flat_inner(tt_1, sparse_2)
res_actual_val, tt_1_val = self.evaluate([res_actual, ops.full(tt_1)])
res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values)
self.assertAllClose(res_actual_val, res_desired_val)
def testFrobeniusNormMatrix(self):
# Frobenius norm of a TT-matrix.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
tt = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
norm_sq_actual = ops.frobenius_norm_squared(tt)
norm_actual = ops.frobenius_norm(tt)
vars = [norm_sq_actual, norm_actual, ops.full(tt)]
norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars)
tt_val = tt_val.flatten()
norm_sq_desired_val = tt_val.dot(tt_val)
norm_desired_val = np.linalg.norm(tt_val)
self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val)
self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5,
rtol=1e-5)
def testTranspose(self):
# Transpose a TT-matrix.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
tt = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
res_actual = ops.full(ops.transpose(tt))
res_actual_val, tt_val = self.evaluate([res_actual, ops.full(tt)])
self.assertAllClose(tt_val.transpose(), res_actual_val)
def testBilinearForm(self):
# Test bilinear form.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
A = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
b = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank,
dtype=self.dtype)
c = initializers.random_matrix((tensor_shape[1], None), tt_rank=rank,
dtype=self.dtype)
res_actual = ops.bilinear_form(A, b, c)
vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)]
res_actual_val, A_val, b_val, c_val = self.evaluate(vars)
res_desired = b_val.T.dot(A_val).dot(c_val)
self.assertAllClose(res_actual_val, np.squeeze(res_desired),
atol=1e-5, rtol=1e-5)
def testBilinearFormBatch(self):
# Test bilinear form for batch of tensors.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
A = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
b = initializers.random_matrix_batch((tensor_shape[0], None),
tt_rank=rank, batch_size=5,
dtype=self.dtype)
c = initializers.random_matrix_batch((tensor_shape[1], None),
tt_rank=rank, batch_size=5,
dtype=self.dtype)
res_actual = ops.bilinear_form(A, b, c)
vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)]
res_actual_val, A_val, b_val, c_val = self.evaluate(vars)
res_desired = np.diag(b_val[:, :, 0].dot(A_val).dot(c_val[:, :, 0].T))
self.assertAllClose(res_actual_val, np.squeeze(res_desired),
atol=1e-5, rtol=1e-5)
def testBilinearFormTwoMat(self):
# Test bilinear_form_two_mat.
shape_list = (((2, 2), (3, 4)),
((2, 3, 4), (2, 2, 2)))
rank_list = (1, 2)
for tensor_shape in shape_list:
for rank in rank_list:
A = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
B = initializers.random_matrix(tensor_shape, tt_rank=rank,
dtype=self.dtype)
B = ops.transpose(B)
x = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank,
dtype=self.dtype)
y = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank,
dtype=self.dtype)
res_actual = ops.bilinear_form_two_mat(x, A, B, y)
vars = [res_actual, ops.full(x), ops.full(A), ops.full(B), ops.full(y)]
res_actual_val, x_val, A_val, B_val, y_val = self.evaluate(vars)
res_desired = x_val.T.dot(A_val).dot(B_val).dot(y_val)
self.assertAllClose(res_actual_val, np.squeeze(res_desired),
atol=1e-5, rtol=1e-5)
def testCastFloat(self):
# Test cast function for float tt-matrices and vectors.
tt_mat = initializers.random_matrix(((2, 3), (3, 2)), tt_rank=2)
tt_vec = initializers.random_matrix(((2, 3), None), tt_rank=2)
for tt in [tt_mat, tt_vec]:
casted = ops.cast(tt, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
      self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def testCastIntFloat(self):
# Tests cast function from int to float for matrices.
np.random.seed(1)
K_1 = np.random.randint(0, high=100, size=(1, 2, 2, 2))
K_2 = np.random.randint(0, high=100, size=(2, 3, 3, 2))
K_3 = np.random.randint(0, high=100, size=(2, 2, 2, 1))
tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1])
casted = ops.cast(tt_int, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
class _TTTensorBatchTest():
def testFullTensor2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(3, 10, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank, 9).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 10, rank), b.reshape(3, rank, 9, 1))
desired = np.einsum('oib,obj->oij', a, b)
tf_tens = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFullTensor3d(self):
np.random.seed(1)
for rank_1 in [1, 2]:
a = np.random.rand(3, 10, rank_1).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank_1, 9, 3).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(3, 3, 8).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 10, rank_1), b, c.reshape((3, 3, 8, 1)))
# Basically do full by hand.
desired = np.einsum('oia,oajb,obk->oijk', a, b, c)
tf_tens = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_tens))
self.assertAllClose(desired, actual)
def testFlatInnerTTTensbyTTTensSameBatchSize(self):
# Inner product between two batch TT-tensors of the same batch_size.
shape_list = ((2, 2),
(2, 3, 4))
rank_list = (1, 2)
for shape in shape_list:
for rank in rank_list:
tt_1 = initializers.random_tensor_batch(shape, tt_rank=rank,
batch_size=2,
dtype=self.dtype)
tt_2 = initializers.random_tensor_batch(shape, tt_rank=rank,
batch_size=2,
dtype=self.dtype)
res_actual = ops.flat_inner(tt_1, tt_2)
tt_1_full = tf.reshape(ops.full(tt_1), (2, 1, -1))
tt_2_full = tf.reshape(ops.full(tt_2), (2, -1, 1))
res_desired = tf.matmul(tt_1_full, tt_2_full)
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
self.assertAllClose(res_actual_val, np.squeeze(res_desired_val))
def testFlatInnerTTTensbyTTTensBroadcasting(self):
# Inner product between two batch TT-tensors with broadcasting.
tt_1 = initializers.random_tensor_batch((2, 3, 4), batch_size=1,
dtype=self.dtype)
tt_2 = initializers.random_tensor_batch((2, 3, 4), batch_size=3,
dtype=self.dtype)
res_actual_1 = ops.flat_inner(tt_1, tt_2)
res_actual_2 = ops.flat_inner(tt_2, tt_1)
res_desired = tf.einsum('ijk,oijk->o', ops.full(tt_1[0]), ops.full(tt_2))
res = self.evaluate([res_actual_1, res_actual_2, res_desired])
res_actual_1_val, res_actual_2_val, res_desired_val = res
self.assertAllClose(res_actual_1_val, res_desired_val)
self.assertAllClose(res_actual_2_val, res_desired_val)
tt_1 = initializers.random_tensor_batch((2, 3, 4), batch_size=2,
dtype=self.dtype)
with self.assertRaises(ValueError):
# The batch_sizes are different.
ops.flat_inner(tt_1, tt_2)
def testAddSameBatchSize(self):
# Sum two TT-tensors with the same batch size.
tt_a = initializers.random_tensor_batch((2, 1, 4), tt_rank=2, batch_size=3,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((2, 1, 4), tt_rank=[1, 2, 4, 1],
batch_size=3, dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_a + tt_b)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testAddBroadcasting(self):
# Sum two TT-tensors with broadcasting.
tt_a = initializers.random_tensor_batch((2, 1, 4), tt_rank=2, batch_size=1,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((2, 1, 4), tt_rank=[1, 2, 4, 1],
batch_size=3, dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_b + tt_a)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyByNumber(self):
# Multiply batch of tensors by a number.
tt = initializers.random_tensor_batch((1, 2, 3), tt_rank=(1, 2, 3, 1),
batch_size=3, dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt, 4))
res_actual2 = ops.full(4.0 * tt)
res_desired = 4.0 * ops.full(tt)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testFrobeniusNormDifferentiableBatch(self):
tt = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
norm_sq_diff = ops.frobenius_norm_squared(tt, differentiable=True)
variables = [norm_sq_diff, ops.full(tt)]
norm_sq_diff_val, tt_full = self.evaluate(variables)
desired_norm = np.linalg.norm(tt_full.reshape((5, -1)), axis=1)**2
self.assertAllClose(norm_sq_diff_val, desired_norm, atol=1e-5, rtol=1e-5)
def testFrobeniusNormTens(self):
# Frobenius norm of a batch of TT-tensors.
tt = initializers.tensor_batch_with_random_cores((2, 2, 3), batch_size=3,
tt_rank=2,
dtype=self.dtype)
norm_sq_actual = ops.frobenius_norm_squared(tt)
norm_actual = ops.frobenius_norm(tt, epsilon=0.0)
vars = [norm_sq_actual, norm_actual, ops.full(tt)]
norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars)
tt_val = tt_val.reshape((3, -1))
norm_sq_desired_val = np.sum(tt_val * tt_val, axis=1)
norm_desired_val = np.sqrt(norm_sq_desired_val)
self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val)
self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5,
rtol=1e-5)
def testMultiplyBatchByTensor(self):
tt_a = initializers.random_tensor((3, 3, 3), tt_rank=2, dtype=self.dtype)
tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(ops.multiply(tt_b, tt_a))
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyBatchByBatch(self):
tt_a = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(ops.multiply(tt_b, tt_a))
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testMultiplyBroadcasting(self):
tt_a = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=1,
dtype=self.dtype)
tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
dtype=self.dtype)
res_actual = ops.full(ops.multiply(tt_a, tt_b))
res_actual2 = ops.full(ops.multiply(tt_b, tt_a))
res_desired = ops.full(tt_a) * ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testGatherND(self):
idx = [[0, 0, 0], [0, 1, 2], [0, 1, 0]]
tt = initializers.random_tensor((3, 4, 5), tt_rank=2, dtype=self.dtype)
res_np = ops.gather_nd(tt, idx)
res_desired = tf.gather_nd(ops.full(tt), idx)
to_run = [res_np, res_desired]
res_np_v, des_v = self.evaluate(to_run)
self.assertAllClose(res_np_v, des_v)
def testGatherNDBatch(self):
idx = [[0, 0, 0, 0], [1, 0, 1, 2], [0, 0, 1, 0]]
tt = initializers.random_tensor_batch((3, 4, 5), tt_rank=2, batch_size=2,
dtype=self.dtype)
res_np = ops.gather_nd(tt, idx)
res_desired = tf.gather_nd(ops.full(tt), idx)
to_run = [res_np, res_desired]
res_np_v, des_v = self.evaluate(to_run)
self.assertAllClose(res_np_v, des_v)
def testCoreRenormBatch(self):
a = initializers.random_tensor_batch(3 * (10,), tt_rank=7, batch_size=5,
dtype=self.dtype)
b = ops.renormalize_tt_cores(a)
var_list = [ops.full(a), ops.full(b)]
af, bf = self.evaluate(var_list)
b_cores = self.evaluate(b.tt_cores)
b_cores_norms = []
for cr in b_cores:
b_cores_norms.append(np.linalg.norm(cr))
self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5)
self.assertAllClose(b_cores_norms, b_cores_norms[0]
* np.ones((len(b_cores))))
class _TTMatrixTestBatch():
def testFullMatrix2d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(3, 2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank, 4, 5).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 2, 3, rank), b.reshape((3, rank, 4, 5, 1)))
# Basically do full by hand.
desired = np.einsum('oijb,obkl->oijkl', a, b)
desired = desired.reshape((3, 2, 3, 4, 5))
desired = desired.transpose((0, 1, 3, 2, 4))
desired = desired.reshape((3, 2 * 4, 3 * 5))
tf_mat = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testFullMatrix3d(self):
np.random.seed(1)
for rank in [1, 2]:
a = np.random.rand(3, 2, 3, rank).astype(self.dtype.as_numpy_dtype)
b = np.random.rand(3, rank, 4, 5, rank).astype(self.dtype.as_numpy_dtype)
c = np.random.rand(3, rank, 2, 2).astype(self.dtype.as_numpy_dtype)
tt_cores = (a.reshape(3, 1, 2, 3, rank), b.reshape(3, rank, 4, 5, rank),
c.reshape(3, rank, 2, 2, 1))
# Basically do full by hand.
desired = np.einsum('oija,oaklb,obpq->oijklpq', a, b, c)
desired = desired.reshape((3, 2, 3, 4, 5, 2, 2))
desired = desired.transpose((0, 1, 3, 5, 2, 4, 6))
desired = desired.reshape((3, 2 * 4 * 2, 3 * 5 * 2))
tf_mat = TensorTrainBatch(tt_cores)
actual = self.evaluate(ops.full(tf_mat))
self.assertAllClose(desired, actual)
def testTTMatTimesTTMatSameBatchSize(self):
# Multiply a batch of TT-matrices by another batch of TT-matrices with the
# same batch sizes.
left_shape = (2, 3)
sum_shape = (4, 3)
right_shape = (4, 4)
tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape),
tt_rank=3, batch_size=3,
dtype=self.dtype)
tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape),
batch_size=3,
dtype=self.dtype)
res_actual = ops.matmul(tt_mat_1, tt_mat_2)
res_actual = ops.full(res_actual)
res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2))
res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired])
# TODO: why so bad accuracy?
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5)
def testTTMatTimesTTMatBroadcasting(self):
# Multiply a batch of TT-matrices by another batch of TT-matrices with
# broadcasting.
left_shape = (2, 3)
sum_shape = (4, 3)
right_shape = (4, 4)
tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape),
tt_rank=3, batch_size=3,
dtype=self.dtype)
tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape),
dtype=self.dtype)
# TT-batch by one element TT-batch
res_actual = ops.matmul(tt_mat_1, tt_mat_2)
res_actual = ops.full(res_actual)
# TT by TT-batch.
res_actual2 = ops.matmul(ops.transpose(tt_mat_2[0]), ops.transpose(tt_mat_1))
res_actual2 = ops.full(ops.transpose(res_actual2))
res_desired = tf.einsum('oij,jk->oik', ops.full(tt_mat_1),
ops.full(tt_mat_2[0]))
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5)
self.assertAllClose(res_actual2_val, res_desired_val, atol=1e-5,
rtol=1e-5)
def testTranspose(self):
# Transpose a batch of TT-matrices.
tt = initializers.random_matrix_batch(((2, 3, 4), (2, 2, 2)),
batch_size=2, dtype=self.dtype)
res_actual = ops.full(ops.transpose(tt))
res_actual_val, tt_val = self.evaluate([res_actual, ops.full(tt)])
self.assertAllClose(tt_val.transpose((0, 2, 1)), res_actual_val)
def testAddSameBatchSize(self):
# Sum two TT-matrices with the same batch size.
tt_a = initializers.random_matrix_batch(((2, 1, 4), None), tt_rank=2,
batch_size=3, dtype=self.dtype)
tt_b = initializers.random_matrix_batch(((2, 1, 4), None),
tt_rank=[1, 2, 4, 1], batch_size=3,
dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_a + tt_b)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testAddBroadcasting(self):
# Sum two TT-matrices with broadcasting.
tt_a = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)), tt_rank=2,
batch_size=3, dtype=self.dtype)
tt_b = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)),
tt_rank=[1, 2, 4, 1], batch_size=1,
dtype=self.dtype)
res_actual = ops.full(ops.add(tt_a, tt_b))
res_actual2 = ops.full(tt_b + tt_a)
res_desired = ops.full(tt_a) + ops.full(tt_b)
to_run = [res_actual, res_actual2, res_desired]
res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run)
self.assertAllClose(res_actual_val, res_desired_val)
self.assertAllClose(res_actual2_val, res_desired_val)
def testCastFloat(self):
# Test cast function for float tt-matrices and vectors.
tt_mat = initializers.random_matrix_batch(((2, 3), (3, 2)), tt_rank=2,
batch_size=3)
casted = ops.cast(tt_mat, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def testCastIntFloat(self):
# Tests cast function from int to float for matrices.
np.random.seed(1)
K_1 = np.random.randint(0, high=100, size=(1, 2, 2, 2))
K_2 = np.random.randint(0, high=100, size=(2, 3, 3, 2))
K_3 = np.random.randint(0, high=100, size=(2, 2, 2, 1))
tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1])
tt_int_batch = shapes.expand_batch_dim(tt_int)
casted = ops.cast(tt_int_batch, self.dtype)
casted_val = self.evaluate(ops.full(casted))
self.assertEqual(self.dtype, casted.dtype)
    self.assertEqual(self.dtype.as_numpy_dtype, casted_val.dtype)
def _random_sparse(shape, non_zeros, dtype):
  """Return a random tf.SparseTensor of the given dense shape with `non_zeros` entries of `dtype`."""
  sparse_flat_indices = np.random.choice(np.prod(shape), non_zeros).astype(int)
  sparse_indices = np.unravel_index(sparse_flat_indices, shape)
  sparse_indices = np.vstack(sparse_indices).transpose()
  values = np.random.randn(non_zeros).astype(dtype)
  sparse = tf.SparseTensor(indices=sparse_indices, values=values,
                           dense_shape=shape)
  return sparse
class TTTensorTestFloat32(tf.test.TestCase, _TTTensorTest):
dtype = tf.float32
class TTTensorTestFloat64(tf.test.TestCase, _TTTensorTest):
dtype = tf.float64
class TTMatrixTestFloat32(tf.test.TestCase, _TTMatrixTest):
dtype = tf.float32
class TTMatrixTestFloat64(tf.test.TestCase, _TTMatrixTest):
dtype = tf.float64
class TTTensorBatchTestFloat32(tf.test.TestCase, _TTTensorBatchTest):
dtype = tf.float32
class TTTensorBatchTestFloat64(tf.test.TestCase, _TTTensorBatchTest):
dtype = tf.float64
class TTMatrixTestBatchFloat32(tf.test.TestCase, _TTMatrixTestBatch):
dtype = tf.float32
class TTMatrixTestBatchFloat64(tf.test.TestCase, _TTMatrixTestBatch):
dtype = tf.float64
if __name__ == "__main__":
tf.test.main()
| en | 0.899411 | # Basically do full by hand. # Inner product between two TT-tensors. # Inner product between a TT-tensor and a sparse tensor. # Sum two TT-tensors. # Multiply two TT-tensors. # Multiply a tensor by a number. # Frobenius norm of a TT-tensor. # Test cast function for float tt-tensors. # Tests cast function from int to float for tensors. # Basically do full by hand. # Basically do full by hand. # Multiply a TT-matrix by another TT-matrix. # TODO: why so bad accuracy? # Multiply a TT-matrix by a dense vector. # Multiply a TT-matrix by a dense vector. # Inner product between two TT-Matrices. # Inner product between a TT-matrix and a sparse matrix. # Frobenius norm of a TT-matrix. # Transpose a TT-matrix. # Test bilinear form. # Test bilinear form for batch of tensors. # Test bilinear_form_two_mat. # Test cast function for float tt-matrices and vectors. # Tests cast function from int to float for matrices. # Basically do full by hand. # Inner product between two batch TT-tensors of the same batch_size. # Inner product between two batch TT-tensors with broadcasting. # The batch_sizes are different. # Sum two TT-tensors with the same batch size. # Sum two TT-tensors with broadcasting. # Multiply batch of tensors by a number. # Frobenius norm of a batch of TT-tensors. # Basically do full by hand. # Basically do full by hand. # Multiply a batch of TT-matrices by another batch of TT-matrices with the # same batch sizes. # TODO: why so bad accuracy? # Multiply a batch of TT-matrices by another batch of TT-matrices with # broadcasting. # TT-batch by one element TT-batch # TT by TT-batch. # Transpose a batch of TT-matrices. # Sum two TT-matrices with the same batch size. # Sum two TT-matrices with broadcasting. # Test cast function for float tt-matrices and vectors. # Tests cast function from int to float for matrices. | 2.277857 | 2 |
tetravolume.py | 4dsolutions/Python5 | 11 | 6631245 | <filename>tetravolume.py
"""
Euler volume, modified by <NAME>
http://www.grunch.net/synergetics/quadvols.html
<NAME> (c) MIT License
The tetravolume.py methods make_tet and make_tri
assume that volume and area use R-edge cubes and
triangles for XYZ units respectively, and D-edge
tetrahedrons and triangles for IVM units of volume
and area (D = 2R).
The tetrahedron of edges D has sqrt(8/9) the
volume of a cube of edges R, yet each is unit in
its respective matrix.
The triangle of edges D has an XYZ area of sqrt(3)
i.e. an equilateral triangle of edges 2 in R-square
units. The IVM area of the same triangle is simply 1.
The cube of edges sqrt(2) in R units, has volume
sqrt(2) to the 3rd power. One third of that volume
is our unit tetrahedron of edges D (cube face diagonals).
See:
http://mathforum.org/kb/thread.jspa?threadID=2836546
for explanation of quadrays, used for some unit tests
"""
from math import sqrt as rt2
from qrays import Qvector, Vector
import sys
R =0.5
D =1.0
S3 = pow(9/8, 0.5)
root2 = rt2(2)
root3 = rt2(3)
root5 = rt2(5)
root6 = rt2(6)
PHI = (1 + root5)/2.0
class Tetrahedron:
"""
Takes six edges of tetrahedron with faces
(a,b,d)(b,c,e)(c,a,f)(d,e,f) -- returns volume
if ivm and xyz
"""
def __init__(self, a, b, c, d, e, f):
# a,b,c,d,e,f = [Decimal(i) for i in (a,b,c,d,e,f)]
self.a, self.a2 = a, a**2
self.b, self.b2 = b, b**2
self.c, self.c2 = c, c**2
self.d, self.d2 = d, d**2
self.e, self.e2 = e, e**2
self.f, self.f2 = f, f**2
def ivm_volume(self):
ivmvol = ((self._addopen()
- self._addclosed()
- self._addopposite())/2) ** 0.5
return ivmvol
def xyz_volume(self):
xyzvol = 1/S3 * self.ivm_volume()
return xyzvol
def _addopen(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = f2*a2*b2
sumval += d2 * a2 * c2
sumval += a2 * b2 * e2
sumval += c2 * b2 * d2
sumval += e2 * c2 * a2
sumval += f2 * c2 * b2
sumval += e2 * d2 * a2
sumval += b2 * d2 * f2
sumval += b2 * e2 * f2
sumval += d2 * e2 * c2
sumval += a2 * f2 * e2
sumval += d2 * f2 * c2
return sumval
def _addclosed(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = a2 * b2 * d2
sumval += d2 * e2 * f2
sumval += b2 * c2 * e2
sumval += a2 * c2 * f2
return sumval
def _addopposite(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = a2 * e2 * (a2 + e2)
sumval += b2 * f2 * (b2 + f2)
sumval += c2 * d2 * (c2 + d2)
return sumval
def make_tet(v0,v1,v2):
"""
three edges from any corner, remaining three edges computed
"""
tet = Tetrahedron(v0.length(), v1.length(), v2.length(),
(v0-v1).length(), (v1-v2).length(), (v2-v0).length())
return tet.ivm_volume(), tet.xyz_volume()
class Triangle:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def ivm_area(self):
ivmarea = self.xyz_area() * 1/rt2(3)
return ivmarea
def xyz_area(self):
"""
Heron's Formula, without the 1/4
"""
a,b,c = self.a, self.b, self.c
xyzarea = rt2((a+b+c) * (-a+b+c) * (a-b+c) * (a+b-c))
return xyzarea
def make_tri(v0,v1):
"""
three edges from any corner, remaining three edges computed
"""
tri = Triangle(v0.length(), v1.length(), (v1-v0).length())
return tri.ivm_area(), tri.xyz_area()
R = 0.5
D = 1.0
import unittest
class Test_Tetrahedron(unittest.TestCase):
def test_unit_volume(self):
tet = Tetrahedron(D, D, D, D, D, D)
self.assertEqual(tet.ivm_volume(), 1, "Volume not 1")
def test_e_module(self):
e0 = D
e1 = root3 * PHI**-1
e2 = rt2((5 - root5)/2)
e3 = (3 - root5)/2
e4 = rt2(5 - 2*root5)
e5 = 1/PHI
tet = Tetrahedron(e0, e1, e2, e3, e4, e5)
self.assertTrue(1/23 > tet.ivm_volume()/8 > 1/24, "Wrong E-mod")
def test_unit_volume2(self):
tet = Tetrahedron(R, R, R, R, R, R)
self.assertAlmostEqual(tet.xyz_volume(), 0.117851130)
def test_unit_volume3(self):
tet = Tetrahedron(R, R, R, R, R, R)
self.assertAlmostEqual(tet.ivm_volume(), 0.125)
def test_phi_edge_tetra(self):
tet = Tetrahedron(D, D, D, D, D, PHI)
self.assertAlmostEqual(float(tet.ivm_volume()), 0.70710678)
def test_right_tetra(self):
e = pow((root3/2)**2 + (root3/2)**2, 0.5) # right tetrahedron
tet = Tetrahedron(D, D, D, D, D, e)
self.assertAlmostEqual(tet.xyz_volume(), 1)
def test_quadrant(self):
qA = Qvector((1,0,0,0))
qB = Qvector((0,1,0,0))
qC = Qvector((0,0,1,0))
tet = make_tet(qA, qB, qC)
self.assertAlmostEqual(tet[0], 0.25)
def test_octant(self):
x = Vector((R, 0, 0))
y = Vector((0, R, 0))
z = Vector((0, 0, R))
tet = make_tet(x,y,z)
self.assertAlmostEqual(tet[1], 1/6, 5) # good to 5 places
def test_quarter_octahedron(self):
a = Vector((D,0,0))
b = Vector((0,D,0))
c = Vector((R,R,root2/2))
tet = make_tet(a, b, c)
self.assertAlmostEqual(tet[0], 1, 5) # good to 5 places
def test_xyz_cube(self):
a = Vector((R, 0.0, 0.0))
b = Vector((0.0, R, 0.0))
c = Vector((0.0, 0.0, R))
R_octa = make_tet(a,b,c)
self.assertAlmostEqual(6 * R_octa[1], 1, 4) # good to 4 places
def test_s3(self):
D_tet = Tetrahedron(D, D, D, D, D, D)
a = Vector((R, 0.0, 0.0))
b = Vector((0.0, R, 0.0))
c = Vector((0.0, 0.0, R))
R_cube = 6 * make_tet(a,b,c)[1]
self.assertAlmostEqual(D_tet.xyz_volume() * S3, R_cube, 4)
def test_martian(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
r = Qvector((2,0,1,1))
result = make_tet(5*q, 2*p, 2*r)
self.assertAlmostEqual(result[0], 20, 7)
def test_area_martian1(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
result = p.area(q)
self.assertAlmostEqual(result, 1)
def test_area_martian2(self):
p = 3 * Qvector((2,1,0,1))
q = 4 * Qvector((2,1,1,0))
result = p.area(q)
self.assertAlmostEqual(result, 12)
def test_area_martian3(self):
qx = Vector((D,0,0)).quadray()
qy = Vector((R,rt2(3)/2,0)).quadray()
result = qx.area(qy)
self.assertAlmostEqual(result, 1, 7)
def test_area_earthling1(self):
vx = Vector((1,0,0))
vy = Vector((0,1,0))
result = vx.area(vy)
self.assertAlmostEqual(result, 1)
def test_area_earthling2(self):
vx = Vector((2,0,0))
vy = Vector((1,rt2(3),0))
result = vx.area(vy)
self.assertAlmostEqual(result, 2*rt2(3))
def test_phi_tet(self):
"edges from common vertex: phi, 1/phi, 1"
p = Vector((1, 0, 0))
q = Vector((1, 0, 0)).rotz(60) * PHI
r = Vector((0.5, root3/6, root6/3)) * 1/PHI
result = make_tet(p, q, r)
self.assertAlmostEqual(result[0], 1, 7)
def test_phi_tet_2(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
r = Qvector((2,0,1,1))
result = make_tet(PHI*q, (1/PHI)*p, r)
self.assertAlmostEqual(result[0], 1, 7)
def test_phi_tet_3(self):
T = Tetrahedron(PHI, 1/PHI, 1.0,
root2, root2/PHI, root2)
result = T.ivm_volume()
self.assertAlmostEqual(result, 1, 7)
def test_koski(self):
a = 1
b = PHI ** -1
c = PHI ** -2
d = (root2) * PHI ** -1
e = (root2) * PHI ** -2
f = (root2) * PHI ** -1
T = Tetrahedron(a,b,c,d,e,f)
result = T.ivm_volume()
self.assertAlmostEqual(result, PHI ** -3, 7)
class Test_Triangle(unittest.TestCase):
def test_unit_area1(self):
tri = Triangle(D, D, D)
self.assertEqual(tri.ivm_area(), 1)
def test_unit_area2(self):
tri = Triangle(2, 2, 2)
self.assertEqual(tri.ivm_area(), 4)
def test_xyz_area3(self):
tri = Triangle(D, D, D)
self.assertEqual(tri.xyz_area(), rt2(3))
def test_xyz_area4(self):
v1 = Vector((D, 0, 0))
v2 = Vector((0, D, 0))
xyz_area = make_tri(v1, v2)[1]
self.assertAlmostEqual(xyz_area, 2)
def test_xyz_area5(self):
tri = Triangle(R, R, R)
self.assertAlmostEqual(tri.xyz_area(), (root3)/4)
def command_line():
args = sys.argv[1:]
try:
args = [float(x) for x in args] # floats
t = Tetrahedron(*args)
except TypeError:
t = Tetrahedron(1,1,1,1,1,1)
print("defaults used")
print(t.ivm_volume())
print(t.xyz_volume())
if __name__ == "__main__":
if len(sys.argv)==7:
command_line()
else:
unittest.main()
| <filename>tetravolume.py
"""
Euler volume, modified by <NAME>
http://www.grunch.net/synergetics/quadvols.html
<NAME> (c) MIT License
The tetravolume.py methods make_tet and make_tri
assume that volume and area use R-edge cubes and
triangles for XYZ units respectively, and D-edge
tetrahedrons and triangles for IVM units of volume
and area (D = 2R).
The tetrahedron of edges D has sqrt(8/9) the
volume of a cube of edges R, yet each is unit in
its respective matrix.
The triangle of edges D has an XYZ area of sqrt(3)
i.e. an equilateral triangle of edges 2 in R-square
units. The IVM area of the same triangle is simply 1.
The cube of edges sqrt(2) in R units, has volume
sqrt(2) to the 3rd power. One third of that volume
is our unit tetrahedron of edges D (cube face diagonals).
See:
http://mathforum.org/kb/thread.jspa?threadID=2836546
for explanation of quadrays, used for some unit tests
"""
from math import sqrt as rt2
from qrays import Qvector, Vector
import sys
R =0.5
D =1.0
S3 = pow(9/8, 0.5)
root2 = rt2(2)
root3 = rt2(3)
root5 = rt2(5)
root6 = rt2(6)
PHI = (1 + root5)/2.0
class Tetrahedron:
"""
Takes six edges of tetrahedron with faces
(a,b,d)(b,c,e)(c,a,f)(d,e,f) -- returns volume
if ivm and xyz
"""
def __init__(self, a, b, c, d, e, f):
# a,b,c,d,e,f = [Decimal(i) for i in (a,b,c,d,e,f)]
self.a, self.a2 = a, a**2
self.b, self.b2 = b, b**2
self.c, self.c2 = c, c**2
self.d, self.d2 = d, d**2
self.e, self.e2 = e, e**2
self.f, self.f2 = f, f**2
def ivm_volume(self):
ivmvol = ((self._addopen()
- self._addclosed()
- self._addopposite())/2) ** 0.5
return ivmvol
def xyz_volume(self):
xyzvol = 1/S3 * self.ivm_volume()
return xyzvol
def _addopen(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = f2*a2*b2
sumval += d2 * a2 * c2
sumval += a2 * b2 * e2
sumval += c2 * b2 * d2
sumval += e2 * c2 * a2
sumval += f2 * c2 * b2
sumval += e2 * d2 * a2
sumval += b2 * d2 * f2
sumval += b2 * e2 * f2
sumval += d2 * e2 * c2
sumval += a2 * f2 * e2
sumval += d2 * f2 * c2
return sumval
def _addclosed(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = a2 * b2 * d2
sumval += d2 * e2 * f2
sumval += b2 * c2 * e2
sumval += a2 * c2 * f2
return sumval
def _addopposite(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = a2 * e2 * (a2 + e2)
sumval += b2 * f2 * (b2 + f2)
sumval += c2 * d2 * (c2 + d2)
return sumval
def make_tet(v0,v1,v2):
"""
three edges from any corner, remaining three edges computed
"""
tet = Tetrahedron(v0.length(), v1.length(), v2.length(),
(v0-v1).length(), (v1-v2).length(), (v2-v0).length())
return tet.ivm_volume(), tet.xyz_volume()
class Triangle:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def ivm_area(self):
ivmarea = self.xyz_area() * 1/rt2(3)
return ivmarea
def xyz_area(self):
"""
Heron's Formula, without the 1/4
"""
a,b,c = self.a, self.b, self.c
xyzarea = rt2((a+b+c) * (-a+b+c) * (a-b+c) * (a+b-c))
return xyzarea
def make_tri(v0,v1):
"""
three edges from any corner, remaining three edges computed
"""
tri = Triangle(v0.length(), v1.length(), (v1-v0).length())
return tri.ivm_area(), tri.xyz_area()
R = 0.5
D = 1.0
import unittest
class Test_Tetrahedron(unittest.TestCase):
def test_unit_volume(self):
tet = Tetrahedron(D, D, D, D, D, D)
self.assertEqual(tet.ivm_volume(), 1, "Volume not 1")
def test_e_module(self):
e0 = D
e1 = root3 * PHI**-1
e2 = rt2((5 - root5)/2)
e3 = (3 - root5)/2
e4 = rt2(5 - 2*root5)
e5 = 1/PHI
tet = Tetrahedron(e0, e1, e2, e3, e4, e5)
self.assertTrue(1/23 > tet.ivm_volume()/8 > 1/24, "Wrong E-mod")
def test_unit_volume2(self):
tet = Tetrahedron(R, R, R, R, R, R)
self.assertAlmostEqual(tet.xyz_volume(), 0.117851130)
def test_unit_volume3(self):
tet = Tetrahedron(R, R, R, R, R, R)
self.assertAlmostEqual(tet.ivm_volume(), 0.125)
def test_phi_edge_tetra(self):
tet = Tetrahedron(D, D, D, D, D, PHI)
self.assertAlmostEqual(float(tet.ivm_volume()), 0.70710678)
def test_right_tetra(self):
e = pow((root3/2)**2 + (root3/2)**2, 0.5) # right tetrahedron
tet = Tetrahedron(D, D, D, D, D, e)
self.assertAlmostEqual(tet.xyz_volume(), 1)
def test_quadrant(self):
qA = Qvector((1,0,0,0))
qB = Qvector((0,1,0,0))
qC = Qvector((0,0,1,0))
tet = make_tet(qA, qB, qC)
self.assertAlmostEqual(tet[0], 0.25)
def test_octant(self):
x = Vector((R, 0, 0))
y = Vector((0, R, 0))
z = Vector((0, 0, R))
tet = make_tet(x,y,z)
self.assertAlmostEqual(tet[1], 1/6, 5) # good to 5 places
def test_quarter_octahedron(self):
a = Vector((D,0,0))
b = Vector((0,D,0))
c = Vector((R,R,root2/2))
tet = make_tet(a, b, c)
self.assertAlmostEqual(tet[0], 1, 5) # good to 5 places
def test_xyz_cube(self):
a = Vector((R, 0.0, 0.0))
b = Vector((0.0, R, 0.0))
c = Vector((0.0, 0.0, R))
R_octa = make_tet(a,b,c)
self.assertAlmostEqual(6 * R_octa[1], 1, 4) # good to 4 places
def test_s3(self):
D_tet = Tetrahedron(D, D, D, D, D, D)
a = Vector((R, 0.0, 0.0))
b = Vector((0.0, R, 0.0))
c = Vector((0.0, 0.0, R))
R_cube = 6 * make_tet(a,b,c)[1]
self.assertAlmostEqual(D_tet.xyz_volume() * S3, R_cube, 4)
def test_martian(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
r = Qvector((2,0,1,1))
result = make_tet(5*q, 2*p, 2*r)
self.assertAlmostEqual(result[0], 20, 7)
def test_area_martian1(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
result = p.area(q)
self.assertAlmostEqual(result, 1)
def test_area_martian2(self):
p = 3 * Qvector((2,1,0,1))
q = 4 * Qvector((2,1,1,0))
result = p.area(q)
self.assertAlmostEqual(result, 12)
def test_area_martian3(self):
qx = Vector((D,0,0)).quadray()
qy = Vector((R,rt2(3)/2,0)).quadray()
result = qx.area(qy)
self.assertAlmostEqual(result, 1, 7)
def test_area_earthling1(self):
vx = Vector((1,0,0))
vy = Vector((0,1,0))
result = vx.area(vy)
self.assertAlmostEqual(result, 1)
def test_area_earthling2(self):
vx = Vector((2,0,0))
vy = Vector((1,rt2(3),0))
result = vx.area(vy)
self.assertAlmostEqual(result, 2*rt2(3))
def test_phi_tet(self):
"edges from common vertex: phi, 1/phi, 1"
p = Vector((1, 0, 0))
q = Vector((1, 0, 0)).rotz(60) * PHI
r = Vector((0.5, root3/6, root6/3)) * 1/PHI
result = make_tet(p, q, r)
self.assertAlmostEqual(result[0], 1, 7)
def test_phi_tet_2(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
r = Qvector((2,0,1,1))
result = make_tet(PHI*q, (1/PHI)*p, r)
self.assertAlmostEqual(result[0], 1, 7)
def test_phi_tet_3(self):
T = Tetrahedron(PHI, 1/PHI, 1.0,
root2, root2/PHI, root2)
result = T.ivm_volume()
self.assertAlmostEqual(result, 1, 7)
def test_koski(self):
a = 1
b = PHI ** -1
c = PHI ** -2
d = (root2) * PHI ** -1
e = (root2) * PHI ** -2
f = (root2) * PHI ** -1
T = Tetrahedron(a,b,c,d,e,f)
result = T.ivm_volume()
self.assertAlmostEqual(result, PHI ** -3, 7)
class Test_Triangle(unittest.TestCase):
def test_unit_area1(self):
tri = Triangle(D, D, D)
self.assertEqual(tri.ivm_area(), 1)
def test_unit_area2(self):
tri = Triangle(2, 2, 2)
self.assertEqual(tri.ivm_area(), 4)
def test_xyz_area3(self):
tri = Triangle(D, D, D)
self.assertEqual(tri.xyz_area(), rt2(3))
def test_xyz_area4(self):
v1 = Vector((D, 0, 0))
v2 = Vector((0, D, 0))
xyz_area = make_tri(v1, v2)[1]
self.assertAlmostEqual(xyz_area, 2)
def test_xyz_area5(self):
tri = Triangle(R, R, R)
self.assertAlmostEqual(tri.xyz_area(), (root3)/4)
def command_line():
args = sys.argv[1:]
try:
args = [float(x) for x in args] # floats
t = Tetrahedron(*args)
except TypeError:
t = Tetrahedron(1,1,1,1,1,1)
print("defaults used")
print(t.ivm_volume())
print(t.xyz_volume())
if __name__ == "__main__":
if len(sys.argv)==7:
command_line()
else:
unittest.main()
| en | 0.867617 | Euler volume, modified by <NAME> http://www.grunch.net/synergetics/quadvols.html <NAME> (c) MIT License The tetravolume.py methods make_tet and make_tri assume that volume and area use R-edge cubes and triangles for XYZ units respectively, and D-edge tetrahedrons and triangles for IVM units of volume and area (D = 2R). The tetrahedron of edges D has sqrt(8/9) the volume of a cube of edges R, yet each is unit in its respective matrix. The triangle of edges D has an XYZ area of sqrt(3) i.e. an equilateral triangle of edges 2 in R-square units. The IVM area of the same triangle is simply 1. The cube of edges sqrt(2) in R units, has volume sqrt(2) to the 3rd power. One third of that volume is our unit tetrahedron of edges D (cube face diagonals). See: http://mathforum.org/kb/thread.jspa?threadID=2836546 for explanation of quadrays, used for some unit tests Takes six edges of tetrahedron with faces (a,b,d)(b,c,e)(c,a,f)(d,e,f) -- returns volume if ivm and xyz # a,b,c,d,e,f = [Decimal(i) for i in (a,b,c,d,e,f)] three edges from any corner, remaining three edges computed Heron's Formula, without the 1/4 three edges from any corner, remaining three edges computed # right tetrahedron # good to 5 places # good to 5 places # good to 4 places # floats | 3.600841 | 4 |
Implementations/software/python/ROMULUS_M_AEAD.py | rweather/romulus | 0 | 6631246 |
# ROMULUS-M Python Implementation
# Copyright 2020:
# <NAME> <<EMAIL>>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from SKINNY import *
import math
# ####################################################################
# # ROMULUS-M
# ####################################################################
# # ROMULUS-M1+
SKINNY_VERSION = 6
T_LENGTH = 16
COUNTER_LENGTH = 7
MEMBER_MASK = 32
# # ROMULUS-M1
# SKINNY_VERSION = 5
# T_LENGTH = 16
# COUNTER_LENGTH = 7
# MEMBER_MASK = 32
# ROMULUS-M2
# SKINNY_VERSION = 5
# T_LENGTH = 12
# COUNTER_LENGTH = 6
# MEMBER_MASK = 96
# # ROMULUS-M3
# SKINNY_VERSION = 4
# T_LENGTH = 12
# COUNTER_LENGTH = 3
# MEMBER_MASK = 160
N_LENGTH = T_LENGTH
def increase_counter(counter):
if COUNTER_LENGTH == 6:
if counter[2] & 0x80 != 0:
mask = 0x1b
else:
mask = 0
for i in reversed(range(1, 2)):
counter[i] = ((counter[i] << 1) & 0xfe) ^ (counter[i - 1] >> 7)
counter[0] = ((counter[0] << 1) & 0xfe) ^ mask
if counter[0] == 1 and counter[1] == 0 and counter[2] == 0:
if counter[3] & 0x80 != 0:
mask = 0x1b
else:
mask = 0
for i in reversed(range(1, 3)):
counter[i + 3] = ((counter[i + 3] << 1) & 0xfe) ^ (counter[3 + i - 1] >> 7)
counter[3] = ((counter[3] << 1) & 0xfe) ^ mask
elif COUNTER_LENGTH in [3, 7]:
if counter[COUNTER_LENGTH - 1] & 0x80 != 0:
if COUNTER_LENGTH == 7:
mask = 0x95
elif COUNTER_LENGTH == 3:
mask = 0x1b
else:
mask = 0
for i in reversed(range(1, COUNTER_LENGTH)):
counter[i] = ((counter[i] << 1) & 0xfe) ^ (counter[i - 1] >> 7)
counter[0] = ((counter[0] << 1) & 0xfe) ^ mask
return counter
def parse_alternate(L_in,x,y):
L_out = []
cursor = 0
while len(L_in) - cursor >= x + y :
L_out.extend([L_in[cursor:cursor+x],L_in[cursor+x:cursor+x+y]])
cursor = cursor + x + y
if len(L_in) - cursor >= x:
L_out.extend([L_in[cursor:cursor+x]])
cursor = cursor + x
if len(L_in) - cursor > 0:
L_out.extend([L_in[cursor:]])
if L_in == []:
L_out = [[]]
L_out.insert(0,[])
return L_out
def parse(L_in,x):
L_out = []
cursor = 0
while len(L_in) - cursor >= x:
L_out.extend([L_in[cursor:cursor+x]])
cursor = cursor + x
if len(L_in) - cursor > 0:
L_out.extend([L_in[cursor:]])
if L_in == []:
L_out = [[]]
L_out.insert(0,[])
return L_out
def pad(x, pad_length):
if len(x) == 0:
return [0] * pad_length
if len(x) == pad_length:
return x[:]
y = x[:]
for _ in range(pad_length - len(x) - 1):
y.append(0)
y.append(len(x))
return y
def G(A):
return [(x >> 1) ^ ((x ^ x << 7) & 0x80) for x in A]
def rho(S, M):
G_S = G(S)
C = [M[i] ^ G_S[i] for i in range(16)]
S_prime = [S[i] ^ M[i] for i in range(16)]
return S_prime, C
def rho_inv(S, C):
G_S = G(S)
M = [C[i] ^ G_S[i] for i in range(16)]
S_prime = [S[i] ^ M[i] for i in range(16)]
return S_prime, M
def tk_encoding(counter, b, t, k):
if COUNTER_LENGTH == 7:
return counter + [b[0] ^ MEMBER_MASK] + [0] * 8 + t + k
elif COUNTER_LENGTH == 6:
return counter[0:3] + [b[0] ^ MEMBER_MASK] + t + k + counter[3:6] + [0] * 13
elif COUNTER_LENGTH == 3:
return counter + [b[0] ^ MEMBER_MASK] + t + k
# function that implements the AE encryption
def crypto_aead_encrypt(M, A, N, K):
S = [0] * 16
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
A_parsed = parse_alternate(A,16,T_LENGTH)
a = len(A_parsed)-1
if a%2 == 0: u = T_LENGTH
else: u = 16
M_parsed = parse_alternate(M,16+T_LENGTH-u,u)
m = len(M_parsed)-1
if m%2 == 0: v = u
else: v = 16 + T_LENGTH - u
X = A_parsed[1:] + M_parsed[1:]
X.insert(0,[])
w = 16
if len(X[a]) < u: w = w ^ 2
if len(X[a+m]) < v: w = w ^ 1
if a%2 == 0: w = w ^ 8
if m%2 == 0: w = w ^ 4
X[a] = pad(X[a],u)
X[a+m] = pad(X[a+m],v)
x = 8
print(A_parsed)
print(M_parsed)
print(X)
for i in range(1,math.floor((a+m)/2)+1):
S, _ = rho(S, X[2*i-1])
counter = increase_counter(counter)
if i == math.floor(a/2)+1: x = x ^ 4
S = skinny_enc(S, tk_encoding(counter, [x], X[2*i], K[0:16]), SKINNY_VERSION)
counter = increase_counter(counter)
if a%2 == m%2: S, _ = rho(S, [0]*16)
else:
S, _ = rho(S, X[a+m])
counter = increase_counter(counter)
S = skinny_enc(S, tk_encoding(counter, [w], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
_, T = rho(S, [0]*16)
if len(M) == 0: return T
S = T[:]
C = []
M_parsed = parse(M,16)
m = len(M_parsed)-1
z = len(M_parsed[m])
M_parsed[m] = pad(M_parsed[m],16)
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
for i in range(1,m+1):
S = skinny_enc(S, tk_encoding(counter, [4], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
S, x = rho(S, M_parsed[i])
counter = increase_counter(counter)
if i<m: C.extend(x)
else: C.extend(x[:z])
C.extend(T)
return C
# function that implements the AE decryption
def crypto_aead_decrypt(C, A, N, K):
M = []
T = C[-16:]
C[-16:] = []
if len(C) != 0:
S = T[:]
C_parsed = parse(C,16)
c = len(C_parsed)-1
z = len(C_parsed[c])
C_parsed[c] = pad(C_parsed[c],16)
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
for i in range(1,c+1):
S = skinny_enc(S, tk_encoding(counter, [4], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
S, x = rho_inv(S, C_parsed[i])
counter = increase_counter(counter)
if i<c: M.extend(x)
else: M.extend(x[:z])
else:
S = []
S = [0] * 16
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
A_parsed = parse_alternate(A,16,T_LENGTH)
a = len(A_parsed)-1
if a%2 == 0: u = T_LENGTH
else: u = 16
M_parsed = parse_alternate(M,16+T_LENGTH-u,u)
m = len(M_parsed)-1
if m%2 == 0: v = u
else: v = 16 + T_LENGTH - u
X = A_parsed[1:] + M_parsed[1:]
X.insert(0,[])
w = 16
if len(X[a]) < u: w = w ^ 2
if len(X[a+m]) < v: w = w ^ 1
if a%2 == 0: w = w ^ 8
if m%2 == 0: w = w ^ 4
X[a] = pad(X[a],u)
X[a+m] = pad(X[a+m],v)
x = 8
for i in range(1,math.floor((a+m)/2)+1):
S, _ = rho(S, X[2*i-1])
counter = increase_counter(counter)
if i == math.floor(a/2)+1: x = x ^ 4
S = skinny_enc(S, tk_encoding(counter, [x], X[2*i], K[0:16]), SKINNY_VERSION)
counter = increase_counter(counter)
if a%2 == m%2: S, _ = rho(S, [0]*16)
else:
S, _ = rho(S, X[a+m])
counter = increase_counter(counter)
S = skinny_enc(S, tk_encoding(counter, [w], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
_, T_computed = rho(S, [0]*16)
compare = 0
for i in range(16):
compare |= (T[i] ^ T_computed[i])
if compare != 0:
return -1, []
else:
return 0, M
|
# ROMULUS-M Python Implementation
# Copyright 2020:
# <NAME> <<EMAIL>>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from SKINNY import *
import math
# ####################################################################
# # ROMULUS-M
# ####################################################################
# # ROMULUS-M1+
SKINNY_VERSION = 6
T_LENGTH = 16
COUNTER_LENGTH = 7
MEMBER_MASK = 32
# # ROMULUS-M1
# SKINNY_VERSION = 5
# T_LENGTH = 16
# COUNTER_LENGTH = 7
# MEMBER_MASK = 32
# ROMULUS-M2
# SKINNY_VERSION = 5
# T_LENGTH = 12
# COUNTER_LENGTH = 6
# MEMBER_MASK = 96
# # ROMULUS-M3
# SKINNY_VERSION = 4
# T_LENGTH = 12
# COUNTER_LENGTH = 3
# MEMBER_MASK = 160
N_LENGTH = T_LENGTH
def increase_counter(counter):
if COUNTER_LENGTH == 6:
if counter[2] & 0x80 != 0:
mask = 0x1b
else:
mask = 0
for i in reversed(range(1, 2)):
counter[i] = ((counter[i] << 1) & 0xfe) ^ (counter[i - 1] >> 7)
counter[0] = ((counter[0] << 1) & 0xfe) ^ mask
if counter[0] == 1 and counter[1] == 0 and counter[2] == 0:
if counter[3] & 0x80 != 0:
mask = 0x1b
else:
mask = 0
for i in reversed(range(1, 3)):
counter[i + 3] = ((counter[i + 3] << 1) & 0xfe) ^ (counter[3 + i - 1] >> 7)
counter[3] = ((counter[3] << 1) & 0xfe) ^ mask
elif COUNTER_LENGTH in [3, 7]:
if counter[COUNTER_LENGTH - 1] & 0x80 != 0:
if COUNTER_LENGTH == 7:
mask = 0x95
elif COUNTER_LENGTH == 3:
mask = 0x1b
else:
mask = 0
for i in reversed(range(1, COUNTER_LENGTH)):
counter[i] = ((counter[i] << 1) & 0xfe) ^ (counter[i - 1] >> 7)
counter[0] = ((counter[0] << 1) & 0xfe) ^ mask
return counter
def parse_alternate(L_in,x,y):
L_out = []
cursor = 0
while len(L_in) - cursor >= x + y :
L_out.extend([L_in[cursor:cursor+x],L_in[cursor+x:cursor+x+y]])
cursor = cursor + x + y
if len(L_in) - cursor >= x:
L_out.extend([L_in[cursor:cursor+x]])
cursor = cursor + x
if len(L_in) - cursor > 0:
L_out.extend([L_in[cursor:]])
if L_in == []:
L_out = [[]]
L_out.insert(0,[])
return L_out
def parse(L_in,x):
L_out = []
cursor = 0
while len(L_in) - cursor >= x:
L_out.extend([L_in[cursor:cursor+x]])
cursor = cursor + x
if len(L_in) - cursor > 0:
L_out.extend([L_in[cursor:]])
if L_in == []:
L_out = [[]]
L_out.insert(0,[])
return L_out
def pad(x, pad_length):
if len(x) == 0:
return [0] * pad_length
if len(x) == pad_length:
return x[:]
y = x[:]
for _ in range(pad_length - len(x) - 1):
y.append(0)
y.append(len(x))
return y
def G(A):
return [(x >> 1) ^ ((x ^ x << 7) & 0x80) for x in A]
def rho(S, M):
G_S = G(S)
C = [M[i] ^ G_S[i] for i in range(16)]
S_prime = [S[i] ^ M[i] for i in range(16)]
return S_prime, C
def rho_inv(S, C):
G_S = G(S)
M = [C[i] ^ G_S[i] for i in range(16)]
S_prime = [S[i] ^ M[i] for i in range(16)]
return S_prime, M
def tk_encoding(counter, b, t, k):
if COUNTER_LENGTH == 7:
return counter + [b[0] ^ MEMBER_MASK] + [0] * 8 + t + k
elif COUNTER_LENGTH == 6:
return counter[0:3] + [b[0] ^ MEMBER_MASK] + t + k + counter[3:6] + [0] * 13
elif COUNTER_LENGTH == 3:
return counter + [b[0] ^ MEMBER_MASK] + t + k
# function that implements the AE encryption
def crypto_aead_encrypt(M, A, N, K):
S = [0] * 16
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
A_parsed = parse_alternate(A,16,T_LENGTH)
a = len(A_parsed)-1
if a%2 == 0: u = T_LENGTH
else: u = 16
M_parsed = parse_alternate(M,16+T_LENGTH-u,u)
m = len(M_parsed)-1
if m%2 == 0: v = u
else: v = 16 + T_LENGTH - u
X = A_parsed[1:] + M_parsed[1:]
X.insert(0,[])
w = 16
if len(X[a]) < u: w = w ^ 2
if len(X[a+m]) < v: w = w ^ 1
if a%2 == 0: w = w ^ 8
if m%2 == 0: w = w ^ 4
X[a] = pad(X[a],u)
X[a+m] = pad(X[a+m],v)
x = 8
print(A_parsed)
print(M_parsed)
print(X)
for i in range(1,math.floor((a+m)/2)+1):
S, _ = rho(S, X[2*i-1])
counter = increase_counter(counter)
if i == math.floor(a/2)+1: x = x ^ 4
S = skinny_enc(S, tk_encoding(counter, [x], X[2*i], K[0:16]), SKINNY_VERSION)
counter = increase_counter(counter)
if a%2 == m%2: S, _ = rho(S, [0]*16)
else:
S, _ = rho(S, X[a+m])
counter = increase_counter(counter)
S = skinny_enc(S, tk_encoding(counter, [w], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
_, T = rho(S, [0]*16)
if len(M) == 0: return T
S = T[:]
C = []
M_parsed = parse(M,16)
m = len(M_parsed)-1
z = len(M_parsed[m])
M_parsed[m] = pad(M_parsed[m],16)
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
for i in range(1,m+1):
S = skinny_enc(S, tk_encoding(counter, [4], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
S, x = rho(S, M_parsed[i])
counter = increase_counter(counter)
if i<m: C.extend(x)
else: C.extend(x[:z])
C.extend(T)
return C
# function that implements the AE decryption
def crypto_aead_decrypt(C, A, N, K):
M = []
T = C[-16:]
C[-16:] = []
if len(C) != 0:
S = T[:]
C_parsed = parse(C,16)
c = len(C_parsed)-1
z = len(C_parsed[c])
C_parsed[c] = pad(C_parsed[c],16)
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
for i in range(1,c+1):
S = skinny_enc(S, tk_encoding(counter, [4], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
S, x = rho_inv(S, C_parsed[i])
counter = increase_counter(counter)
if i<c: M.extend(x)
else: M.extend(x[:z])
else:
S = []
S = [0] * 16
counter = [1] + [0] * (COUNTER_LENGTH - 1)
if COUNTER_LENGTH == 6: counter[3] = 1
A_parsed = parse_alternate(A,16,T_LENGTH)
a = len(A_parsed)-1
if a%2 == 0: u = T_LENGTH
else: u = 16
M_parsed = parse_alternate(M,16+T_LENGTH-u,u)
m = len(M_parsed)-1
if m%2 == 0: v = u
else: v = 16 + T_LENGTH - u
X = A_parsed[1:] + M_parsed[1:]
X.insert(0,[])
w = 16
if len(X[a]) < u: w = w ^ 2
if len(X[a+m]) < v: w = w ^ 1
if a%2 == 0: w = w ^ 8
if m%2 == 0: w = w ^ 4
X[a] = pad(X[a],u)
X[a+m] = pad(X[a+m],v)
x = 8
for i in range(1,math.floor((a+m)/2)+1):
S, _ = rho(S, X[2*i-1])
counter = increase_counter(counter)
if i == math.floor(a/2)+1: x = x ^ 4
S = skinny_enc(S, tk_encoding(counter, [x], X[2*i], K[0:16]), SKINNY_VERSION)
counter = increase_counter(counter)
if a%2 == m%2: S, _ = rho(S, [0]*16)
else:
S, _ = rho(S, X[a+m])
counter = increase_counter(counter)
S = skinny_enc(S, tk_encoding(counter, [w], N[0: T_LENGTH], K[0:16]), SKINNY_VERSION)
_, T_computed = rho(S, [0]*16)
compare = 0
for i in range(16):
compare |= (T[i] ^ T_computed[i])
if compare != 0:
return -1, []
else:
return 0, M
| en | 0.707593 | # ROMULUS-M Python Implementation # Copyright 2020: # <NAME> <<EMAIL>> # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # #################################################################### # # ROMULUS-M # #################################################################### # # ROMULUS-M1+ # # ROMULUS-M1 # SKINNY_VERSION = 5 # T_LENGTH = 16 # COUNTER_LENGTH = 7 # MEMBER_MASK = 32 # ROMULUS-M2 # SKINNY_VERSION = 5 # T_LENGTH = 12 # COUNTER_LENGTH = 6 # MEMBER_MASK = 96 # # ROMULUS-M3 # SKINNY_VERSION = 4 # T_LENGTH = 12 # COUNTER_LENGTH = 3 # MEMBER_MASK = 160 # function that implements the AE encryption # function that implements the AE decryption | 2.562165 | 3 |
generated/intermediate/ansible-module-sdk/azure_rm_batchapplicationpackage.py | audevbot/autorest.devops.debug | 0 | 6631247 | <gh_stars>0
#!/usr/bin/python
#
# Copyright (c) 2019 <NAME>, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_batchapplicationpackage
version_added: '2.9'
short_description: Manage Azure ApplicationPackage instance.
description:
- 'Create, update and delete instance of Azure ApplicationPackage.'
options:
resource_group:
description:
- The name of the resource group that contains the Batch account.
required: true
type: str
account_name:
description:
- The name of the Batch account.
required: true
type: str
application_name:
description:
- The name of the application. This must be unique within the account.
required: true
type: str
name:
description:
- The version of the application.
required: true
type: str
state:
description:
- Assert the state of the ApplicationPackage.
- >-
Use C(present) to create or update an ApplicationPackage and C(absent)
to delete it.
default: present
choices:
- absent
- present
format:
description:
- 'The format of the application package, if the package is active.'
type: str
storage_url:
description:
- The URL for the application package in Azure Storage.
type: str
storage_url_expiry:
description:
- The UTC time at which the Azure Storage URL will expire.
type: datetime
last_activation_time:
description:
- >-
The time at which the package was last activated, if the package is
active.
type: datetime
id:
description:
- The ID of the resource.
type: str
etag:
description:
- 'The ETag of the resource, used for concurrency statements.'
type: str
extends_documentation_fragment:
- azure
author:
- <NAME> (@zikalino)
'''
EXAMPLES = '''
- name: ApplicationPackageCreate
azure_rm_batchapplicationpackage:
resource_group: myResourceGroup
account_name: myBatchAccount
application_name: myApplication
name: myVersion
- name: ApplicationPackageDelete
azure_rm_batchapplicationpackage:
resource_group: myResourceGroup
account_name: myBatchAccount
application_name: myApplication
name: myVersion
state: absent
'''
RETURN = '''
id:
description:
- The ID of the resource.
returned: always
type: str
sample: null
name:
description:
- The name of the resource.
returned: always
type: str
sample: null
type:
description:
- The type of the resource.
returned: always
type: str
sample: null
etag:
description:
- 'The ETag of the resource, used for concurrency statements.'
returned: always
type: str
sample: null
properties:
description:
- The properties associated with the Application Package.
returned: always
type: dict
sample: null
contains:
state:
description:
- The current state of the application package.
returned: always
type: str
sample: null
format:
description:
- 'The format of the application package, if the package is active.'
returned: always
type: str
sample: null
storage_url:
description:
- The URL for the application package in Azure Storage.
returned: always
type: str
sample: null
storage_url_expiry:
description:
- The UTC time at which the Azure Storage URL will expire.
returned: always
type: datetime
sample: null
last_activation_time:
description:
- >-
The time at which the package was last activated, if the package is
active.
returned: always
type: datetime
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.batch import BatchManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMApplicationPackage(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
updatable=False,
disposition='resource_group_name',
required=true
),
account_name=dict(
type='str',
updatable=False,
required=true
),
application_name=dict(
type='str',
updatable=False,
required=true
),
name=dict(
type='str',
updatable=False,
disposition='version_name',
required=true
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.account_name = None
self.application_name = None
self.name = None
self.id = None
self.etag = None
self.body = {}
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMApplicationPackage, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(BatchManagement,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
if self.location is None:
self.location = resource_group.location
old_response = self.get_resource()
if not old_response:
if self.state == 'present':
self.to_do = Actions.Create
else:
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_resource()
elif self.to_do == Actions.Delete:
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
else:
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["name"] = response["name"]
self.results["type"] = response["type"]
self.results["etag"] = response["etag"]
self.results["properties"] = response["properties"]
return self.results
def create_update_resource(self):
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.application_package.create(resource_group_name=self.resource_group,
account_name=self.account_name,
application_name=self.application_name,
version_name=self.name)
else:
response = self.mgmt_client.application_package.update()
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the ApplicationPackage instance.')
self.fail('Error creating the ApplicationPackage instance: {0}'.format(str(exc)))
return response.as_dict()
def delete_resource(self):
# self.log('Deleting the ApplicationPackage instance {0}'.format(self.))
try:
response = self.mgmt_client.application_package.delete(resource_group_name=self.resource_group,
account_name=self.account_name,
application_name=self.application_name,
version_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the ApplicationPackage instance.')
self.fail('Error deleting the ApplicationPackage instance: {0}'.format(str(e)))
return True
def get_resource(self):
# self.log('Checking if the ApplicationPackage instance {0} is present'.format(self.))
found = False
try:
response = self.mgmt_client.application_package.get(resource_group_name=self.resource_group,
account_name=self.account_name,
application_name=self.application_name,
version_name=self.name)
except CloudError as e:
return False
return response.as_dict()
def main():
AzureRMApplicationPackage()
if __name__ == '__main__':
main()
| #!/usr/bin/python
#
# Copyright (c) 2019 <NAME>, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_batchapplicationpackage
version_added: '2.9'
short_description: Manage Azure ApplicationPackage instance.
description:
- 'Create, update and delete instance of Azure ApplicationPackage.'
options:
resource_group:
description:
- The name of the resource group that contains the Batch account.
required: true
type: str
account_name:
description:
- The name of the Batch account.
required: true
type: str
application_name:
description:
- The name of the application. This must be unique within the account.
required: true
type: str
name:
description:
- The version of the application.
required: true
type: str
state:
description:
- Assert the state of the ApplicationPackage.
- >-
Use C(present) to create or update an ApplicationPackage and C(absent)
to delete it.
default: present
choices:
- absent
- present
format:
description:
- 'The format of the application package, if the package is active.'
type: str
storage_url:
description:
- The URL for the application package in Azure Storage.
type: str
storage_url_expiry:
description:
- The UTC time at which the Azure Storage URL will expire.
type: datetime
last_activation_time:
description:
- >-
The time at which the package was last activated, if the package is
active.
type: datetime
id:
description:
- The ID of the resource.
type: str
etag:
description:
- 'The ETag of the resource, used for concurrency statements.'
type: str
extends_documentation_fragment:
- azure
author:
- <NAME> (@zikalino)
'''
EXAMPLES = '''
- name: ApplicationPackageCreate
azure_rm_batchapplicationpackage:
resource_group: myResourceGroup
account_name: myBatchAccount
application_name: myApplication
name: myVersion
- name: ApplicationPackageDelete
azure_rm_batchapplicationpackage:
resource_group: myResourceGroup
account_name: myBatchAccount
application_name: myApplication
name: myVersion
state: absent
'''
RETURN = '''
id:
description:
- The ID of the resource.
returned: always
type: str
sample: null
name:
description:
- The name of the resource.
returned: always
type: str
sample: null
type:
description:
- The type of the resource.
returned: always
type: str
sample: null
etag:
description:
- 'The ETag of the resource, used for concurrency statements.'
returned: always
type: str
sample: null
properties:
description:
- The properties associated with the Application Package.
returned: always
type: dict
sample: null
contains:
state:
description:
- The current state of the application package.
returned: always
type: str
sample: null
format:
description:
- 'The format of the application package, if the package is active.'
returned: always
type: str
sample: null
storage_url:
description:
- The URL for the application package in Azure Storage.
returned: always
type: str
sample: null
storage_url_expiry:
description:
- The UTC time at which the Azure Storage URL will expire.
returned: always
type: datetime
sample: null
last_activation_time:
description:
- >-
The time at which the package was last activated, if the package is
active.
returned: always
type: datetime
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.batch import BatchManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMApplicationPackage(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
updatable=False,
disposition='resource_group_name',
required=true
),
account_name=dict(
type='str',
updatable=False,
required=true
),
application_name=dict(
type='str',
updatable=False,
required=true
),
name=dict(
type='str',
updatable=False,
disposition='version_name',
required=true
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.account_name = None
self.application_name = None
self.name = None
self.id = None
self.etag = None
self.body = {}
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMApplicationPackage, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(BatchManagement,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
if self.location is None:
self.location = resource_group.location
old_response = self.get_resource()
if not old_response:
if self.state == 'present':
self.to_do = Actions.Create
else:
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_resource()
elif self.to_do == Actions.Delete:
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
else:
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["name"] = response["name"]
self.results["type"] = response["type"]
self.results["etag"] = response["etag"]
self.results["properties"] = response["properties"]
return self.results
def create_update_resource(self):
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.application_package.create(resource_group_name=self.resource_group,
account_name=self.account_name,
application_name=self.application_name,
version_name=self.name)
else:
response = self.mgmt_client.application_package.update()
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the ApplicationPackage instance.')
self.fail('Error creating the ApplicationPackage instance: {0}'.format(str(exc)))
return response.as_dict()
def delete_resource(self):
# self.log('Deleting the ApplicationPackage instance {0}'.format(self.))
try:
response = self.mgmt_client.application_package.delete(resource_group_name=self.resource_group,
account_name=self.account_name,
application_name=self.application_name,
version_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the ApplicationPackage instance.')
self.fail('Error deleting the ApplicationPackage instance: {0}'.format(str(e)))
return True
def get_resource(self):
# self.log('Checking if the ApplicationPackage instance {0} is present'.format(self.))
found = False
try:
response = self.mgmt_client.application_package.get(resource_group_name=self.resource_group,
account_name=self.account_name,
application_name=self.application_name,
version_name=self.name)
except CloudError as e:
return False
return response.as_dict()
def main():
AzureRMApplicationPackage()
if __name__ == '__main__':
main() | en | 0.730986 | #!/usr/bin/python # # Copyright (c) 2019 <NAME>, (@zikalino) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ---
module: azure_rm_batchapplicationpackage version_added: '2.9' short_description: Manage Azure ApplicationPackage instance. description: - 'Create, update and delete instance of Azure ApplicationPackage.' options: resource_group: description: - The name of the resource group that contains the Batch account. required: true type: str account_name: description: - The name of the Batch account. required: true type: str application_name: description: - The name of the application. This must be unique within the account. required: true type: str name: description: - The version of the application. required: true type: str state: description: - Assert the state of the ApplicationPackage. - >- Use C(present) to create or update an ApplicationPackage and C(absent) to delete it. default: present choices: - absent - present format: description: - 'The format of the application package, if the package is active.' type: str storage_url: description: - The URL for the application package in Azure Storage. type: str storage_url_expiry: description: - The UTC time at which the Azure Storage URL will expire. type: datetime last_activation_time: description: - >- The time at which the package was last activated, if the package is active. type: datetime id: description: - The ID of the resource. type: str etag: description: - 'The ETag of the resource, used for concurrency statements.' type: str extends_documentation_fragment: - azure author: - <NAME> (@zikalino) - name: ApplicationPackageCreate
azure_rm_batchapplicationpackage:
resource_group: myResourceGroup
account_name: myBatchAccount
application_name: myApplication
name: myVersion
- name: ApplicationPackageDelete
azure_rm_batchapplicationpackage:
resource_group: myResourceGroup
account_name: myBatchAccount
application_name: myApplication
name: myVersion
state: absent id:
description:
- The ID of the resource.
returned: always
type: str
sample: null
name:
description:
- The name of the resource.
returned: always
type: str
sample: null
type:
description:
- The type of the resource.
returned: always
type: str
sample: null
etag:
description:
- 'The ETag of the resource, used for concurrency statements.'
returned: always
type: str
sample: null
properties:
description:
- The properties associated with the Application Package.
returned: always
type: dict
sample: null
contains:
state:
description:
- The current state of the application package.
returned: always
type: str
sample: null
format:
description:
- 'The format of the application package, if the package is active.'
returned: always
type: str
sample: null
storage_url:
description:
- The URL for the application package in Azure Storage.
returned: always
type: str
sample: null
storage_url_expiry:
description:
- The UTC time at which the Azure Storage URL will expire.
returned: always
type: datetime
sample: null
last_activation_time:
description:
- >-
The time at which the package was last activated, if the package is
active.
returned: always
type: datetime
sample: null # This is handled in azure_rm_common # self.log('Deleting the ApplicationPackage instance {0}'.format(self.)) # self.log('Checking if the ApplicationPackage instance {0} is present'.format(self.)) | 1.826823 | 2 |
tofu/tests/tests06_mesh/test_01_checks.py | WinstonLHS/tofu | 56 | 6631248 | """
This module contains tests for tofu.data mesh handling (Mesh2D, rectangular and triangular)
"""
# Built-in
import os
import shutil
import itertools as itt
import warnings
# Standard
import numpy as np
import matplotlib.pyplot as plt
# tofu-specific
from tofu import __version__
import tofu as tf
import tofu.data as tfd
_HERE = os.path.abspath(os.path.dirname(__file__))
_PATH_DATA = os.path.join(_HERE, 'test_data')
_TOFU_USER = os.path.join(os.path.expanduser("~"), '.tofu')
_CUSTOM = os.path.dirname(os.path.dirname(os.path.dirname(_HERE)))
_CUSTOM = os.path.join(_CUSTOM, 'scripts', 'tofucustom.py')
VerbHead = 'tofu.mesh.test_01_checks'
#######################################################
#
# Setup and Teardown
#
#######################################################
def setup_module():
print("Removing user ~/.tofu/ if any")
if os.path.isdir(_TOFU_USER):
shutil.rmtree(_TOFU_USER)
# Recreating clean .tofu
# out = subprocess.run(_CUSTOM, stdout=PIPE, stderr=PIPE)
os.system('python '+_CUSTOM)
def teardown_module():
print("Removing user ~/.tofu/ if any")
if os.path.isdir(_TOFU_USER):
shutil.rmtree(_TOFU_USER)
#######################################################
#
# checking routines
#
#######################################################
class Test01_checks():
@classmethod
def setup_class(cls):
pass
    def setup(self):
        pass
def teardown(self):
pass
@classmethod
def teardown_class(cls):
pass
    def test01_mesh2DRect_X_check(self):
        lx = [[1, 2], [1, 2, 3, 4]]
        lres = [None, 10, 0.1, [0.1, 0.2], [0.1, 0.2, 0.3, 0.1]]
        for x, res in itt.product(lx, lres):
            # a list of resolutions is only valid if it matches the knot vector
            if hasattr(res, '__iter__') and len(res) != len(x):
                continue
            x_new, res_new, ind = tfd._mesh_checks._mesh2DRect_X_check(
                x=x,
                res=res,
            )
            if hasattr(res_new, '__iter__'):
                assert x_new.size == np.unique(x_new).size == res_new.size + 1
#######################################################
#
# object mesh2D
#
#######################################################
class Test02_Mesh2D():
@classmethod
def setup_class(cls):
pass
def setup(self):
self.dobj = {
'm0': tfd.Mesh2D(),
'm1': tfd.Mesh2D(),
'm2': tfd.Mesh2D(),
'm3': tfd.Mesh2D(),
}
# add mesh
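        # m0, m1, m3: rectangular meshes built from (domain, res)
        # m2: rectangular mesh cropped by the WEST vessel polygon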
ldomain = [
[[2, 3], [-1, 1]],
[[2, 2.3, 2.6, 3], [-1, 0., 1]],
[[2, 3], [-1, 0, 1]],
]
lres = [
0.1,
[[0.2, 0.1, 0.1, 0.2], [0.2, 0.1, 0.2]],
[0.1, [0.2, 0.1, 0.2]],
]
i0 = 0
for ii, (k0, v0) in enumerate(self.dobj.items()):
if k0 != 'm2':
self.dobj[k0].add_mesh(
domain=ldomain[i0],
res=lres[i0],
key=k0,
)
i0 += 1
else:
self.dobj[k0].add_mesh(
crop_poly=tf.load_config('WEST'),
res=0.1,
key=k0,
)
# add splines
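        # one bspline basis per mesh, with degrees 0, 1, 2 and 3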
for ii, (k0, v0) in enumerate(self.dobj.items()):
self.dobj[k0].add_bsplines(deg=ii)
# Add triangular mesh
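        # simplest case: the rectangle [2, 3] x [0, 1] split into two triangles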
knots = np.array([
[2, 0], [2, 1], [3, 0], [3, 1],
])
faces = np.array([[0, 1, 2], [1, 2, 3]])
self.dobjtri = {
'tri0': tf.data.Mesh2D(),
'tri1': tf.data.Mesh2D(),
}
self.dobjtri['tri0'].add_mesh(cents=faces, knots=knots, key='tri0')
# Add realistic NICE mesh for WEST
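        # file layout: first row gives (nknots, ncents), then nknots rows of
        # knot coordinates, then ncents rows of knot indices for each cent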
pfe = os.path.join(_PATH_DATA, 'mesh_triangular_WEST_eq.txt')
out = np.loadtxt(pfe)
nknots, ncents = int(out[0, 0]), int(out[0, 1])
assert out.shape == (nknots + ncents + 1, 3)
knots = out[1:nknots + 1, :][:, :2]
cents = out[nknots + 1:, :]
self.dobjtri['tri1'].add_mesh(cents=cents, knots=knots, key='tri1')
# add splines
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
self.dobjtri[k0].add_bsplines(deg=ii)
def teardown(self):
pass
@classmethod
def teardown_class(cls):
pass
def test01_get_summary(self):
for ii, (k0, v0) in enumerate(self.dobj.items()):
self.dobj[k0].get_summary()
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
self.dobjtri[k0].get_summary()
def test02_select_ind(self):
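        # check that index selection round-trips between tuple and ndarray forms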
# Rect mesh
lkey = ['m0', 'm1-bs1', 'm2', 'm3-bs3']
lelements = ['knots', None, 'cents', None]
lind = [None, ([0, 5], [0, 6]), [0, 10, 100], ([0, 5, 6], [0, 2, 3])]
lcrop = [True, False, True, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
indt = self.dobj[k0].select_ind(
key=lkey[ii],
ind=lind[ii],
elements=lelements[ii],
returnas=tuple,
crop=lcrop[ii],
)
indf = self.dobj[k0].select_ind(
key=lkey[ii],
ind=indt,
elements=lelements[ii],
returnas=np.ndarray,
crop=lcrop[ii],
)
indt2 = self.dobj[k0].select_ind(
key=lkey[ii],
ind=indf,
elements=lelements[ii],
returnas=tuple,
crop=lcrop[ii],
)
assert all([np.allclose(indt[ii], indt2[ii]) for ii in [0, 1]])
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri1']
lind = [None, [1], 1]
lelements = ['knots', None, 'cents']
for ii, k0 in enumerate(lkeys):
out = self.dobjtri[k0].select_ind(
key=k0,
ind=lind[ii],
elements=lelements[ii],
returnas=int,
crop=lcrop[ii],
)
if ii == 0:
assert np.allclose(out, np.r_[0, 1, 2, 3])
elif ii >= 1:
assert np.allclose(out, np.r_[1])
def test03_select_mesh(self):
# rectangular meshes
lkey = ['m0', 'm1', 'm2', 'm3']
lind = [None, ([0, 5], [0, 6]), [0, 10, 100], ([0, 5, 6], [0, 2, 3])]
lelements = ['cents', 'knots', 'cents', None]
lreturnas = ['ind', 'data', 'data', 'ind']
lreturn_neig = [None, True, False, True]
lcrop = [False, True, True, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
indf = self.dobj[k0].select_mesh_elements(
key=lkey[ii],
ind=lind[ii],
elements=lelements[ii],
returnas=lreturnas[ii],
return_neighbours=lreturn_neig[ii],
crop=lcrop[ii],
)
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri0', 'tri1']
lind = [None, [1], 1, [0, 1]]
lelements = ['knots', None, 'cents', 'cents']
lreturnas = ['ind', 'data', 'ind', 'data']
for ii, k0 in enumerate(lkeys):
out = self.dobjtri[k0].select_mesh_elements(
key=k0,
ind=lind[ii],
elements=lelements[ii],
returnas=lreturnas[ii],
return_neighbours=True,
crop=lcrop[ii],
)
def test04_select_bsplines(self):
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
lind = [None, ([0, 5], [0, 6]), [0, 10, 100], ([0, 5, 6], [0, 2, 3])]
lreturnas = [None, 'data', 'data', 'ind']
lreturn_cents = [None, True, False, True]
lreturn_knots = [None, False, True, True]
for ii, (k0, v0) in enumerate(self.dobj.items()):
indf = self.dobj[k0].select_bsplines(
key=lkey[ii],
ind=lind[ii],
returnas=lreturnas[ii],
return_cents=lreturn_cents[ii],
return_knots=lreturn_knots[ii],
)
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri0', 'tri1']
lkeysbs = ['tri0-bs0', None, 'tri0-bs0', 'tri1-bs1']
lind = [None, [1], 1, [0, 1]]
lelements = ['knots', None, 'cents', 'cents']
lreturnas = ['ind', 'data', 'ind', 'data']
for ii, k0 in enumerate(lkeys):
indf = self.dobjtri[k0].select_bsplines(
key=lkeysbs[ii],
ind=lind[ii],
returnas=lreturnas[ii],
return_cents=lreturn_cents[ii],
return_knots=lreturn_knots[ii],
)
def test05_sample_mesh(self):
# rectangular meshes
lres = [None, 0.1, 0.01, [0.1, 0.05]]
lmode = [None, 'rel', 'abs', 'abs']
lgrid = [None, True, False, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
out = v0.get_sample_mesh(
res=lres[ii], grid=lgrid[ii], mode=lmode[ii],
)
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri0', 'tri1']
lres = [None, 0.1, 0.01, [0.1, 0.05]]
lmode = [None, 'rel', 'abs', 'abs']
lgrid = [None, True, False, False]
for ii, k0 in enumerate(lkeys):
out = self.dobjtri[k0].get_sample_mesh(
res=lres[ii], grid=lgrid[ii], mode=lmode[ii],
)
"""
def test06_sample_bspline(self):
lres = [None, 0.1, 0.01, [0.1, 0.05]]
lmode = [None, 'rel', 'abs', 'abs']
lgrid = [None, True, False, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
out = v0.get_sample_bspline(
res=lres[ii], grid=lgrid[ii], mode=lmode[ii],
)
"""
def test07_ev_details_vs_sum(self):
x = np.linspace(2.2, 2.8, 5)
y = np.linspace(-0.5, 0.5, 5)
x = np.tile(x, (y.size, 1))
y = np.tile(y, (x.shape[1], 1)).T
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
for ii, (k0, v0) in enumerate(self.dobj.items()):
val = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=True,
reshape=True,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
crop = v0.dobj['bsplines'][lkey[ii]]['crop']
if crop is False:
shap = np.prod(v0.dobj['bsplines'][lkey[ii]]['shape'])
else:
shap = v0.ddata[crop]['data'].sum()
assert val.shape == tuple(np.r_[x.shape, shap])
val_sum = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=False,
reshape=True,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
indok = ~np.isnan(val_sum[0, ...])
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Does not work because of knots padding used in func_details
# Due to scpinterp._bspl.evaluate_spline()...
if False: # To be debugged
assert np.allclose(
val_sum[0, indok],
np.nansum(val, axis=-1)[indok],
equal_nan=True,
)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# triangular meshes
lkey = ['tri0-bs0', 'tri1-bs1']
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
val = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=True,
reshape=None,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
crop = v0.dobj['bsplines'][lkey[ii]].get('crop', False)
if crop is False:
shap = np.prod(v0.dobj['bsplines'][lkey[ii]]['shape'])
else:
shap = v0.ddata[crop]['data'].sum()
assert val.shape == tuple(np.r_[x.shape, shap])
val_sum = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=False,
reshape=None,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
indok = ~np.isnan(val_sum[0, ...])
assert np.allclose(
val_sum[0, indok],
np.nansum(val, axis=-1)[indok],
equal_nan=True,
)
def test08_plot_mesh(self):
# rectangular meshes
lik = [None, ([0, 2], [0, 3]), [2, 3], None]
lic = [None, ([0, 2], [0, 3]), None, [2, 3]]
for ii, (k0, v0) in enumerate(self.dobj.items()):
dax = self.dobj[k0].plot_mesh(
ind_knot=lik[ii],
ind_cent=lic[ii],
)
plt.close('all')
# triangular meshes
lik = [None, [0, 2], [2, 3], None]
lic = [None, [0, 2], None, [2, 3]]
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
dax = self.dobjtri[k0].plot_mesh(
ind_knot=lik[ii],
ind_cent=lic[ii],
)
plt.close('all')
# TBF for triangular
def test09_plot_bsplines(self):
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
lind = [None, ([1, 2], [2, 1]), (1, 1), [1, 2, 10]]
lknots = [None, True, False, True]
lcents = [False, False, True, True]
for ii, (k0, v0) in enumerate(self.dobj.items()):
dax = self.dobj[k0].plot_bsplines(
key=lkey[ii],
ind=lind[ii],
knots=lknots[ii],
cents=lcents[ii],
)
plt.close('all')
# triangular meshes
lkey = ['tri0-bs0', 'tri1-bs1'] # , 'm2-bs2', 'm3-bs3']
lind = [None, [1, 2], (1, 1), [1, 2, 10]]
lknots = [None, True, False, True]
lcents = [False, False, True, True]
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
dax = self.dobjtri[k0].plot_bsplines(
key=lkey[ii],
ind=lind[ii],
knots=lknots[ii],
cents=lcents[ii],
)
plt.close('all')
def test10_plot_profile2d(self):
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
for ii, (k0, v0) in enumerate(self.dobj.items()):
key = str(ii)
kbs = lkey[ii]
ref = self.dobj[k0].dobj['bsplines'][kbs]['ref']
shapebs = self.dobj[k0].dobj['bsplines'][kbs]['shape']
self.dobj[k0].add_data(
key=key,
data=np.random.random(shapebs),
ref=ref,
)
dax = self.dobj[k0].plot_profile2d(
key=key,
)
plt.close('all')
# triangular meshes
# DEACTIVATED BECAUSE TOO SLOW IN CURRENT VERSION !!!
if False:
lkey = ['tri0-bs0', 'tri1-bs1']
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
key = str(ii)
kbs = lkey[ii]
ref = self.dobjtri[k0].dobj['bsplines'][kbs]['ref']
shapebs = self.dobjtri[k0].dobj['bsplines'][kbs]['shape']
self.dobjtri[k0].add_data(
key=key,
data=np.random.random(shapebs),
ref=ref,
)
dax = self.dobjtri[k0].plot_profile2d(
key=key,
)
plt.close('all')
# TBF for triangular
def test11_add_bsplines_operator(self):
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2']
lop = ['D0N1', 'D0N2', 'D1N2', 'D2N2']
lgeom = ['linear', 'toroidal']
lcrop = [False, True]
dfail = {}
for ii, (k0, v0) in enumerate(self.dobj.items()):
if ii == 3:
continue
for comb in itt.product(lop, lgeom, lcrop):
deg = self.dobj[k0].dobj['bsplines'][lkey[ii]]['deg']
# only test exact operators
if int(comb[0][1]) > deg:
# except deg =0 D1N2
if deg == 0 and comb[0] == 'D1N2':
pass
else:
continue
try:
self.dobj[k0].add_bsplines_operator(
key=lkey[ii],
operator=comb[0],
geometry=comb[1],
crop=comb[2],
)
except Exception as err:
dfail[k0] = (
f"key {lkey[ii]}, op '{comb[0]}', geom '{comb[1]}': "
+ str(err)
)
# Raise error if any fail
if len(dfail) > 0:
lstr = [f'\t- {k0}: {v0}' for k0, v0 in dfail.items()]
msg = (
"The following operators failed:\n"
+ "\n".join(lstr)
)
raise Exception(msg)
# TBF for triangular
def test12_compute_plot_geometry_matrix(self):
# get config and cam
conf = tf.load_config('WEST-V0')
cam = tf.geom.utils.create_CamLOS1D(
pinhole=[3., 1., 0.],
orientation=[np.pi, 0., 0],
focal=0.1,
sensor_nb=50,
sensor_size=0.15,
config=conf,
Diag='SXR',
Exp='WEST',
Name='cam1',
)
# compute geometry matrices
for ii, (k0, v0) in enumerate(self.dobj.items()):
self.dobj[k0].add_geometry_matrix(
cam=cam,
res=0.01,
crop=True,
store=True,
)
dax = self.dobj[k0].plot_geometry_matrix(
cam=cam, indchan=12, indbf=100,
)
plt.close('all')
| """
This module contains tests for tofu.geom in its structured version
"""
# Built-in
import os
import shutil
import itertools as itt
import warnings
# Standard
import numpy as np
import matplotlib.pyplot as plt
# tofu-specific
from tofu import __version__
import tofu as tf
import tofu.data as tfd
_HERE = os.path.abspath(os.path.dirname(__file__))
_PATH_DATA = os.path.join(_HERE, 'test_data')
_TOFU_USER = os.path.join(os.path.expanduser("~"), '.tofu')
_CUSTOM = os.path.dirname(os.path.dirname(os.path.dirname(_HERE)))
_CUSTOM = os.path.join(_CUSTOM, 'scripts', 'tofucustom.py')
VerbHead = 'tofu.mesh.test_01_checks'
#######################################################
#
# Setup and Teardown
#
#######################################################
def setup_module():
print("Removing user ~/.tofu/ if any")
if os.path.isdir(_TOFU_USER):
shutil.rmtree(_TOFU_USER)
# Recreating clean .tofu
# out = subprocess.run(_CUSTOM, stdout=PIPE, stderr=PIPE)
os.system('python '+_CUSTOM)
def teardown_module():
print("Removing user ~/.tofu/ if any")
if os.path.isdir(_TOFU_USER):
shutil.rmtree(_TOFU_USER)
#######################################################
#
# checking routines
#
#######################################################
class Test01_checks():
@classmethod
def setup_class(cls):
pass
def setup(self):
    pass
def teardown(self):
pass
@classmethod
def teardown_class(cls):
pass
def test01_mesh2DRect_X_check(self):
lx = [[1, 2], [1, 2, 3, 4]]
lres = [None, 10, 0.1, [0.1, 0.2], [0.1, 0.2, 0.3, 0.1]]
for comb in itt.product(lx, lres):
if hasattr(lres, '__iter__') and len(lres) != len(lx):
continue
x, res, ind = tfd._mesh_checks._mesh2DRect_X_check(
x=[1, 2, 3, 4],
res=10,
)
if hasattr(res, '__iter__'):
    assert x.size == np.unique(x).size == res.size + 1
#######################################################
#
# object mesh2D
#
#######################################################
class Test02_Mesh2D():
@classmethod
def setup_class(cls):
pass
def setup(self):
self.dobj = {
'm0': tfd.Mesh2D(),
'm1': tfd.Mesh2D(),
'm2': tfd.Mesh2D(),
'm3': tfd.Mesh2D(),
}
# add mesh
ldomain = [
[[2, 3], [-1, 1]],
[[2, 2.3, 2.6, 3], [-1, 0., 1]],
[[2, 3], [-1, 0, 1]],
]
lres = [
0.1,
[[0.2, 0.1, 0.1, 0.2], [0.2, 0.1, 0.2]],
[0.1, [0.2, 0.1, 0.2]],
]
i0 = 0
for ii, (k0, v0) in enumerate(self.dobj.items()):
if k0 != 'm2':
self.dobj[k0].add_mesh(
domain=ldomain[i0],
res=lres[i0],
key=k0,
)
i0 += 1
else:
self.dobj[k0].add_mesh(
crop_poly=tf.load_config('WEST'),
res=0.1,
key=k0,
)
# add splines
for ii, (k0, v0) in enumerate(self.dobj.items()):
self.dobj[k0].add_bsplines(deg=ii)
# Add triangular mesh
knots = np.array([
[2, 0], [2, 1], [3, 0], [3, 1],
])
faces = np.array([[0, 1, 2], [1, 2, 3]])
self.dobjtri = {
'tri0': tf.data.Mesh2D(),
'tri1': tf.data.Mesh2D(),
}
self.dobjtri['tri0'].add_mesh(cents=faces, knots=knots, key='tri0')
# Add realistic NICE mesh for WEST
pfe = os.path.join(_PATH_DATA, 'mesh_triangular_WEST_eq.txt')
out = np.loadtxt(pfe)
nknots, ncents = int(out[0, 0]), int(out[0, 1])
assert out.shape == (nknots + ncents + 1, 3)
knots = out[1:nknots + 1, :][:, :2]
cents = out[nknots + 1:, :]
self.dobjtri['tri1'].add_mesh(cents=cents, knots=knots, key='tri1')
# add splines
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
self.dobjtri[k0].add_bsplines(deg=ii)
def teardown(self):
pass
@classmethod
def teardown_class(cls):
pass
def test01_get_summary(self):
for ii, (k0, v0) in enumerate(self.dobj.items()):
self.dobj[k0].get_summary()
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
self.dobjtri[k0].get_summary()
def test02_select_ind(self):
# Rect mesh
lkey = ['m0', 'm1-bs1', 'm2', 'm3-bs3']
lelements = ['knots', None, 'cents', None]
lind = [None, ([0, 5], [0, 6]), [0, 10, 100], ([0, 5, 6], [0, 2, 3])]
lcrop = [True, False, True, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
indt = self.dobj[k0].select_ind(
key=lkey[ii],
ind=lind[ii],
elements=lelements[ii],
returnas=tuple,
crop=lcrop[ii],
)
indf = self.dobj[k0].select_ind(
key=lkey[ii],
ind=indt,
elements=lelements[ii],
returnas=np.ndarray,
crop=lcrop[ii],
)
indt2 = self.dobj[k0].select_ind(
key=lkey[ii],
ind=indf,
elements=lelements[ii],
returnas=tuple,
crop=lcrop[ii],
)
assert all([np.allclose(indt[ii], indt2[ii]) for ii in [0, 1]])
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri1']
lind = [None, [1], 1]
lelements = ['knots', None, 'cents']
for ii, k0 in enumerate(lkeys):
out = self.dobjtri[k0].select_ind(
key=k0,
ind=lind[ii],
elements=lelements[ii],
returnas=int,
crop=lcrop[ii],
)
if ii == 0:
assert np.allclose(out, np.r_[0, 1, 2, 3])
elif ii >= 1:
assert np.allclose(out, np.r_[1])
def test03_select_mesh(self):
# rectangular meshes
lkey = ['m0', 'm1', 'm2', 'm3']
lind = [None, ([0, 5], [0, 6]), [0, 10, 100], ([0, 5, 6], [0, 2, 3])]
lelements = ['cents', 'knots', 'cents', None]
lreturnas = ['ind', 'data', 'data', 'ind']
lreturn_neig = [None, True, False, True]
lcrop = [False, True, True, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
indf = self.dobj[k0].select_mesh_elements(
key=lkey[ii],
ind=lind[ii],
elements=lelements[ii],
returnas=lreturnas[ii],
return_neighbours=lreturn_neig[ii],
crop=lcrop[ii],
)
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri0', 'tri1']
lind = [None, [1], 1, [0, 1]]
lelements = ['knots', None, 'cents', 'cents']
lreturnas = ['ind', 'data', 'ind', 'data']
for ii, k0 in enumerate(lkeys):
out = self.dobjtri[k0].select_mesh_elements(
key=k0,
ind=lind[ii],
elements=lelements[ii],
returnas=lreturnas[ii],
return_neighbours=True,
crop=lcrop[ii],
)
def test04_select_bsplines(self):
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
lind = [None, ([0, 5], [0, 6]), [0, 10, 100], ([0, 5, 6], [0, 2, 3])]
lreturnas = [None, 'data', 'data', 'ind']
lreturn_cents = [None, True, False, True]
lreturn_knots = [None, False, True, True]
for ii, (k0, v0) in enumerate(self.dobj.items()):
indf = self.dobj[k0].select_bsplines(
key=lkey[ii],
ind=lind[ii],
returnas=lreturnas[ii],
return_cents=lreturn_cents[ii],
return_knots=lreturn_knots[ii],
)
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri0', 'tri1']
lkeysbs = ['tri0-bs0', None, 'tri0-bs0', 'tri1-bs1']
lind = [None, [1], 1, [0, 1]]
lelements = ['knots', None, 'cents', 'cents']
lreturnas = ['ind', 'data', 'ind', 'data']
for ii, k0 in enumerate(lkeys):
indf = self.dobjtri[k0].select_bsplines(
key=lkeysbs[ii],
ind=lind[ii],
returnas=lreturnas[ii],
return_cents=lreturn_cents[ii],
return_knots=lreturn_knots[ii],
)
def test05_sample_mesh(self):
# rectangular meshes
lres = [None, 0.1, 0.01, [0.1, 0.05]]
lmode = [None, 'rel', 'abs', 'abs']
lgrid = [None, True, False, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
out = v0.get_sample_mesh(
res=lres[ii], grid=lgrid[ii], mode=lmode[ii],
)
# triangular meshes
lkeys = ['tri0', 'tri0', 'tri0', 'tri1']
lres = [None, 0.1, 0.01, [0.1, 0.05]]
lmode = [None, 'rel', 'abs', 'abs']
lgrid = [None, True, False, False]
for ii, k0 in enumerate(lkeys):
out = self.dobjtri[k0].get_sample_mesh(
res=lres[ii], grid=lgrid[ii], mode=lmode[ii],
)
"""
def test06_sample_bspline(self):
lres = [None, 0.1, 0.01, [0.1, 0.05]]
lmode = [None, 'rel', 'abs', 'abs']
lgrid = [None, True, False, False]
for ii, (k0, v0) in enumerate(self.dobj.items()):
out = v0.get_sample_bspline(
res=lres[ii], grid=lgrid[ii], mode=lmode[ii],
)
"""
def test07_ev_details_vs_sum(self):
x = np.linspace(2.2, 2.8, 5)
y = np.linspace(-0.5, 0.5, 5)
x = np.tile(x, (y.size, 1))
y = np.tile(y, (x.shape[1], 1)).T
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
for ii, (k0, v0) in enumerate(self.dobj.items()):
val = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=True,
reshape=True,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
crop = v0.dobj['bsplines'][lkey[ii]]['crop']
if crop is False:
shap = np.prod(v0.dobj['bsplines'][lkey[ii]]['shape'])
else:
shap = v0.ddata[crop]['data'].sum()
assert val.shape == tuple(np.r_[x.shape, shap])
val_sum = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=False,
reshape=True,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
indok = ~np.isnan(val_sum[0, ...])
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Does not work because of knots padding used in func_details
# Due to scpinterp._bspl.evaluate_spline()...
if False: # To be debugged
assert np.allclose(
val_sum[0, indok],
np.nansum(val, axis=-1)[indok],
equal_nan=True,
)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# triangular meshes
lkey = ['tri0-bs0', 'tri1-bs1']
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
val = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=True,
reshape=None,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
crop = v0.dobj['bsplines'][lkey[ii]].get('crop', False)
if crop is False:
shap = np.prod(v0.dobj['bsplines'][lkey[ii]]['shape'])
else:
shap = v0.ddata[crop]['data'].sum()
assert val.shape == tuple(np.r_[x.shape, shap])
val_sum = v0.interp2d(
key=lkey[ii],
R=x,
Z=y,
coefs=None,
indbs=None,
indt=None,
grid=False,
details=False,
reshape=None,
res=None,
crop=True,
nan0=ii % 2 == 0,
imshow=False,
)
indok = ~np.isnan(val_sum[0, ...])
assert np.allclose(
val_sum[0, indok],
np.nansum(val, axis=-1)[indok],
equal_nan=True,
)
def test08_plot_mesh(self):
# rectangular meshes
lik = [None, ([0, 2], [0, 3]), [2, 3], None]
lic = [None, ([0, 2], [0, 3]), None, [2, 3]]
for ii, (k0, v0) in enumerate(self.dobj.items()):
dax = self.dobj[k0].plot_mesh(
ind_knot=lik[ii],
ind_cent=lic[ii],
)
plt.close('all')
# triangular meshes
lik = [None, [0, 2], [2, 3], None]
lic = [None, [0, 2], None, [2, 3]]
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
dax = self.dobjtri[k0].plot_mesh(
ind_knot=lik[ii],
ind_cent=lic[ii],
)
plt.close('all')
# TBF for triangular
def test09_plot_bsplines(self):
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
lind = [None, ([1, 2], [2, 1]), (1, 1), [1, 2, 10]]
lknots = [None, True, False, True]
lcents = [False, False, True, True]
for ii, (k0, v0) in enumerate(self.dobj.items()):
dax = self.dobj[k0].plot_bsplines(
key=lkey[ii],
ind=lind[ii],
knots=lknots[ii],
cents=lcents[ii],
)
plt.close('all')
# triangular meshes
lkey = ['tri0-bs0', 'tri1-bs1'] # , 'm2-bs2', 'm3-bs3']
lind = [None, [1, 2], (1, 1), [1, 2, 10]]
lknots = [None, True, False, True]
lcents = [False, False, True, True]
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
dax = self.dobjtri[k0].plot_bsplines(
key=lkey[ii],
ind=lind[ii],
knots=lknots[ii],
cents=lcents[ii],
)
plt.close('all')
def test10_plot_profile2d(self):
# rectangular meshes
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2', 'm3-bs3']
for ii, (k0, v0) in enumerate(self.dobj.items()):
key = str(ii)
kbs = lkey[ii]
ref = self.dobj[k0].dobj['bsplines'][kbs]['ref']
shapebs = self.dobj[k0].dobj['bsplines'][kbs]['shape']
self.dobj[k0].add_data(
key=key,
data=np.random.random(shapebs),
ref=ref,
)
dax = self.dobj[k0].plot_profile2d(
key=key,
)
plt.close('all')
# triangular meshes
# DEACTIVATED BECAUSE TOO SLOW IN CURRENT VERSION !!!
if False:
lkey = ['tri0-bs0', 'tri1-bs1']
for ii, (k0, v0) in enumerate(self.dobjtri.items()):
key = str(ii)
kbs = lkey[ii]
ref = self.dobjtri[k0].dobj['bsplines'][kbs]['ref']
shapebs = self.dobjtri[k0].dobj['bsplines'][kbs]['shape']
self.dobjtri[k0].add_data(
key=key,
data=np.random.random(shapebs),
ref=ref,
)
dax = self.dobjtri[k0].plot_profile2d(
key=key,
)
plt.close('all')
# TBF for triangular
def test11_add_bsplines_operator(self):
lkey = ['m0-bs0', 'm1-bs1', 'm2-bs2']
lop = ['D0N1', 'D0N2', 'D1N2', 'D2N2']
lgeom = ['linear', 'toroidal']
lcrop = [False, True]
dfail = {}
for ii, (k0, v0) in enumerate(self.dobj.items()):
if ii == 3:
continue
for comb in itt.product(lop, lgeom, lcrop):
deg = self.dobj[k0].dobj['bsplines'][lkey[ii]]['deg']
# only test exact operators
if int(comb[0][1]) > deg:
# except deg =0 D1N2
if deg == 0 and comb[0] == 'D1N2':
pass
else:
continue
try:
self.dobj[k0].add_bsplines_operator(
key=lkey[ii],
operator=comb[0],
geometry=comb[1],
crop=comb[2],
)
except Exception as err:
dfail[k0] = (
f"key {lkey[ii]}, op '{comb[0]}', geom '{comb[1]}': "
+ str(err)
)
# Raise error if any fail
if len(dfail) > 0:
lstr = [f'\t- {k0}: {v0}' for k0, v0 in dfail.items()]
msg = (
"The following operators failed:\n"
+ "\n".join(lstr)
)
raise Exception(msg)
# TBF for triangular
def test12_compute_plot_geometry_matrix(self):
# get config and cam
conf = tf.load_config('WEST-V0')
cam = tf.geom.utils.create_CamLOS1D(
pinhole=[3., 1., 0.],
orientation=[np.pi, 0., 0],
focal=0.1,
sensor_nb=50,
sensor_size=0.15,
config=conf,
Diag='SXR',
Exp='WEST',
Name='cam1',
)
# compute geometry matrices
for ii, (k0, v0) in enumerate(self.dobj.items()):
self.dobj[k0].add_geometry_matrix(
cam=cam,
res=0.01,
crop=True,
store=True,
)
dax = self.dobj[k0].plot_geometry_matrix(
cam=cam, indchan=12, indbf=100,
)
plt.close('all')
| en | 0.318734 | This module contains tests for tofu.geom in its structured version # Built-in # Standard # tofu-specific ####################################################### # # Setup and Teardown # ####################################################### # Recreating clean .tofu # out = subprocess.run(_CUSTOM, stdout=PIPE, stderr=PIPE) ####################################################### # # checking routines # ####################################################### ####################################################### # # object mesh2D # ####################################################### # add mesh # add splines # Add triangular mesh # Add realistic NICE mesh for WEST # add splines # Rect mesh # triangular meshes # rectangular meshes # triangular meshes # rectangular meshes # triangular meshes # rectangular meshes # triangular meshes def test06_sample_bspline(self): lres = [None, 0.1, 0.01, [0.1, 0.05]] lmode = [None, 'rel', 'abs', 'abs'] lgrid = [None, True, False, False] for ii, (k0, v0) in enumerate(self.dobj.items()): out = v0.get_sample_bspline( res=lres[ii], grid=lgrid[ii], mode=lmode[ii], ) # rectangular meshes # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # Does not work because of knots padding used in func_details # Due to scpinterp._bspl.evaluate_spline()... # To be debugged # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # triangular meshes # rectangular meshes # triangular meshes # TBF for triangular # rectangular meshes # triangular meshes # , 'm2-bs2', 'm3-bs3'] # rectangular meshes # triangular meshes # DEACTIVATED BECAUSE TOO SLOW IN CURRENT VERSION !!! # TBF for triangular # only test exact operators # except deg =0 D1N2 # Raise error if any fail # TBF for triangular # get config and cam # compute geometry matrices | 2.285281 | 2 |
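A side note on the test07_ev_details_vs_sum check above: the property it asserts is that evaluating every B-spline basis function separately (details=True) and summing over the basis axis reproduces the directly summed interpolation (details=False). The sketch below illustrates that relation with plain NumPy only; it does not use the tofu API, and all names and array shapes are illustrative assumptions.
# Illustrative only: stand-in arrays, not tofu objects.
import numpy as np

rng = np.random.default_rng(0)
npts, nbs = 25, 6                    # evaluation points, basis functions
basis = rng.random((npts, nbs))      # per-point value of each basis function
coefs = rng.random(nbs)              # spline coefficients

val_details = basis * coefs          # shape (npts, nbs): one column per basis function
val_sum = basis @ coefs              # shape (npts,): directly summed evaluation

# summing the detailed contributions over the basis axis recovers the total
assert np.allclose(val_details.sum(axis=-1), val_sum)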
cscs-checks/tools/profiling_and_debugging/scorep_mpi_omp.py | jfavre/reframe | 0 | 6631249 | <reponame>jfavre/reframe<filename>cscs-checks/tools/profiling_and_debugging/scorep_mpi_omp.py
import os
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.14')
@rfm.parameterized_test(['C++'], ['F90'])
class ScorepHybrid(rfm.RegressionTest):
def __init__(self, lang):
super().__init__()
self.name = 'scorep_mpi_omp_%s' % lang.replace('+', 'p')
self.descr = 'SCORE-P %s check' % lang
self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi',
'PrgEnv-cray']
self.prgenv_flags = {
'PrgEnv-cray': ['-g', '-homp'],
'PrgEnv-gnu': ['-g', '-fopenmp'],
'PrgEnv-intel': ['-g', '-openmp'],
'PrgEnv-pgi': ['-g', '-mp']
}
self.sourcesdir = os.path.join('src', lang)
self.executable = 'jacobi'
self.build_system = 'Make'
self.build_system.makefile = 'Makefile_scorep_mpi_omp'
# NOTE: Restrict concurrency to allow creation of Fortran modules
if lang == 'F90':
self.build_system.max_concurrency = 1
self.num_tasks = 3
self.num_tasks_per_node = 3
self.num_cpus_per_task = 4
self.num_iterations = 200
self.variables = {
'OMP_NUM_THREADS': str(self.num_cpus_per_task),
'ITERATIONS': str(self.num_iterations),
'SCOREP_ENABLE_PROFILING': 'false',
'SCOREP_ENABLE_TRACING': 'true',
'OMP_PROC_BIND': 'true',
'SCOREP_TIMER': 'clock_gettime'
}
cpu_count = self.num_cpus_per_task * self.num_tasks_per_node
self.otf2_file = 'otf2.txt'
self.sanity_patterns = sn.all([
sn.assert_found('SUCCESS', self.stdout),
sn.assert_eq(sn.count(sn.extractall(
r'(?P<line>LEAVE.*omp\s+\S+\s+\@_jacobi)', self.otf2_file,
'line')), 4 * self.num_iterations * cpu_count),
sn.assert_not_found('warning|WARNING', self.stderr)
])
self.maintainers = ['MK', 'JG']
self.tags = {'production'}
# additional program call in order to generate the tracing output for
# the sanity check
self.post_run = [
'otf2-print scorep-*/traces.otf2 > %s' % self.otf2_file
]
def setup(self, partition, environ, **job_opts):
scorep_ver = '5.0'
tc_ver = '19.03'
cu_ver = '10.0'
self.scorep_modules = {
'PrgEnv-gnu': ['Score-P/%s-CrayGNU-%s' % (scorep_ver, tc_ver)],
'PrgEnv-intel': ['Score-P/%s-CrayIntel-%s' % (scorep_ver, tc_ver)],
'PrgEnv-pgi': ['Score-P/%s-CrayPGI-%s' % (scorep_ver, tc_ver)],
'PrgEnv-cray': ['Score-P/%s-CrayCCE-%s' % (scorep_ver, tc_ver)]
}
if partition.fullname in ['daint:gpu', 'dom:gpu']:
self.scorep_modules['PrgEnv-gnu'] = [
'Score-P/%s-CrayGNU-%s-cuda-%s' % (scorep_ver, tc_ver, cu_ver)
]
self.modules = self.scorep_modules[environ.name]
super().setup(partition, environ, **job_opts)
prgenv_flags = self.prgenv_flags[self.current_environ.name]
self.build_system.cflags = prgenv_flags
self.build_system.cxxflags = prgenv_flags
self.build_system.fflags = prgenv_flags
self.build_system.ldflags = ['-lm']
self.build_system.options = [
"PREP='scorep --nopreprocess --mpp=mpi --thread=omp'"
]
| import os
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.required_version('>=2.14')
@rfm.parameterized_test(['C++'], ['F90'])
class ScorepHybrid(rfm.RegressionTest):
def __init__(self, lang):
super().__init__()
self.name = 'scorep_mpi_omp_%s' % lang.replace('+', 'p')
self.descr = 'SCORE-P %s check' % lang
self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi',
'PrgEnv-cray']
self.prgenv_flags = {
'PrgEnv-cray': ['-g', '-homp'],
'PrgEnv-gnu': ['-g', '-fopenmp'],
'PrgEnv-intel': ['-g', '-openmp'],
'PrgEnv-pgi': ['-g', '-mp']
}
self.sourcesdir = os.path.join('src', lang)
self.executable = 'jacobi'
self.build_system = 'Make'
self.build_system.makefile = 'Makefile_scorep_mpi_omp'
# NOTE: Restrict concurrency to allow creation of Fortran modules
if lang == 'F90':
self.build_system.max_concurrency = 1
self.num_tasks = 3
self.num_tasks_per_node = 3
self.num_cpus_per_task = 4
self.num_iterations = 200
self.variables = {
'OMP_NUM_THREADS': str(self.num_cpus_per_task),
'ITERATIONS': str(self.num_iterations),
'SCOREP_ENABLE_PROFILING': 'false',
'SCOREP_ENABLE_TRACING': 'true',
'OMP_PROC_BIND': 'true',
'SCOREP_TIMER': 'clock_gettime'
}
cpu_count = self.num_cpus_per_task * self.num_tasks_per_node
self.otf2_file = 'otf2.txt'
self.sanity_patterns = sn.all([
sn.assert_found('SUCCESS', self.stdout),
sn.assert_eq(sn.count(sn.extractall(
r'(?P<line>LEAVE.*omp\s+\S+\s+\@_jacobi)', self.otf2_file,
'line')), 4 * self.num_iterations * cpu_count),
sn.assert_not_found('warning|WARNING', self.stderr)
])
self.maintainers = ['MK', 'JG']
self.tags = {'production'}
# additional program call in order to generate the tracing output for
# the sanity check
self.post_run = [
'otf2-print scorep-*/traces.otf2 > %s' % self.otf2_file
]
def setup(self, partition, environ, **job_opts):
scorep_ver = '5.0'
tc_ver = '19.03'
cu_ver = '10.0'
self.scorep_modules = {
'PrgEnv-gnu': ['Score-P/%s-CrayGNU-%s' % (scorep_ver, tc_ver)],
'PrgEnv-intel': ['Score-P/%s-CrayIntel-%s' % (scorep_ver, tc_ver)],
'PrgEnv-pgi': ['Score-P/%s-CrayPGI-%s' % (scorep_ver, tc_ver)],
'PrgEnv-cray': ['Score-P/%s-CrayCCE-%s' % (scorep_ver, tc_ver)]
}
if partition.fullname in ['daint:gpu', 'dom:gpu']:
self.scorep_modules['PrgEnv-gnu'] = [
'Score-P/%s-CrayGNU-%s-cuda-%s' % (scorep_ver, tc_ver, cu_ver)
]
self.modules = self.scorep_modules[environ.name]
super().setup(partition, environ, **job_opts)
prgenv_flags = self.prgenv_flags[self.current_environ.name]
self.build_system.cflags = prgenv_flags
self.build_system.cxxflags = prgenv_flags
self.build_system.fflags = prgenv_flags
self.build_system.ldflags = ['-lm']
self.build_system.options = [
"PREP='scorep --nopreprocess --mpp=mpi --thread=omp'"
] | en | 0.831788 | # NOTE: Restrict concurrency to allow creation of Fortran modules # additional program call in order to generate the tracing output for # the sanity check | 2.004617 | 2 |
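A quick note on the sanity condition in the check above: the OTF2 trace is expected to contain four matching OpenMP LEAVE records for '@_jacobi' per iteration and per CPU, so the asserted count follows directly from the test's own parameters. A small back-of-the-envelope computation (plain Python, values copied from the check above; purely illustrative):
num_iterations = 200
num_cpus_per_task = 4
num_tasks_per_node = 3

cpu_count = num_cpus_per_task * num_tasks_per_node   # 12 OpenMP threads per node
expected_leave_events = 4 * num_iterations * cpu_count
print(expected_leave_events)                         # 9600 matching LEAVE records expected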
nvchecker/source/packagist.py | bboerst/nvchecker | 0 | 6631250 | <reponame>bboerst/nvchecker
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <<EMAIL>>, et al.
from .simple_json import simple_json
PACKAGIST_URL = 'https://packagist.org/packages/%s.json'
def _version_from_json(data):
    # drop the rolling 'dev-master' entry, then return the version whose
    # 'time' field is most recent (None if no tagged release exists)
    data = {version: details for version, details in data["package"]['versions'].items() if version != "dev-master"}
    if len(data):
        return max(data, key=lambda version: data[version]["time"])
get_version, get_cacheable_conf = simple_json(
PACKAGIST_URL,
'packagist',
_version_from_json,
)
| # MIT licensed
# Copyright (c) 2013-2017 lilydjwg <<EMAIL>>, et al.
from .simple_json import simple_json
PACKAGIST_URL = 'https://packagist.org/packages/%s.json'
def _version_from_json(data):
data = {version: details for version, details in data["package"]['versions'].items() if version != "dev-master"}
if len(data):
return max(data, key=lambda version: data[version]["time"])
get_version, get_cacheable_conf = simple_json(
PACKAGIST_URL,
'packagist',
_version_from_json,
) | en | 0.50344 | # MIT licensed # Copyright (c) 2013-2017 lilydjwg <<EMAIL>>, et al. | 2.185652 | 2 |
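A hypothetical usage sketch for _version_from_json above, using a hand-written sample shaped like the Packagist payload (all field values are made up): the rolling 'dev-master' entry is ignored and the version with the latest 'time' stamp is returned.
# Assumes _version_from_json as defined above; sample data is fabricated for illustration.
sample = {
    "package": {
        "versions": {
            "dev-master": {"time": "2018-01-01T00:00:00+00:00"},
            "1.0.0": {"time": "2016-05-01T00:00:00+00:00"},
            "1.1.0": {"time": "2017-03-01T00:00:00+00:00"},
        }
    }
}

print(_version_from_json(sample))  # -> '1.1.0'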