max_stars_repo_path (string, lengths 4-286) | max_stars_repo_name (string, lengths 5-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.03M) | content_cleaned (string, lengths 6-1.03M) | language (string, 111 classes) | language_score (float64, 0.03-1) | comments (string, lengths 0-556k) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---|
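Each record below pairs a raw source file (`content`) with a cleaned copy (`content_cleaned`), its extracted comments/docstrings, and language plus educational-quality scores. The sketch that follows is only an illustration of how records with this schema could be loaded and filtered, assuming the table is published as a Hugging Face dataset; the dataset identifier used here is a placeholder, not the real name.

```python
# Minimal sketch (not part of the dataset): load and filter records that
# follow the schema above. The identifier "user/code-edu-scored" is a
# placeholder assumption, not the dataset's real name.
from datasets import load_dataset

ds = load_dataset("user/code-edu-scored", split="train")

# Keep confidently-detected English files with a high integer education score.
filtered = ds.filter(
    lambda row: row["language"] == "en"
    and row["language_score"] >= 0.9
    and row["edu_int_score"] >= 3
)

# Each record pairs the raw file ("content") with a cleaned copy
# ("content_cleaned") plus its extracted comments and scores.
for row in filtered.select(range(min(3, len(filtered)))):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["edu_score"])
```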
tests/integration/test_outpost_kubernetes.py | BeryJu/passbook | 15 | 6631551 | """outpost tests"""
from unittest.mock import MagicMock, patch
from django.test import TestCase
from kubernetes.client import AppsV1Api
from kubernetes.client.exceptions import OpenApiException
from authentik.core.tests.utils import create_test_flow
from authentik.lib.config import CONFIG
from authentik.outposts.controllers.k8s.deployment import DeploymentReconciler
from authentik.outposts.controllers.k8s.triggers import NeedsUpdate
from authentik.outposts.models import KubernetesServiceConnection, Outpost, OutpostType
from authentik.outposts.tasks import outpost_local_connection
from authentik.providers.proxy.controllers.kubernetes import ProxyKubernetesController
from authentik.providers.proxy.models import ProxyProvider
class OutpostKubernetesTests(TestCase):
"""Test Kubernetes Controllers"""
def setUp(self):
super().setUp()
# Ensure that local connection have been created
outpost_local_connection()
self.provider: ProxyProvider = ProxyProvider.objects.create(
name="test",
internal_host="http://localhost",
external_host="http://localhost",
authorization_flow=create_test_flow(),
)
self.service_connection = KubernetesServiceConnection.objects.first()
self.outpost: Outpost = Outpost.objects.create(
name="test",
type=OutpostType.PROXY,
service_connection=self.service_connection,
)
self.outpost.providers.add(self.provider)
self.outpost.save()
def test_deployment_reconciler(self):
"""test that deployment requires update"""
controller = ProxyKubernetesController(self.outpost, self.service_connection)
deployment_reconciler = DeploymentReconciler(controller)
self.assertIsNotNone(deployment_reconciler.retrieve())
config = self.outpost.config
config.kubernetes_replicas = 3
self.outpost.config = config
with self.assertRaises(NeedsUpdate):
deployment_reconciler.reconcile(
deployment_reconciler.retrieve(),
deployment_reconciler.get_reference_object(),
)
with CONFIG.patch("outposts.container_image_base", "test"):
with self.assertRaises(NeedsUpdate):
deployment_reconciler.reconcile(
deployment_reconciler.retrieve(),
deployment_reconciler.get_reference_object(),
)
deployment_reconciler.delete(deployment_reconciler.get_reference_object())
def test_controller_rename(self):
"""test that objects get deleted and re-created with new names"""
controller = ProxyKubernetesController(self.outpost, self.service_connection)
self.assertIsNone(controller.up())
self.outpost.name = "foo"
self.assertIsNone(controller.up())
apps = AppsV1Api(controller.client)
with self.assertRaises(OpenApiException):
apps.read_namespaced_deployment("test", self.outpost.config.kubernetes_namespace)
controller.down()
def test_controller_full_update(self):
"""Test an update that triggers all objects"""
controller = ProxyKubernetesController(self.outpost, self.service_connection)
self.assertIsNone(controller.up())
with patch(
"authentik.outposts.controllers.k8s.base.get_version", MagicMock(return_value="1234")
):
self.assertIsNone(controller.up())
deployment_reconciler = DeploymentReconciler(controller)
deployment = deployment_reconciler.retrieve()
self.assertEqual(deployment.metadata.labels["app.kubernetes.io/version"], "1234")
controller.down()
| """outpost tests"""
from unittest.mock import MagicMock, patch
from django.test import TestCase
from kubernetes.client import AppsV1Api
from kubernetes.client.exceptions import OpenApiException
from authentik.core.tests.utils import create_test_flow
from authentik.lib.config import CONFIG
from authentik.outposts.controllers.k8s.deployment import DeploymentReconciler
from authentik.outposts.controllers.k8s.triggers import NeedsUpdate
from authentik.outposts.models import KubernetesServiceConnection, Outpost, OutpostType
from authentik.outposts.tasks import outpost_local_connection
from authentik.providers.proxy.controllers.kubernetes import ProxyKubernetesController
from authentik.providers.proxy.models import ProxyProvider
class OutpostKubernetesTests(TestCase):
"""Test Kubernetes Controllers"""
def setUp(self):
super().setUp()
# Ensure that local connection have been created
outpost_local_connection()
self.provider: ProxyProvider = ProxyProvider.objects.create(
name="test",
internal_host="http://localhost",
external_host="http://localhost",
authorization_flow=create_test_flow(),
)
self.service_connection = KubernetesServiceConnection.objects.first()
self.outpost: Outpost = Outpost.objects.create(
name="test",
type=OutpostType.PROXY,
service_connection=self.service_connection,
)
self.outpost.providers.add(self.provider)
self.outpost.save()
def test_deployment_reconciler(self):
"""test that deployment requires update"""
controller = ProxyKubernetesController(self.outpost, self.service_connection)
deployment_reconciler = DeploymentReconciler(controller)
self.assertIsNotNone(deployment_reconciler.retrieve())
config = self.outpost.config
config.kubernetes_replicas = 3
self.outpost.config = config
with self.assertRaises(NeedsUpdate):
deployment_reconciler.reconcile(
deployment_reconciler.retrieve(),
deployment_reconciler.get_reference_object(),
)
with CONFIG.patch("outposts.container_image_base", "test"):
with self.assertRaises(NeedsUpdate):
deployment_reconciler.reconcile(
deployment_reconciler.retrieve(),
deployment_reconciler.get_reference_object(),
)
deployment_reconciler.delete(deployment_reconciler.get_reference_object())
def test_controller_rename(self):
"""test that objects get deleted and re-created with new names"""
controller = ProxyKubernetesController(self.outpost, self.service_connection)
self.assertIsNone(controller.up())
self.outpost.name = "foo"
self.assertIsNone(controller.up())
apps = AppsV1Api(controller.client)
with self.assertRaises(OpenApiException):
apps.read_namespaced_deployment("test", self.outpost.config.kubernetes_namespace)
controller.down()
def test_controller_full_update(self):
"""Test an update that triggers all objects"""
controller = ProxyKubernetesController(self.outpost, self.service_connection)
self.assertIsNone(controller.up())
with patch(
"authentik.outposts.controllers.k8s.base.get_version", MagicMock(return_value="1234")
):
self.assertIsNone(controller.up())
deployment_reconciler = DeploymentReconciler(controller)
deployment = deployment_reconciler.retrieve()
self.assertEqual(deployment.metadata.labels["app.kubernetes.io/version"], "1234")
controller.down()
| en | 0.925836 | outpost tests Test Kubernetes Controllers # Ensure that local connection have been created test that deployment requires update test that objects get deleted and re-created with new names Test an update that triggers all objects | 2.075818 | 2 |
chemreg/substance/apps.py | Chemical-Curation/chemcurator | 1 | 6631552 | <filename>chemreg/substance/apps.py
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SubstanceConfig(AppConfig):
name = "chemreg.substance"
verbose_name = _("Substance")
def ready(self):
try:
import chemreg.substance.signals # noqa F401
except ImportError:
pass
| <filename>chemreg/substance/apps.py
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SubstanceConfig(AppConfig):
name = "chemreg.substance"
verbose_name = _("Substance")
def ready(self):
try:
import chemreg.substance.signals # noqa F401
except ImportError:
pass
| uz | 0.378174 | # noqa F401 | 1.431409 | 1 |
downloadScans.py | LvanWissen/saa-scanDownloader | 0 | 6631553 | <reponame>LvanWissen/saa-scanDownloader
"""
saa-scanDownloader
Usage:
downloadScans.py <collectionNumber> <inventoryNumber> <path> <nscans> <folder>
downloadScans.py <collectionNumber> <inventoryNumber> <path> <nscans> <folder> --concordance False
downloadScans.py (-h | --help)
Arguments:
collectionNumber Collection number in the SAA inventory.
inventoryNumber Inventory number from the collection.
path Path in the new (d.d. 2020) search environment. You can see
this by clicking on a inventory number and check the address
bar. E.g. 1.6.
nscans How many scans are in this inventory? This can also be read
from the SAA website.
folder Output folder. A folder with the inventory number is
automatically created in this folder.
Options:
-h --help Show this screen.
--concordance <true_or_false> Save a concordance.json file with scanname and uuid [default: True]
"""
import os
import math
import time
import requests
import json
from docopt import docopt
APIURL = "https://webservices.picturae.com/archives/scans/"
DOWNLOADURL = "https://download-images.memorix.nl/ams/download/fullsize/"
def getScans(path: str,
nscans: int,
collectionNumber: str,
start: int = 0,
limit: int = 100,
APIURL=APIURL):
"""
Download scan metadata that contains a.o. the name and uuid.
Args:
path (str): Path in the new (d.d. 2020) search environment. You can see
this by clicking on a inventory number and check the address
bar. E.g. 1.6.
nscans (int): How many scans are in this inventory? This can also be
read from the SAA website.
collectionNumber (str): Collection number in the SAA inventory.
start (int, optional): Offset. Defaults to 0.
limit (int, optional): Maximum number of scans to retrieve in one go.
Defaults to 100.
APIURL ([type], optional): Picturae url. Defaults to APIURL.
"""
url = APIURL + collectionNumber + '/' + path
arguments = {
'apiKey': '<KEY>',
'lang': 'nl_NL',
'findingAid': collectionNumber,
'path': path,
'callback': 'callback_json8',
'start': start,
'limit': limit
}
scans = []
for i in range(math.ceil(nscans / 100)):
r = requests.get(url, arguments)
data = r.text.replace('callback_json8(', '').replace(')', '')
data = json.loads(data)
arguments['start'] += limit
time.sleep(.6) # be gentle
scans += data['scans']['scans']
return scans
def downloadScan(uuidScan: str, scanName: str, folder: str = 'data'):
"""
Download a single scan by uuid.
Args:
uuidScan (str): a scan's uuid (e.g. cb8e6db8-6dc7-50d6-97c1-6d6fa5284ab3)
scanName (str): a scan's name (e.g. KLAC00169000001)
folder (str, optional): Output folder. A folder with the inventory
number is automatically created in this folder.
Defaults to 'data'.
"""
fp = os.path.join(folder, scanName + ".jpg")
url = DOWNLOADURL + uuidScan + '.jpg'
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(fp, 'wb') as outfile:
for chunk in r:
outfile.write(chunk)
def main(path, nscans, collectionNumber, inventoryNumber, folder,
concordancefile):
folder = os.path.join(folder, inventoryNumber)
if concordancefile:
concordance = dict()
# 1. Obtain the scan metadata
scans = getScans(path, nscans, collectionNumber)
# 2. Download each scan
print(f"Downloading scans to {os.path.abspath(folder)}:")
for n, scan in enumerate(scans, 1):
uuid = scan['id']
name = scan['name']
print(f"\t{n}/{nscans}\t{name}.jpg")
downloadScan(uuid, name, folder)
if concordancefile:
concordance[name] = uuid
time.sleep(1)
break
if concordancefile:
with open(os.path.join(folder, 'concordance.json'),
'w',
encoding='utf-8') as jsonfile:
json.dump(concordance, jsonfile, indent=4)
if __name__ == "__main__":
arguments = docopt(__doc__)
COLLECTIONNUMBER = arguments['<collectionNumber>']
INVENTORYNUMBER = arguments['<inventoryNumber>']
PATH = arguments['<path>']
NSCANS = int(arguments['<nscans>'])
FOLDER = arguments['<folder>']
CONCORDANCEFILE = True if arguments['--concordance'] == 'True' else False
os.makedirs(os.path.join(FOLDER, INVENTORYNUMBER), exist_ok=True)
main(PATH, NSCANS, COLLECTIONNUMBER, INVENTORYNUMBER, FOLDER,
CONCORDANCEFILE)
| """
saa-scanDownloader
Usage:
downloadScans.py <collectionNumber> <inventoryNumber> <path> <nscans> <folder>
downloadScans.py <collectionNumber> <inventoryNumber> <path> <nscans> <folder> --concordance False
downloadScans.py (-h | --help)
Arguments:
collectionNumber Collection number in the SAA inventory.
inventoryNumber Inventory number from the collection.
path Path in the new (d.d. 2020) search environment. You can see
this by clicking on a inventory number and check the address
bar. E.g. 1.6.
nscans How many scans are in this inventory? This can also be read
from the SAA website.
folder Output folder. A folder with the inventory number is
automatically created in this folder.
Options:
-h --help Show this screen.
--concordance <true_or_false> Save a concordance.json file with scanname and uuid [default: True]
"""
import os
import math
import time
import requests
import json
from docopt import docopt
APIURL = "https://webservices.picturae.com/archives/scans/"
DOWNLOADURL = "https://download-images.memorix.nl/ams/download/fullsize/"
def getScans(path: str,
nscans: int,
collectionNumber: str,
start: int = 0,
limit: int = 100,
APIURL=APIURL):
"""
Download scan metadata that contains a.o. the name and uuid.
Args:
path (str): Path in the new (d.d. 2020) search environment. You can see
this by clicking on a inventory number and check the address
bar. E.g. 1.6.
nscans (int): How many scans are in this inventory? This can also be
read from the SAA website.
collectionNumber (str): Collection number in the SAA inventory.
start (int, optional): Offset. Defaults to 0.
limit (int, optional): Maximum number of scans to retrieve in one go.
Defaults to 100.
APIURL ([type], optional): Picturae url. Defaults to APIURL.
"""
url = APIURL + collectionNumber + '/' + path
arguments = {
'apiKey': '<KEY>',
'lang': 'nl_NL',
'findingAid': collectionNumber,
'path': path,
'callback': 'callback_json8',
'start': start,
'limit': limit
}
scans = []
for i in range(math.ceil(nscans / 100)):
r = requests.get(url, arguments)
data = r.text.replace('callback_json8(', '').replace(')', '')
data = json.loads(data)
arguments['start'] += limit
time.sleep(.6) # be gentle
scans += data['scans']['scans']
return scans
def downloadScan(uuidScan: str, scanName: str, folder: str = 'data'):
"""
Download a single scan by uuid.
Args:
uuidScan (str): a scan's uuid (e.g. cb8e6db8-6dc7-50d6-97c1-6d6fa5284ab3)
scanName (str): a scan's name (e.g. KLAC00169000001)
folder (str, optional): Output folder. A folder with the inventory
number is automatically created in this folder.
Defaults to 'data'.
"""
fp = os.path.join(folder, scanName + ".jpg")
url = DOWNLOADURL + uuidScan + '.jpg'
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(fp, 'wb') as outfile:
for chunk in r:
outfile.write(chunk)
def main(path, nscans, collectionNumber, inventoryNumber, folder,
concordancefile):
folder = os.path.join(folder, inventoryNumber)
if concordancefile:
concordance = dict()
# 1. Obtain the scan metadata
scans = getScans(path, nscans, collectionNumber)
# 2. Download each scan
print(f"Downloading scans to {os.path.abspath(folder)}:")
for n, scan in enumerate(scans, 1):
uuid = scan['id']
name = scan['name']
print(f"\t{n}/{nscans}\t{name}.jpg")
downloadScan(uuid, name, folder)
if concordancefile:
concordance[name] = uuid
time.sleep(1)
break
if concordancefile:
with open(os.path.join(folder, 'concordance.json'),
'w',
encoding='utf-8') as jsonfile:
json.dump(concordance, jsonfile, indent=4)
if __name__ == "__main__":
arguments = docopt(__doc__)
COLLECTIONNUMBER = arguments['<collectionNumber>']
INVENTORYNUMBER = arguments['<inventoryNumber>']
PATH = arguments['<path>']
NSCANS = int(arguments['<nscans>'])
FOLDER = arguments['<folder>']
CONCORDANCEFILE = True if arguments['--concordance'] == 'True' else False
os.makedirs(os.path.join(FOLDER, INVENTORYNUMBER), exist_ok=True)
main(PATH, NSCANS, COLLECTIONNUMBER, INVENTORYNUMBER, FOLDER,
CONCORDANCEFILE) | en | 0.726851 | saa-scanDownloader Usage: downloadScans.py <collectionNumber> <inventoryNumber> <path> <nscans> <folder> downloadScans.py <collectionNumber> <inventoryNumber> <path> <nscans> <folder> --concordance False downloadScans.py (-h | --help) Arguments: collectionNumber Collection number in the SAA inventory. inventoryNumber Inventory number from the collection. path Path in the new (d.d. 2020) search environment. You can see this by clicking on a inventory number and check the address bar. E.g. 1.6. nscans How many scans are in this inventory? This can also be read from the SAA website. folder Output folder. A folder with the inventory number is automatically created in this folder. Options: -h --help Show this screen. --concordance <true_or_false> Save a concordance.json file with scanname and uuid [default: True] Download scan metadata that contains a.o. the name and uuid. Args: path (str): Path in the new (d.d. 2020) search environment. You can see this by clicking on a inventory number and check the address bar. E.g. 1.6. nscans (int): How many scans are in this inventory? This can also be read from the SAA website. collectionNumber (str): Collection number in the SAA inventory. start (int, optional): Offset. Defaults to 0. limit (int, optional): Maximum number of scans to retrieve in one go. Defaults to 100. APIURL ([type], optional): Picturae url. Defaults to APIURL. # be gentle Download a single scan by uuid. Args: uuidScan (str): a scan's uuid (e.g. cb8e6db8-6dc7-50d6-97c1-6d6fa5284ab3) scanName (str): a scan's name (e.g. KLAC00169000001) folder (str, optional): Output folder. A folder with the inventory number is automatically created in this folder. Defaults to 'data'. # 1. Obtain the scan metadata # 2. Download each scan | 3.277225 | 3 |
Chapter4_TheGreatestTheoremNeverTold/top_pic_comments.py | pyarnold/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | 1 | 6631554 | <reponame>pyarnold/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers<gh_stars>1-10
import sys
import numpy as np
from IPython.core.display import Image
import praw
reddit = praw.Reddit("BayesianMethodsForHackers")
subreddit = reddit.get_subreddit("pics")
top_submissions = subreddit.get_top()
n_pic = int(sys.argv[1]) if sys.argv[1] else 1
i = 0
while i < n_pic:
top_submission = top_submissions.next()
while "i.imgur.com" not in top_submission.url:
# make sure it is linking to an image, not a webpage.
top_submission = top_submissions.next()
i += 1
print "Title of submission: \n", top_submission.title
top_post_url = top_submission.url
#top_submission.replace_more_comments(limit=5, threshold=0)
print top_post_url
upvotes = []
downvotes = []
contents = []
_all_comments = top_submission.comments
all_comments = []
for comment in _all_comments:
try:
upvotes.append(comment.ups)
downvotes.append(comment.downs)
contents.append(comment.body)
except Exception as e:
continue
votes = np.array([upvotes, downvotes]).T
| import sys
import numpy as np
from IPython.core.display import Image
import praw
reddit = praw.Reddit("BayesianMethodsForHackers")
subreddit = reddit.get_subreddit("pics")
top_submissions = subreddit.get_top()
n_pic = int(sys.argv[1]) if sys.argv[1] else 1
i = 0
while i < n_pic:
top_submission = top_submissions.next()
while "i.imgur.com" not in top_submission.url:
# make sure it is linking to an image, not a webpage.
top_submission = top_submissions.next()
i += 1
print "Title of submission: \n", top_submission.title
top_post_url = top_submission.url
#top_submission.replace_more_comments(limit=5, threshold=0)
print top_post_url
upvotes = []
downvotes = []
contents = []
_all_comments = top_submission.comments
all_comments = []
for comment in _all_comments:
try:
upvotes.append(comment.ups)
downvotes.append(comment.downs)
contents.append(comment.body)
except Exception as e:
continue
votes = np.array([upvotes, downvotes]).T | en | 0.769779 | # make sure it is linking to an image, not a webpage. #top_submission.replace_more_comments(limit=5, threshold=0) | 2.908817 | 3 |
src/sylvie/ast_based/nodes.py | MrAdityaAlok/Sylvie | 1 | 6631555 | <reponame>MrAdityaAlok/Sylvie<gh_stars>1-10
from dataclasses import dataclass, field
from typing import Union
from sylvie.ast_based.ast import AST
from sylvie.tokens import Token
@dataclass
class BinOp(AST):
right_child: Token
op: Token
left_child: Token
@dataclass
class Num(AST):
token: Token
value: Union[int, float] = field(init=False)
def __post_init__(self):
self.value = self.token.value
| from dataclasses import dataclass, field
from typing import Union
from sylvie.ast_based.ast import AST
from sylvie.tokens import Token
@dataclass
class BinOp(AST):
right_child: Token
op: Token
left_child: Token
@dataclass
class Num(AST):
token: Token
value: Union[int, float] = field(init=False)
def __post_init__(self):
self.value = self.token.value | none | 1 | 2.546417 | 3 |
|
bedrock/doc/annotation.py | openmednlp/bedrock | 2 | 6631556 | <gh_stars>1-10
class Annotation:
ID = 'id'
BEGIN = 'begin'
END = 'end'
LAYER = 'layer'
FEATURE = 'feature'
FEATURE_VAL = 'feature_value'
COLS = [ID, BEGIN, END, LAYER, FEATURE, FEATURE_VAL]
| class Annotation:
ID = 'id'
BEGIN = 'begin'
END = 'end'
LAYER = 'layer'
FEATURE = 'feature'
FEATURE_VAL = 'feature_value'
COLS = [ID, BEGIN, END, LAYER, FEATURE, FEATURE_VAL] | none | 1 | 1.589744 | 2 |
|
exercises/zh/test_02_06.py | Jette16/spacy-course | 2,085 | 6631557 | <reponame>Jette16/spacy-course
def test():
assert (
"import Doc, Span" or "import Span, Doc" in __solution__
), "你有正确导入Doc和Span吗?"
assert doc.text == "我喜欢周杰伦", "你有正确创建Doc吗?"
assert span.text == "周杰伦", "有正确创建span吗?"
assert span.label_ == "PERSON", "你有把标签PERSON加到span中吗?"
assert "doc.ents =" in __solution__, "你有覆盖doc.ents吗?"
assert len(doc.ents) == 1, "你有把span加入到doc.ents吗?"
assert (
list(doc.ents)[0].text == "周杰伦"
), "你有把span加入到doc.ents吗?"
__msg__.good(
"完美!之后我们学习编码信息提取流程的时候,我们就会发现"
"手动创建spaCy的实例并改变其中的实体会非常方便有用。"
)
| def test():
assert (
"import Doc, Span" or "import Span, Doc" in __solution__
), "你有正确导入Doc和Span吗?"
assert doc.text == "我喜欢周杰伦", "你有正确创建Doc吗?"
assert span.text == "周杰伦", "有正确创建span吗?"
assert span.label_ == "PERSON", "你有把标签PERSON加到span中吗?"
assert "doc.ents =" in __solution__, "你有覆盖doc.ents吗?"
assert len(doc.ents) == 1, "你有把span加入到doc.ents吗?"
assert (
list(doc.ents)[0].text == "周杰伦"
), "你有把span加入到doc.ents吗?"
__msg__.good(
"完美!之后我们学习编码信息提取流程的时候,我们就会发现"
"手动创建spaCy的实例并改变其中的实体会非常方便有用。"
) | none | 1 | 2.477262 | 2 |
|
lib/bes/thread/global_thread_pool.py | reconstruir/bes | 0 | 6631558 | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import atexit
from threading import Lock
from .thread_pool import thread_pool
from bes.system.log import log
class global_thread_pool(object):
'A global thread pool that can be used for misc async tasks that dont require oversight.'
_num_threads = 8
_pool = None
_lock = Lock()
@classmethod
def set_num_threads(clazz, num_threads):
success = False
clazz._lock.acquire()
if not clazz._pool:
clazz._num_threads = num_threads
success = True
clazz._lock.release()
if not success:
raise RuntimeError('Global thread pool is already running. Call set_num_threads() before add_task()')
@classmethod
def add_task(clazz, func, *args, **kargs):
'Add a task to the global thread pool.'
clazz._lock.acquire()
if not clazz._pool:
clazz._pool = clazz.__start_global_thread_pool_i(clazz._num_threads)
clazz._lock.release()
clazz._pool.add_task(func, *args, **kargs)
@classmethod
def __start_global_thread_pool_i(clazz, num_threads):
clazz.log_d('Starting global thread pool with %d threads.' % (num_threads))
gtp = thread_pool(num_threads = num_threads)
def __global_thread_pool_atexit_cleanup(thread_pool):
thread_pool.log_d('__global_thread_pool_atexit_cleanup(%s) waiting...' % (thread_pool))
thread_pool.wait_completion()
thread_pool.log_d('__global_thread_pool_atexit_cleanup(%s) done waiting...' % (thread_pool))
atexit.register(__global_thread_pool_atexit_cleanup, gtp)
return gtp
log.add_logging(global_thread_pool, 'global_thread_pool')
| #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import atexit
from threading import Lock
from .thread_pool import thread_pool
from bes.system.log import log
class global_thread_pool(object):
'A global thread pool that can be used for misc async tasks that dont require oversight.'
_num_threads = 8
_pool = None
_lock = Lock()
@classmethod
def set_num_threads(clazz, num_threads):
success = False
clazz._lock.acquire()
if not clazz._pool:
clazz._num_threads = num_threads
success = True
clazz._lock.release()
if not success:
raise RuntimeError('Global thread pool is already running. Call set_num_threads() before add_task()')
@classmethod
def add_task(clazz, func, *args, **kargs):
'Add a task to the global thread pool.'
clazz._lock.acquire()
if not clazz._pool:
clazz._pool = clazz.__start_global_thread_pool_i(clazz._num_threads)
clazz._lock.release()
clazz._pool.add_task(func, *args, **kargs)
@classmethod
def __start_global_thread_pool_i(clazz, num_threads):
clazz.log_d('Starting global thread pool with %d threads.' % (num_threads))
gtp = thread_pool(num_threads = num_threads)
def __global_thread_pool_atexit_cleanup(thread_pool):
thread_pool.log_d('__global_thread_pool_atexit_cleanup(%s) waiting...' % (thread_pool))
thread_pool.wait_completion()
thread_pool.log_d('__global_thread_pool_atexit_cleanup(%s) done waiting...' % (thread_pool))
atexit.register(__global_thread_pool_atexit_cleanup, gtp)
return gtp
log.add_logging(global_thread_pool, 'global_thread_pool')
| en | 0.595088 | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*- | 2.750668 | 3 |
setup.py | dciampa/transitivecitation | 0 | 6631559 | <reponame>dciampa/transitivecitation<gh_stars>0
# !/usr/bin/env python
from distutils.core import setup
setup(
name='transitive-citation',
packages=[],
version='0.1.0',
description='Transitive Citation',
author='<NAME>, <NAME>, and <NAME>',
license='MIT',
author_email='<EMAIL>',
url='https://github.com/dciampa/transitivecitation',
keywords=['ads', 'citations', 'package', ],
)
| # !/usr/bin/env python
from distutils.core import setup
setup(
name='transitive-citation',
packages=[],
version='0.1.0',
description='Transitive Citation',
author='<NAME>, <NAME>, and <NAME>',
license='MIT',
author_email='<EMAIL>',
url='https://github.com/dciampa/transitivecitation',
keywords=['ads', 'citations', 'package', ],
) | fr | 0.163581 | # !/usr/bin/env python | 0.9645 | 1 |
DonkiDirector/DonkiDirector_cmdline.py | ess-dmsc/do-ess-data-simulator | 0 | 6631560 | #!/usr/bin/env python
from DirectorBgnThread import directorThread
import traceback
"""
if __name__ == "__main__":
dt = directorThread(None)
dt.start()
dt.max_triggers = 50
dt.EnableDataSaving = False
nFiles = 1
dt.set_file_prefix("mytest")
dt.set_file_path(".")
dt.set_files_contiguous(True)
dt.set_files_to_save(nFiles)
dt.set_file_size(dt.max_triggers/nFiles)
time.sleep(6)
dt.set_DataAlias("paperino/image","paperino/mandelbrot")
dt.set_player_priority("paperino",0)
dt.set_player_priority("pippo",1)
dt.set_player_priority("pluto",1)
dt._started = True
while (dt._started):
time.sleep(1)
print "-------------",dt.zcc.ask_for_log("paperino")
print "-------------",dt.zcc.ask_for_log("pluto")
print "-------------",dt.zcc.ask_for_log("pippo")
#dt.join()
print dt.PlayersInfo
"""
def get_user_input_loop(dt):
while True: # infinite loop
try:
n = raw_input("\n\nEnter command (type ? for help): ")
cmd_in = (n.lower()).strip(' ')
if cmd_in == "start":
dt._started = True
elif cmd_in == "stop":
dt._started = False
elif cmd_in == "players?":
print dt.PlayersInfo
elif "priority[" in cmd_in:
plname = (cmd_in.split("[")[1]).split("]")[0]
prio = int((cmd_in.split("="))[-1])
dt.set_player_priority(plname,prio)
elif "triggers=" in cmd_in:
max_triggers = int(cmd_in.split('=')[-1])
dt.max_triggers = max_triggers
print "OK"
elif cmd_in == "quit":
return # stops the loop
elif cmd_in == "?":
print "Available commands:"
print "\tstart, stop, players?, triggers=N, priority[plname]=N, quit"
except KeyboardInterrupt:
print "Bye"
return
except Exception:
traceback.print_exc()
if __name__ == "__main__":
dt = directorThread(None)
dt.start()
#
dt.max_triggers = 1
dt.EnableDataSaving = False
nFiles = 1
dt.set_file_prefix("mytest")
dt.set_file_path(".")
dt.set_files_contiguous(True)
dt.set_files_to_save(nFiles)
dt.set_file_size(dt.max_triggers/nFiles)
#
get_user_input_loop(dt)
dt.quit_and_exit()
| #!/usr/bin/env python
from DirectorBgnThread import directorThread
import traceback
"""
if __name__ == "__main__":
dt = directorThread(None)
dt.start()
dt.max_triggers = 50
dt.EnableDataSaving = False
nFiles = 1
dt.set_file_prefix("mytest")
dt.set_file_path(".")
dt.set_files_contiguous(True)
dt.set_files_to_save(nFiles)
dt.set_file_size(dt.max_triggers/nFiles)
time.sleep(6)
dt.set_DataAlias("paperino/image","paperino/mandelbrot")
dt.set_player_priority("paperino",0)
dt.set_player_priority("pippo",1)
dt.set_player_priority("pluto",1)
dt._started = True
while (dt._started):
time.sleep(1)
print "-------------",dt.zcc.ask_for_log("paperino")
print "-------------",dt.zcc.ask_for_log("pluto")
print "-------------",dt.zcc.ask_for_log("pippo")
#dt.join()
print dt.PlayersInfo
"""
def get_user_input_loop(dt):
while True: # infinite loop
try:
n = raw_input("\n\nEnter command (type ? for help): ")
cmd_in = (n.lower()).strip(' ')
if cmd_in == "start":
dt._started = True
elif cmd_in == "stop":
dt._started = False
elif cmd_in == "players?":
print dt.PlayersInfo
elif "priority[" in cmd_in:
plname = (cmd_in.split("[")[1]).split("]")[0]
prio = int((cmd_in.split("="))[-1])
dt.set_player_priority(plname,prio)
elif "triggers=" in cmd_in:
max_triggers = int(cmd_in.split('=')[-1])
dt.max_triggers = max_triggers
print "OK"
elif cmd_in == "quit":
return # stops the loop
elif cmd_in == "?":
print "Available commands:"
print "\tstart, stop, players?, triggers=N, priority[plname]=N, quit"
except KeyboardInterrupt:
print "Bye"
return
except Exception:
traceback.print_exc()
if __name__ == "__main__":
dt = directorThread(None)
dt.start()
#
dt.max_triggers = 1
dt.EnableDataSaving = False
nFiles = 1
dt.set_file_prefix("mytest")
dt.set_file_path(".")
dt.set_files_contiguous(True)
dt.set_files_to_save(nFiles)
dt.set_file_size(dt.max_triggers/nFiles)
#
get_user_input_loop(dt)
dt.quit_and_exit()
| en | 0.28253 | #!/usr/bin/env python if __name__ == "__main__": dt = directorThread(None) dt.start() dt.max_triggers = 50 dt.EnableDataSaving = False nFiles = 1 dt.set_file_prefix("mytest") dt.set_file_path(".") dt.set_files_contiguous(True) dt.set_files_to_save(nFiles) dt.set_file_size(dt.max_triggers/nFiles) time.sleep(6) dt.set_DataAlias("paperino/image","paperino/mandelbrot") dt.set_player_priority("paperino",0) dt.set_player_priority("pippo",1) dt.set_player_priority("pluto",1) dt._started = True while (dt._started): time.sleep(1) print "-------------",dt.zcc.ask_for_log("paperino") print "-------------",dt.zcc.ask_for_log("pluto") print "-------------",dt.zcc.ask_for_log("pippo") #dt.join() print dt.PlayersInfo # infinite loop # stops the loop # # | 2.68102 | 3 |
src/openscm_runner/run.py | znicholls/openscm-runner-1 | 0 | 6631561 | <reponame>znicholls/openscm-runner-1
"""
High-level run function
"""
import scmdata
from dotenv import find_dotenv, load_dotenv
from tqdm.autonotebook import tqdm
from .adapters import MAGICC7
# is this the right place to put this...
load_dotenv(find_dotenv(), verbose=True)
def run(
climate_models_cfgs,
scenarios,
output_variables=("Surface Temperature",),
full_config=False,
):
"""
Run a number of climate models over a number of scenarios
Parameters
----------
climate_models_cfgs : dict[str: list]
Dictionary where each key is a model and each value is the configs
with which to run the model. The configs are passed to the model
adapter.
scenarios : :obj:`pyam.IamDataFrame`
Scenarios to run
output_variables : list[str]
Variables to include in the output
full_config : bool
Include the configuration used to run each model in the output's
metadata
Returns
-------
:obj:`scmdata.ScmDataFrame`
Model output
Raises
------
NotImplementedError
``full_config`` is ``True``, we haven't worked out how this should
behave yet.
"""
if full_config:
raise NotImplementedError("Returning full config is not yet implemented")
res = []
for climate_model, cfgs in tqdm(climate_models_cfgs.items(), desc="Climate models"):
if climate_model == "MAGICC7":
runner = MAGICC7()
else:
raise NotImplementedError(
"No adapter available for {}".format(climate_model)
)
model_res = runner.run(scenarios, cfgs, output_variables=output_variables)
res.append(model_res)
for i, model_res in enumerate(res):
if i < 1:
key_meta = model_res.meta.columns.tolist()
assert model_res.meta.columns.tolist() == key_meta
scmdf = scmdata.df_append(res)
return scmdf
| """
High-level run function
"""
import scmdata
from dotenv import find_dotenv, load_dotenv
from tqdm.autonotebook import tqdm
from .adapters import MAGICC7
# is this the right place to put this...
load_dotenv(find_dotenv(), verbose=True)
def run(
climate_models_cfgs,
scenarios,
output_variables=("Surface Temperature",),
full_config=False,
):
"""
Run a number of climate models over a number of scenarios
Parameters
----------
climate_models_cfgs : dict[str: list]
Dictionary where each key is a model and each value is the configs
with which to run the model. The configs are passed to the model
adapter.
scenarios : :obj:`pyam.IamDataFrame`
Scenarios to run
output_variables : list[str]
Variables to include in the output
full_config : bool
Include the configuration used to run each model in the output's
metadata
Returns
-------
:obj:`scmdata.ScmDataFrame`
Model output
Raises
------
NotImplementedError
``full_config`` is ``True``, we haven't worked out how this should
behave yet.
"""
if full_config:
raise NotImplementedError("Returning full config is not yet implemented")
res = []
for climate_model, cfgs in tqdm(climate_models_cfgs.items(), desc="Climate models"):
if climate_model == "MAGICC7":
runner = MAGICC7()
else:
raise NotImplementedError(
"No adapter available for {}".format(climate_model)
)
model_res = runner.run(scenarios, cfgs, output_variables=output_variables)
res.append(model_res)
for i, model_res in enumerate(res):
if i < 1:
key_meta = model_res.meta.columns.tolist()
assert model_res.meta.columns.tolist() == key_meta
scmdf = scmdata.df_append(res)
return scmdf | en | 0.754043 | High-level run function # is this the right place to put this... Run a number of climate models over a number of scenarios Parameters ---------- climate_models_cfgs : dict[str: list] Dictionary where each key is a model and each value is the configs with which to run the model. The configs are passed to the model adapter. scenarios : :obj:`pyam.IamDataFrame` Scenarios to run output_variables : list[str] Variables to include in the output full_config : bool Include the configuration used to run each model in the output's metadata Returns ------- :obj:`scmdata.ScmDataFrame` Model output Raises ------ NotImplementedError ``full_config`` is ``True``, we haven't worked out how this should behave yet. | 2.653917 | 3 |
examples/App/FXConverter/suites.py | chiragmatkar/testplan | 96 | 6631562 | """FX conversion tests."""
import os
from testplan.testing.multitest import testsuite, testcase
def msg_to_bytes(msg, standard="utf-8"):
"""Encode text to bytes."""
return bytes(msg.encode(standard))
def bytes_to_msg(seq, standard="utf-8"):
"""Decode bytes to text."""
return seq.decode(standard)
def custom_docstring_func(_, kwargs):
"""
Return original docstring (if available) and
parametrization arguments in the format ``key: value``.
"""
kwargs_strings = [
"{}: {}".format(arg_name, arg_value)
for arg_name, arg_value in kwargs.items()
]
return os.linesep.join(kwargs_strings)
@testsuite
class ConversionTests(object):
"""Sample currency conversion operations."""
def __init__(self):
self._rates = {"EUR": {"GBP": "0.90000"}, "GBP": {"EUR": "1.10000"}}
@testcase(
parameters=(
("EUR:GBP:1000", "900"),
("GBP:EUR:1000", "1100"),
("EUR:GBP:1500", "1350"),
("GBP:EUR:1500", "1650"),
),
docstring_func=custom_docstring_func,
)
def conversion_parameterized(self, env, result, request, expect):
"""
Client sends a request to the currency converter app.
The converter retrieves the conversion rate from the downstream
and sends back the converted result to the client.
"""
env.client.send(msg_to_bytes(request))
# App requests rates from server
received = bytes_to_msg(env.server.receive(size=7))
result.equal(request[:7], received, "Downstream receives rate query.")
source, target = received.split(":")
rate = self._rates[source][target]
result.log("Downstream sends rate: {}".format(rate))
env.server.send(msg_to_bytes(rate))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
@testsuite
class EdgeCases(object):
"""Suite containing edge case scenarios."""
@testcase
def same_currency(self, env, result):
"""
Client requests conversion to the same currency.
No downstream is involved as no rate is needed.
"""
request = "EUR:EUR:2000"
expect = "2000"
result.log("Client request: {}".format(request))
env.client.send(msg_to_bytes(request))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
@testcase
def zero_amount(self, env, result):
"""
Client requests conversion of 0 amount.
No downstream is involved as no rate is needed.
"""
request = "EUR:GBP:0"
expect = "0"
result.log("Client request: {}".format(request))
env.client.send(msg_to_bytes(request))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
@testcase(
parameters=(
"GBP.EUR.1000",
"EUR::GBP:500",
"GBP:EURO:1000",
"GBP:EUR:ABC",
),
docstring_func=custom_docstring_func,
)
def invalid_requests(self, env, result, request):
"""
Client sends a request with incorrect format.
Requests are matched by [A-Z]{3}:[A-Z]{3}:[0-9]+ regex.
"""
env.client.send(msg_to_bytes(request))
# Client receives response.
result.contain(
"Invalid request format",
bytes_to_msg(env.client.receive(size=1024)),
"Invalid request error received.",
)
@testsuite
class RestartEvent(object):
"""Converter app restart and reconnect scenarios."""
def _send_and_receive(self, env, result, request, rate, expect):
"""
Client sends a request to the currency converter app.
The converter retrieves the conversion rate from the downstream
and sends back the converted result to the client.
"""
result.log("Client sends request: {}".format(request))
env.client.send(msg_to_bytes(request))
# App requests rates from server
received = bytes_to_msg(env.server.receive(size=7))
result.equal(request[:7], received, "Downstream receives rate query.")
result.log("Downstream sends rate: {}".format(rate))
env.server.send(msg_to_bytes(rate))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
def _restart_components(self, env, result):
"""
Restart converter app.
Accept new connection from rate sending server.
Restart client driver to connect to new host:port.
"""
result.log("Restarting converter app.")
env.converter.restart()
result.log(
"App is now listening on {}:{}".format(
env.converter.host, env.converter.port
)
)
env.server.accept_connection()
env.client.restart()
@testcase
def restart_app(self, env, result):
"""
Restarting converter app and reconnect with rate
server and client components before doing new requests.
"""
result.log(
"App is listening on {}:{}".format(
env.converter.host, env.converter.port
)
)
self._send_and_receive(env, result, "EUR:GBP:1000", "0.8500", "850")
self._restart_components(env, result)
self._send_and_receive(env, result, "EUR:GBP:1000", "0.8700", "870")
self._restart_components(env, result)
self._send_and_receive(env, result, "EUR:GBP:2000", "0.8300", "1660")
| """FX conversion tests."""
import os
from testplan.testing.multitest import testsuite, testcase
def msg_to_bytes(msg, standard="utf-8"):
"""Encode text to bytes."""
return bytes(msg.encode(standard))
def bytes_to_msg(seq, standard="utf-8"):
"""Decode bytes to text."""
return seq.decode(standard)
def custom_docstring_func(_, kwargs):
"""
Return original docstring (if available) and
parametrization arguments in the format ``key: value``.
"""
kwargs_strings = [
"{}: {}".format(arg_name, arg_value)
for arg_name, arg_value in kwargs.items()
]
return os.linesep.join(kwargs_strings)
@testsuite
class ConversionTests(object):
"""Sample currency conversion operations."""
def __init__(self):
self._rates = {"EUR": {"GBP": "0.90000"}, "GBP": {"EUR": "1.10000"}}
@testcase(
parameters=(
("EUR:GBP:1000", "900"),
("GBP:EUR:1000", "1100"),
("EUR:GBP:1500", "1350"),
("GBP:EUR:1500", "1650"),
),
docstring_func=custom_docstring_func,
)
def conversion_parameterized(self, env, result, request, expect):
"""
Client sends a request to the currency converter app.
The converter retrieves the conversion rate from the downstream
and sends back the converted result to the client.
"""
env.client.send(msg_to_bytes(request))
# App requests rates from server
received = bytes_to_msg(env.server.receive(size=7))
result.equal(request[:7], received, "Downstream receives rate query.")
source, target = received.split(":")
rate = self._rates[source][target]
result.log("Downstream sends rate: {}".format(rate))
env.server.send(msg_to_bytes(rate))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
@testsuite
class EdgeCases(object):
"""Suite containing edge case scenarios."""
@testcase
def same_currency(self, env, result):
"""
Client requests conversion to the same currency.
No downstream is involved as no rate is needed.
"""
request = "EUR:EUR:2000"
expect = "2000"
result.log("Client request: {}".format(request))
env.client.send(msg_to_bytes(request))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
@testcase
def zero_amount(self, env, result):
"""
Client requests conversion of 0 amount.
No downstream is involved as no rate is needed.
"""
request = "EUR:GBP:0"
expect = "0"
result.log("Client request: {}".format(request))
env.client.send(msg_to_bytes(request))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
@testcase(
parameters=(
"GBP.EUR.1000",
"EUR::GBP:500",
"GBP:EURO:1000",
"GBP:EUR:ABC",
),
docstring_func=custom_docstring_func,
)
def invalid_requests(self, env, result, request):
"""
Client sends a request with incorrect format.
Requests are matched by [A-Z]{3}:[A-Z]{3}:[0-9]+ regex.
"""
env.client.send(msg_to_bytes(request))
# Client receives response.
result.contain(
"Invalid request format",
bytes_to_msg(env.client.receive(size=1024)),
"Invalid request error received.",
)
@testsuite
class RestartEvent(object):
"""Converter app restart and reconnect scenarios."""
def _send_and_receive(self, env, result, request, rate, expect):
"""
Client sends a request to the currency converter app.
The converter retrieves the conversion rate from the downstream
and sends back the converted result to the client.
"""
result.log("Client sends request: {}".format(request))
env.client.send(msg_to_bytes(request))
# App requests rates from server
received = bytes_to_msg(env.server.receive(size=7))
result.equal(request[:7], received, "Downstream receives rate query.")
result.log("Downstream sends rate: {}".format(rate))
env.server.send(msg_to_bytes(rate))
# Client receives response.
result.equal(
int(expect),
int(bytes_to_msg(env.client.receive(size=1024))),
"Client received converted value.",
)
def _restart_components(self, env, result):
"""
Restart converter app.
Accept new connection from rate sending server.
Restart client driver to connect to new host:port.
"""
result.log("Restarting converter app.")
env.converter.restart()
result.log(
"App is now listening on {}:{}".format(
env.converter.host, env.converter.port
)
)
env.server.accept_connection()
env.client.restart()
@testcase
def restart_app(self, env, result):
"""
Restarting converter app and reconnect with rate
server and client components before doing new requests.
"""
result.log(
"App is listening on {}:{}".format(
env.converter.host, env.converter.port
)
)
self._send_and_receive(env, result, "EUR:GBP:1000", "0.8500", "850")
self._restart_components(env, result)
self._send_and_receive(env, result, "EUR:GBP:1000", "0.8700", "870")
self._restart_components(env, result)
self._send_and_receive(env, result, "EUR:GBP:2000", "0.8300", "1660")
| en | 0.829799 | FX conversion tests. Encode text to bytes. Decode bytes to text. Return original docstring (if available) and parametrization arguments in the format ``key: value``. Sample currency conversion operations. Client sends a request to the currency converter app. The converter retrieves the conversion rate from the downstream and sends back the converted result to the client. # App requests rates from server # Client receives response. Suite containing edge case scenarios. Client requests conversion to the same currency. No downstream is involved as no rate is needed. # Client receives response. Client requests conversion of 0 amount. No downstream is involved as no rate is needed. # Client receives response. Client sends a request with incorrect format. Requests are matched by [A-Z]{3}:[A-Z]{3}:[0-9]+ regex. # Client receives response. Converter app restart and reconnect scenarios. Client sends a request to the currency converter app. The converter retrieves the conversion rate from the downstream and sends back the converted result to the client. # App requests rates from server # Client receives response. Restart converter app. Accept new connection from rate sending server. Restart client driver to connect to new host:port. Restarting converter app and reconnect with rate server and client components before doing new requests. | 2.789874 | 3 |
tools/nntool/execution/execution_progress.py | 00-01/gap_sdk | 118 | 6631563 | <filename>tools/nntool/execution/execution_progress.py<gh_stars>100-1000
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
class ExecutionProgress(object):
__instance = None
def __new__(cls):
if ExecutionProgress.__instance is None:
ExecutionProgress.__instance = object.__new__(cls)
return ExecutionProgress.__instance
def __init__(self):
if not hasattr(self, 'listeners'):
self.listeners = []
@classmethod
def progress(cls, step_idx, name):
inst = cls()
for func in inst.listeners:
func(step_idx, name)
@classmethod
def start(cls):
inst = cls()
for func in inst.listeners:
func(None, "start")
@classmethod
def end(cls):
inst = cls()
for func in inst.listeners:
func(None, "end")
@classmethod
def listen(cls, func):
inst = cls()
inst.listeners.append(func)
@classmethod
def unlisten(cls, func):
inst = cls()
inst.listeners.remove(func)
| <filename>tools/nntool/execution/execution_progress.py<gh_stars>100-1000
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
class ExecutionProgress(object):
__instance = None
def __new__(cls):
if ExecutionProgress.__instance is None:
ExecutionProgress.__instance = object.__new__(cls)
return ExecutionProgress.__instance
def __init__(self):
if not hasattr(self, 'listeners'):
self.listeners = []
@classmethod
def progress(cls, step_idx, name):
inst = cls()
for func in inst.listeners:
func(step_idx, name)
@classmethod
def start(cls):
inst = cls()
for func in inst.listeners:
func(None, "start")
@classmethod
def end(cls):
inst = cls()
for func in inst.listeners:
func(None, "end")
@classmethod
def listen(cls, func):
inst = cls()
inst.listeners.append(func)
@classmethod
def unlisten(cls, func):
inst = cls()
inst.listeners.remove(func)
| en | 0.880961 | # Copyright (C) 2020 GreenWaves Technologies, SAS # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. | 2.209239 | 2 |
scripts/build_gear_input_set.py | mammadjv/GEAR | 88 | 6631564 | <gh_stars>10-100
ENCODING = 'utf-8'
SENTENCE_NUM = 5
def is_evidence_exist(evidence_set, evid):
for evidence in evidence_set:
if evid[1] == evidence[1] and evid[2] == evidence[2]:
return True
return False
'''
Build GEAR train set with truth evidence and retrieval evidence.
'''
def build_with_truth_and_threshold(truth_file, athene_file, output_file, label_file, threshold):
fin = open(truth_file, 'rb')
instances = {}
for line in fin:
label, evidence, claim, index, article, article_index = line.decode(ENCODING).strip('\r\n').split('\t')
evidence_tuple = [evidence, article, article_index, 1]
if index not in instances:
instances[index] = {}
instances[index]['claim'] = claim
instances[index]['label'] = label
instances[index]['evidences'] = [evidence_tuple]
else:
assert instances[index]['claim'] == claim
if not is_evidence_exist(instances[index]['evidences'], evidence_tuple):
instances[index]['evidences'].append(evidence_tuple)
fin.close()
print('Finish reading truth file...')
retrieve_instances = {}
fin = open(athene_file, 'rb')
for line in fin:
label, evidence, claim, index, article, article_index, confident = line.decode(ENCODING).strip('\r\n').split('\t')
evidence_tuple = [evidence, article, article_index, float(confident)]
if index not in retrieve_instances:
retrieve_instances[index] = {}
retrieve_instances[index]['claim'] = claim
if evidence_tuple[3] >= threshold:
retrieve_instances[index]['evidences'] = [evidence_tuple]
else:
retrieve_instances[index]['evidences'] = []
else:
assert retrieve_instances[index]['claim'] == claim
if not is_evidence_exist(retrieve_instances[index]['evidences'], evidence_tuple):
if evidence_tuple[3] >= threshold:
retrieve_instances[index]['evidences'].append(evidence_tuple)
fin.close()
print('Finish reading retrieve file...')
total_keys = list(instances.keys())
total_keys.extend(list(retrieve_instances.keys()))
total_keys = list(set(total_keys))
total_keys = sorted(total_keys, key=lambda x: int(x))
print(len(retrieve_instances.keys()), len(total_keys))
for index in total_keys:
if index not in instances:
if index not in retrieve_instances:
print("Cannot find the index: %s" % index)
continue
instances[index] = retrieve_instances[index]
instances[index]['label'] = 'NOTENOUGHINFO'
instance = instances[index]
if len(instance['evidences']) < SENTENCE_NUM:
if index in retrieve_instances:
pos = 0
while len(instance['evidences']) < SENTENCE_NUM and pos < len(retrieve_instances[index]['evidences']):
evidence = retrieve_instances[index]['evidences'][pos]
if not is_evidence_exist(instance['evidences'], evidence):
instance['evidences'].append(evidence)
pos += 1
else:
print('Warning: %s' % index)
print('Finish adding evidences...')
fout = open(output_file, 'wb')
# flog = open(label_file, 'wb')
for index in total_keys:
instance = instances[index]
output_line = '%s\t%s\t%s' % (index, instance['label'], instance['claim'])
label_line = '%s\t' % index
label_list = []
try:
assert len(instance['evidences']) >= SENTENCE_NUM
except Exception as _:
pass
for evidence in instance['evidences'][:SENTENCE_NUM]:
output_line += ('\t%s' % evidence[0])
label_list.append(str(evidence[3]))
output_line += '\r\n'
while len(label_list) < SENTENCE_NUM:
label_list.append('0')
label_line += '\t'.join(label_list) + '\r\n'
fout.write(output_line.encode(ENCODING))
# flog.write(label_line.encode(ENCODING))
fout.close()
# flog.close()
'''
Build GEAR dev/test set with retrieval evidence.
'''
def build_with_threshold(input, output, threshold):
fin = open(input, 'rb')
instances = {}
for line in fin:
label, evidence, claim, index, _, _, confidence = line.decode(ENCODING).strip('\r\n').split('\t')
confidence = float(confidence)
if not index in instances:
instances[index] = {}
instances[index]['claim'] = claim
instances[index]['label'] = label
if confidence >= threshold:
instances[index]['evidences'] = [evidence]
else:
instances[index]['evidences'] = []
else:
assert instances[index]['label'] == label
assert instances[index]['claim'] == claim
if confidence >= threshold:
instances[index]['evidences'].append(evidence)
fin.close()
instances = sorted(instances.items(), key=lambda x: int(x[0]))
fout = open(output, 'wb')
for instance in instances:
output_line = '%s\t%s\t%s' % (instance[0], instance[1]['label'], instance[1]['claim'])
if len(instance[1]['evidences']) == 0:
print(0)
for evidence in instance[1]['evidences']:
output_line += ('\t%s' % evidence)
output_line += '\r\n'
fout.write(output_line.encode(ENCODING))
fout.close()
if __name__ == '__main__':
print('Start building gear train set...')
build_with_truth_and_threshold('../data/bert/bert-nli-train-sr-set.tsv',
'../data/bert/bert-nli-train-retrieve-set.tsv',
'../data/gear/gear-train-set-0_001.tsv',
'none.tsv', 0.001)
print('Start building gear dev set...')
build_with_threshold('../data/bert/bert-nli-dev-retrieve-set.tsv',
'../data/gear/gear-dev-set-0_001.tsv', 0.001)
# build_with_threshold('../data/bert/bert-nli-dev-retrieve-set.tsv', '../data/gear/gear-dev-set-0_1.tsv', 0.1)
# build_with_threshold('../data/bert/bert-nli-dev-retrieve-set.tsv', '../data/gear/gear-dev-set-0_01.tsv', 0.01)
# build_with_threshold('../data/bert/bert-nli-dev-retrieve-set.tsv', '../data/gear/gear-dev-set-0_0001.tsv', 0.0001)
print('Start building gear test set...')
build_with_threshold('../data/bert/bert-nli-test-retrieve-set.tsv',
'../data/gear/gear-test-set-0_001.tsv', 0.001)
# build_with_threshold('../data/bert/bert-nli-test-retrieve-set.tsv', '../data/gear/gear-test-set-0_1.tsv', 0.1)
# build_with_threshold('../data/bert/bert-nli-test-retrieve-set.tsv', '../data/gear/gear-test-set-0_01.tsv', 0.01)
# build_with_threshold('../data/bert/bert-nli-test-retrieve-set.tsv', '../data/gear/gear-test-set-0_0001.tsv', 0.0001)
scripts/physics_filter.py | underworlds-robot/uwds_physics_clients | 0 | 6631565 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import rospy
import pybullet as p
import numpy as np
from tf import transformations as tf
import math
import uuid
from pyuwds.reconfigurable_client import ReconfigurableClient
from uwds_msgs.msg import Changes, Situation, Property, Invalidations
from pyuwds.uwds import FILTER
from pyuwds.types.nodes import MESH
from pyuwds.types.situations import ACTION, FACT
from std_msgs.msg import Header
PLACED = 0
HELD = 1
RELEASED = 2
PLACE_CONFIDENCE = 0.85
PICK_CONFIDENCE = 0.85
RELEASE_CONFIDENCE = 0.85
IN_CONFIDENCE = 0.65
ONTOP_CONFIDENCE = 0.95
EPSILON = 0.015  # 1.5 cm
class PhysicsFilter(ReconfigurableClient):
"""
"""
def __init__(self):
"""
"""
self.ressource_folder = rospy.get_param("~ressource_folder")
# reasoning parameters
self.infer_actions = rospy.get_param("~infer_actions", True)
self.perception_duration = rospy.get_param("~perception_duration", 0.9)
self.simulation_tolerance = rospy.get_param("~simulation_tolerance", 0.045)
self.perception_tolerance = rospy.get_param("~perception_tolerance", 0.01)
gui = rospy.get_param("~use_gui", False)
# simulator parameters
self.time_step = rospy.get_param("~time_step", 1.0/240)
self.reasoning_frequency = rospy.get_param("~reasoning_frequency", 24)
self.simulation_step = rospy.get_param("~simulation_step", 0.1)
# self.nb_step_fall = int(self.fall_simulation_step / self.time_step)
self.nb_step = int(self.simulation_step / self.time_step)
# init simulator
if gui is True:
p.connect(p.GUI)
else:
p.connect(p.DIRECT)
p.setGravity(0, 0, -10)
#p.setPhysicsEngineParameter(contactBreakingThreshold=0.01)
p.setAdditionalSearchPath(self.ressource_folder)
p.setTimeStep(self.time_step)
p.setPhysicsEngineParameter(fixedTimeStep=self.time_step)
self.bullet_node_id_map = {}
self.previous_placed_positions = {}
self.corrected_position = {}
self.corrected_orientation = {}
self.corrected_linear_velocity = {}
self.corrected_angular_velocity = {}
self.perceived_position = {}
self.perceived_orientation = {}
self.perceived_linear_velocity = {}
self.perceived_angular_velocity = {}
self.previous_position = {}
self.previous_orientation = {}
self.node_action_state = {}
self.place_confidence = {}
self.pick_confidence = {}
self.release_confidence = {}
self.isontop_confidence = {}
self.isin_confidence = {}
self.invalidation_time = {}
self.max_step = 10
self.simulated_node_ids = []
self.previous_perceived_position = {}
self.previous_perceived_orientation = {}
self.isPerceived = {}
self.isUnstable = {}
self.isMoving = {}
self.isIn = {}
self.isOnTop = {}
self.isContaining = {}
super(PhysicsFilter, self).__init__("gravity_filter", FILTER)
self.timer = rospy.Timer(rospy.Duration(1.0/self.reasoning_frequency), self.reasoningCallback)
def onReconfigure(self, worlds_names):
"""
"""
pass
def onSubscribeChanges(self, world_name):
"""
"""
pass
def onUnsubscribeChanges(self, world_name):
"""
"""
pass
def onChanges(self, world_name, header, invalidations):
"""
"""
now = rospy.Time.now()
for node_id in invalidations.node_ids_deleted:
if node_id in self.perceived_position:
del self.perceived_position[node_id]
if node_id in self.perceived_orientation:
del self.perceived_orientation[node_id]
if node_id in self.perceived_linear_velocity:
del self.perceived_linear_velocity[node_id]
if node_id in self.perceived_angular_velocity:
del self.perceived_angular_velocity[node_id]
if node_id in self.previous_perceived_position:
del self.previous_perceived_position[node_id]
if node_id in self.previous_perceived_orientation:
del self.previous_perceived_orientation[node_id]
if node_id in self.isContaining:
del self.isContaining[node_id]
if node_id in self.isUnstable:
del self.isUnstable[node_id]
if node_id in self.isPerceived:
del self.isPerceived[node_id]
if node_id in self.node_action_state:
del self.node_action_state[node_id]
for node_id in invalidations.node_ids_updated:
node = self.ctx.worlds()[world_name].scene().nodes()[node_id]
if node.type == MESH:
self.invalidation_time[node_id] = now
if node_id not in self.isContaining:
self.isContaining[node_id] = {}
if node_id not in self.isUnstable:
self.isUnstable[node_id] = False
if node_id in self.perceived_position:
self.previous_perceived_position[node_id] = self.perceived_position[node_id]
if node_id in self.perceived_orientation:
self.previous_perceived_orientation[node_id] = self.perceived_orientation[node_id]
self.perceived_position[node_id] = [node.position.pose.position.x, node.position.pose.position.y, node.position.pose.position.z]
self.perceived_orientation[node_id] = [node.position.pose.orientation.x, node.position.pose.orientation.y, node.position.pose.orientation.z, node.position.pose.orientation.w]
self.perceived_linear_velocity[node_id] = [node.velocity.twist.linear.x, node.velocity.twist.linear.y, node.velocity.twist.linear.z]
self.perceived_angular_velocity[node_id] = [node.velocity.twist.angular.x, node.velocity.twist.angular.y, node.velocity.twist.angular.z]
update = False
if node_id in self.previous_perceived_position:
if self.isUnstable[node_id] is False:
if not(np.allclose(self.previous_perceived_position[node_id], self.perceived_position[node_id], atol=self.perception_tolerance) \
and np.allclose(self.previous_perceived_orientation[node_id], self.perceived_orientation[node_id], atol=self.perception_tolerance)):
self.updateBulletNode(world_name, node_id, self.perceived_position[node_id], self.perceived_orientation[node_id], self.perceived_linear_velocity[node_id], self.perceived_angular_velocity[node_id])
update = True
else:
self.updateBulletNode(world_name, node_id, self.perceived_position[node_id], self.perceived_orientation[node_id], self.perceived_linear_velocity[node_id], self.perceived_angular_velocity[node_id])
update = True
else:
self.updateBulletNode(world_name, node_id, self.perceived_position[node_id], self.perceived_orientation[node_id], self.perceived_linear_velocity[node_id], self.perceived_angular_velocity[node_id])
update = True
if update:
for object_id in self.isContaining[node_id]:
object = self.ctx.worlds()[world_name].scene().nodes()[object_id]
if node_id in self.previous_position and object_id in self.previous_position:
#t_prev = tf.compose_matrix(angles=tf.euler_from_quaternion(self.previous_orientation[node_id], axes='sxyz'), translate=self.previous_position[node_id])
#t_perceived = tf.compose_matrix(angles=tf.euler_from_quaternion(self.perceived_orientation[node_id], axes='sxyz'), translate=self.perceived_position[node_id])
t_prev = tf.translation_matrix(self.previous_position[node_id])
t_perceived = tf.translation_matrix(self.perceived_position[node_id])
offset = tf.translation_from_matrix(np.dot(np.linalg.inv(t_prev), t_perceived))
if not np.allclose(offset, [0, 0, 0], atol=0.01):
object_position = self.previous_position[object_id]
object_orientation = self.previous_orientation[object_id]
object_position = [object_position[0]+offset[0], object_position[1]+offset[1], object_position[2]+offset[2]]
self.updateBulletNode(world_name, object_id, object_position, object_orientation, self.perceived_linear_velocity[node_id], self.perceived_angular_velocity[node_id])
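    # Containment propagation above: when a container node moves, the translation
    # offset between its previous and newly perceived pose (inv(t_prev) . t_perceived,
    # which reduces to 'perceived - previous' for pure translations) is applied to
    # every object currently registered in isContaining[container], so contained
    # objects follow their container in the simulation even when not re-perceived.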
def reasoningCallback(self, timer):
header = Header()
header.stamp = rospy.Time.now()
if len(self.input_worlds)>0:
world_name = self.input_worlds[0]
invalidations = Invalidations()
changes = self.filter(world_name, header, invalidations)
self.ctx.worlds()[world_name+"_stable"].update(changes, header)
def filter(self, world_name, header, invalidations):
"""
"""
#print "start reasoning"
start_reasoning_time = rospy.Time.now()
changes = Changes()
for mesh_id in invalidations.mesh_ids_updated:
changes.meshes_to_update.append(self.meshes()[mesh_id])
for situation_id in invalidations.situation_ids_updated:
changes.situations_to_update.append(self.meshes()[mesh_id])
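            # NOTE: the line above re-appends self.meshes()[mesh_id] (mesh_id is left over
            # from the previous loop); the updated situation itself is never added, which
            # looks like a copy-paste slip in the original code.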
for node in self.ctx.worlds()[world_name].scene().nodes():
if node.type == MESH:
if node.id in self.invalidation_time:
self.isPerceived[node.id] = (header.stamp - self.invalidation_time[node.id]) < rospy.Duration(self.perception_duration)
else:
self.isPerceived[node.id] = True
start_fall_reasoning_time = rospy.Time.now()
for node_id in self.simulated_node_ids:
self.isUnstable[node_id] = False
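        # Roll the simulation forward for simulation_step seconds (nb_step sub-steps of
        # time_step each). A perceived object whose simulated pose drifts more than
        # simulation_tolerance (default 4.5 cm) from its perceived pose is flagged
        # unstable, which the action inference below treats as evidence it is being held.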
for i in range(0, self.nb_step):
p.stepSimulation()
for node_id in self.simulated_node_ids:
if self.isPerceived[node_id]:
node = self.ctx.worlds()[world_name].scene().nodes()[node_id]
infered_position, infered_orientation = p.getBasePositionAndOrientation(self.bullet_node_id_map[node_id])
infered_linear_velocity, infered_angular_velocity = p.getBaseVelocity(self.bullet_node_id_map[node_id])
perceived_position = self.perceived_position[node_id]
stability_distance = math.sqrt(pow(perceived_position[0]-infered_position[0], 2) + pow(perceived_position[1]-infered_position[1], 2) + pow(perceived_position[2]-infered_position[2], 2))
is_unstable = stability_distance > self.simulation_tolerance
if self.isUnstable[node_id] is False and is_unstable:
self.isUnstable[node_id] = True
#print node.name + " is unstable after "+str(i)+"/"+str(self.nb_step)+" steps"
for object_id in self.isContaining[node_id]:
if object_id in self.perceived_position:
t_perceived = tf.translation_matrix(self.perceived_position[node_id])
t_infered = tf.translation_matrix(infered_position)
offset = tf.translation_from_matrix(np.dot(np.linalg.inv(t_infered), t_perceived))
#if not np.allclose(offset, [0, 0, 0], atol=0.1):
object_position, object_orientation = p.getBasePositionAndOrientation(self.bullet_node_id_map[object_id])
object_position = [object_position[0]+offset[0], object_position[1]+offset[1], object_position[2]+offset[2]]
self.updateBulletNode(world_name, object_id, object_position, object_orientation, self.perceived_linear_velocity[node_id], self.perceived_angular_velocity[node_id])
if self.isUnstable[node_id]:
self.updateBulletNode(world_name, node_id, self.perceived_position[node_id], self.perceived_orientation[node_id], self.perceived_linear_velocity[node_id], self.perceived_angular_velocity[node_id])
end_fall_reasoning_time = rospy.Time.now()
for node in self.ctx.worlds()[world_name].scene().nodes():
# print len(self.simulated_node_ids)
if node.id in self.simulated_node_ids:
if self.isUnstable[node.id] is True and self.isPerceived[node.id] is True:
                    if (self.node_action_state[node.id] == PLACED or self.node_action_state[node.id] == RELEASED) and self.infer_actions and self.pick_confidence[node.id] > PICK_CONFIDENCE:
print node.name + " picked up"
situation = Situation()
situation.id = str(uuid.uuid4().hex)
situation.type = ACTION
situation.description = node.name + " picked up"
situation.confidence = PICK_CONFIDENCE
situation.start.data = header.stamp
situation.end.data = header.stamp
situation.properties.append(Property("subject", node.id))
situation.properties.append(Property("action", "Place"))
changes.situations_to_update.append(situation)
self.node_action_state[node.id] = HELD
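                    # Multiplicative evidence accumulation: each cycle in which the object is
                    # unstable-but-perceived scales pick confidence up by (1 + PICK_CONFIDENCE)
                    # and place confidence down, both clamped to [0.1, 1.0]; the stable branch
                    # below applies the symmetric update for place/release.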
self.pick_confidence[node.id] = self.pick_confidence[node.id]*(1+PICK_CONFIDENCE)
#print self.pick_confidence[node_id]
if self.pick_confidence[node.id] > 1.0: self.pick_confidence[node.id] = 1.0
self.place_confidence[node.id] = self.place_confidence[node.id]*(1-PICK_CONFIDENCE)
if self.place_confidence[node.id] < .1: self.place_confidence[node.id] = 0.1
node.position.pose.position.x = self.perceived_position[node.id][0]
node.position.pose.position.y = self.perceived_position[node.id][1]
node.position.pose.position.z = self.perceived_position[node.id][2]
node.position.pose.orientation.x = self.perceived_orientation[node.id][0]
node.position.pose.orientation.y = self.perceived_orientation[node.id][1]
node.position.pose.orientation.z = self.perceived_orientation[node.id][2]
node.position.pose.orientation.w = self.perceived_orientation[node.id][3]
node.velocity.twist.linear.x = self.perceived_linear_velocity[node.id][0]
node.velocity.twist.linear.y = self.perceived_linear_velocity[node.id][1]
node.velocity.twist.linear.z = self.perceived_linear_velocity[node.id][2]
node.velocity.twist.angular.x = self.perceived_angular_velocity[node.id][0]
node.velocity.twist.angular.y = self.perceived_angular_velocity[node.id][1]
node.velocity.twist.angular.z = self.perceived_angular_velocity[node.id][2]
self.previous_position[node.id] = self.perceived_position[node.id]
self.previous_orientation[node.id] = self.perceived_orientation[node.id]
self.ctx.worlds()[world_name].scene().nodes()[node.id]=node
changes.nodes_to_update.append(node)
else:
if node.id in self.node_action_state:
if self.node_action_state[node.id] == HELD and self.infer_actions:
if self.isPerceived[node.id]:
self.place_confidence[node.id] = self.place_confidence[node.id]*(1+PLACE_CONFIDENCE)
if self.place_confidence[node.id] > 1.0: self.place_confidence[node.id] = 1.0
self.pick_confidence[node.id] = self.pick_confidence[node.id]*(1-PLACE_CONFIDENCE)
if self.pick_confidence[node.id] < .1: self.pick_confidence[node.id] = 0.1
self.release_confidence[node.id] = self.release_confidence[node.id]*(1-RELEASE_CONFIDENCE)
if self.release_confidence[node.id] < .1: self.release_confidence[node.id] = 0.1
if self.place_confidence[node.id] > PLACE_CONFIDENCE:
print node.name + " placed"
situation = Situation()
situation.id = str(uuid.uuid4().hex)
situation.type = ACTION
situation.description = node.name + " placed"
situation.confidence = PLACE_CONFIDENCE
situation.start.data = header.stamp
situation.end.data = header.stamp
situation.properties.append(Property("subject", node.id))
situation.properties.append(Property("action", "Pick"))
changes.situations_to_update.append(situation)
self.node_action_state[node.id] = PLACED
else:
self.release_confidence[node.id] = self.release_confidence[node.id]*(1+RELEASE_CONFIDENCE)
if self.release_confidence[node.id] > 1.0: self.release_confidence[node.id] = 1.0
self.pick_confidence[node.id] = self.pick_confidence[node.id]*(1-PLACE_CONFIDENCE)
if self.pick_confidence[node.id] < .1: self.pick_confidence[node.id] = 0.1
self.place_confidence[node.id] = self.place_confidence[node.id]*(1-PICK_CONFIDENCE)
if self.place_confidence[node.id] < .1: self.place_confidence[node.id] = 0.1
if self.release_confidence[node.id] > RELEASE_CONFIDENCE:
print node.name + " released"
situation = Situation()
situation.id = str(uuid.uuid4().hex)
situation.type = ACTION
situation.description = node.name + " released"
situation.confidence = RELEASE_CONFIDENCE
situation.start.data = header.stamp
situation.end.data = header.stamp
situation.properties.append(Property("subject", node.id))
situation.properties.append(Property("action", "Release"))
changes.situations_to_update.append(situation)
self.node_action_state[node.id] = RELEASED
infered_position, infered_orientation = p.getBasePositionAndOrientation(self.bullet_node_id_map[node.id])
infered_linear_velocity, infered_angular_velocity = p.getBaseVelocity(self.bullet_node_id_map[node.id])
x, y, z = infered_position
node.position.pose.position.x = x
node.position.pose.position.y = y
node.position.pose.position.z = z
x, y, z, w = infered_orientation
node.position.pose.orientation.x = x
node.position.pose.orientation.y = y
node.position.pose.orientation.z = z
node.position.pose.orientation.w = w
x, y, z = infered_linear_velocity
node.velocity.twist.linear.x = x
node.velocity.twist.linear.y = y
node.velocity.twist.linear.z = z
x, y, z = infered_angular_velocity
node.velocity.twist.angular.x = x
node.velocity.twist.angular.y = y
node.velocity.twist.angular.z = z
self.previous_position[node.id] = infered_position
self.previous_orientation[node.id] = infered_orientation
self.ctx.worlds()[world_name].scene().nodes()[node.id]=node
changes.nodes_to_update.append(node)
else:
changes.nodes_to_update.append(node)
now = rospy.Time.now()
for node1_id in self.simulated_node_ids:
node1 = self.ctx.worlds()[world_name].scene().nodes()[node1_id]
if node1.type != MESH:
continue
for node2_id in self.simulated_node_ids:
node2 = self.ctx.worlds()[world_name].scene().nodes()[node2_id]
if node1.id == node2.id:
continue
if node2.type != MESH:
continue
bb1 = self.aabb(node1)
bb2 = self.aabb(node2)
if node1.id not in self.isIn:
self.isIn[node1.id] = {}
if node1.id not in self.isOnTop:
self.isOnTop[node1.id] = {}
if node2.id not in self.isContaining:
self.isContaining[node2.id] = {}
if self.isin(bb1, bb2, node2.id in self.isIn[node1.id]):
if node2.id not in self.isIn[node1.id]:
sit = Situation()
sit.id = str(uuid.uuid4())
sit.type = FACT
sit.description = node1.name + " is in " + node2.name
sit.properties.append(Property("subject", node1.id))
sit.properties.append(Property("object", node2.id))
sit.properties.append(Property("predicate", "isIn"))
sit.confidence = IN_CONFIDENCE
sit.start.data = now
sit.end.data = rospy.Time(0)
self.isIn[node1.id][node2.id] = sit
self.isContaining[node2.id][node1.id] = sit
changes.situations_to_update.append(sit)
else:
if node2.id in self.isIn[node1.id]:
self.isIn[node1.id][node2.id].end.data = now
self.isIn[node1.id][node2.id].description = node1.name + " was in " + node2.name
sit = self.isIn[node1.id][node2.id]
changes.situations_to_update.append(sit)
del self.isIn[node1.id][node2.id]
del self.isContaining[node2.id][node1.id]
if self.isontop(bb1, bb2, node2.id in self.isOnTop[node1.id]):
if node2.id not in self.isOnTop[node1.id]:
sit = Situation()
sit.id = str(uuid.uuid4())
sit.type = FACT
sit.description = node1.name + " is on " + node2.name
sit.properties.append(Property("subject", node1.id))
sit.properties.append(Property("object", node2.id))
sit.properties.append(Property("predicate", "isOn"))
sit.confidence = ONTOP_CONFIDENCE
sit.start.data = now
sit.end.data = rospy.Time(0)
self.isOnTop[node1.id][node2.id] = sit
changes.situations_to_update.append(sit)
else:
if node2.id in self.isOnTop[node1.id]:
self.isOnTop[node1.id][node2.id].description = node1.name + " was on " + node2.name
self.isOnTop[node1.id][node2.id].end.data = now
sit = self.isOnTop[node1.id][node2.id]
changes.situations_to_update.append(sit)
del self.isOnTop[node1.id][node2.id]
end_reasoning_time = rospy.Time.now()
if (1.0/(end_reasoning_time - start_reasoning_time).to_sec() < self.reasoning_frequency*0.5):
rospy.logwarn("[%s::filter] reasoning too slow ! %f", self.ctx.name(), 1.0/(end_reasoning_time - start_reasoning_time).to_sec())
return changes
def updateBulletNode(self, world_name, node_id, position, orientation, linear, angular):
"""
"""
if self.ctx.worlds()[world_name].scene().root_id() not in self.bullet_node_id_map:
self.bullet_node_id_map[self.ctx.worlds()[world_name].scene().root_id()] = p.loadURDF("plane.urdf")
node = self.ctx.worlds()[world_name].scene().nodes()[node_id]
if node_id not in self.bullet_node_id_map:
try:
label = node.name.replace("_"," ").replace("."," ").replace("-"," ").lower()
result = []
for word in label.split(" "):
try:
test = int(word)
except ValueError:
result.append(word)
first = True
for word in result:
if first is True:
label = word
first = False
else:
label += "_" + word
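                # Example of the naming convention assumed here: a scene node called
                # 'mug_2.03' is lowered and split into ['mug', '2', '03'], digit-only
                # tokens are dropped, and the remaining words are re-joined, so the
                # loader looks for 'mug.urdf' in the ressource folder.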
self.bullet_node_id_map[node_id] = p.loadURDF(label+".urdf", position, orientation)
                rospy.loginfo("[%s::updateBulletNode] '"+label+".urdf' loaded successfully", self.ctx.name())
p.changeDynamics(self.bullet_node_id_map[node_id], -1, frictionAnchor=1, rollingFriction=1.0, spinningFriction=1.0, lateralFriction=1.0)
self.simulated_node_ids.append(node_id)
if node_id not in self.node_action_state:
self.node_action_state[node_id] = PLACED
self.place_confidence[node_id] = 1.0
self.pick_confidence[node_id] = 0.1
self.release_confidence[node_id] = 0.1
except Exception as e:
self.bullet_node_id_map[node_id] = -1
                rospy.logwarn("[%s::updateBulletNode] "+str(e), self.ctx.name())
if self.bullet_node_id_map[node_id] > 0:
p.resetBaseVelocity(self.bullet_node_id_map[node_id], linear, angular)
p.resetBasePositionAndOrientation(self.bullet_node_id_map[node_id], position, orientation)
else:
self.bullet_node_id_map[node_id] = -1
def aabb(self, node):
"""
Compute world aabb by transforming the corners of the aabb by the node pose
"""
for property in node.properties:
if property.name == "aabb":
aabb = property.data.split(",")
if len(aabb) == 3:
t = [node.position.pose.position.x, node.position.pose.position.y, node.position.pose.position.z]
q = [node.position.pose.orientation.x, node.position.pose.orientation.y, node.position.pose.orientation.z, node.position.pose.orientation.w]
trans = tf.translation_matrix(t)
rot = tf.quaternion_matrix(q)
transform = tf.concatenate_matrices(trans, rot)
v = []
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([ float(aabb[0])/2, float(aabb[1])/2, float(aabb[2])/2]))))
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([-float(aabb[0])/2, float(aabb[1])/2, float(aabb[2])/2]))))
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([ float(aabb[0])/2, -float(aabb[1])/2, float(aabb[2])/2]))))
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([-float(aabb[0])/2, -float(aabb[1])/2, float(aabb[2])/2]))))
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([ float(aabb[0])/2, float(aabb[1])/2, -float(aabb[2])/2]))))
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([-float(aabb[0])/2, float(aabb[1])/2, -float(aabb[2])/2]))))
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([ float(aabb[0])/2, -float(aabb[1])/2, -float(aabb[2])/2]))))
v.append(tf.translation_from_matrix(np.dot(transform, tf.translation_matrix([-float(aabb[0])/2, -float(aabb[1])/2, -float(aabb[2])/2]))))
bb_min = [1e10, 1e10, 1e10]
bb_max = [-1e10, -1e10, -1e10]
for vertex in v:
bb_min = np.minimum(bb_min, vertex)
bb_max = np.maximum(bb_max, vertex)
return bb_min, bb_max
raise RuntimeError("aabb not present")
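    # The 'aabb' property is expected to hold the local box dimensions as a
    # comma-separated string, e.g. Property(name='aabb', data='0.1,0.1,0.2') for a
    # 10x10x20 cm box; the eight corners are transformed by the node pose and the
    # world-frame min/max corners are returned.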
def bb_footprint(self, bb):
"""
Copied from severin lemaignan underworlds client example :
see : https://github.com/severin-lemaignan/underworlds/blob/master/clients/spatial_relations.py
"""
x1, y1, z1 = bb[0]
x2, y2, z2 = bb[1]
return (x1, y1), (x2, y2)
def overlap(self, rect1, rect2):
"""Overlapping rectangles overlap both horizontally & vertically
Coped from severin lemaignan underworlds client example :
see : https://github.com/severin-lemaignan/underworlds/blob/master/clients/spatial_relations.py
"""
(l1, b1), (r1, t1) = rect1
(l2, b2), (r2, t2) = rect2
return self.range_overlap(l1, r1, l2, r2) and self.range_overlap(b1, t1, b2, t2)
def range_overlap(self, a_min, a_max, b_min, b_max):
"""Neither range is completely greater than the other
http://codereview.stackexchange.com/questions/31352/overlapping-rectangles
Modified from severin lemaignan underworlds client example :
see : https://github.com/severin-lemaignan/underworlds/blob/master/clients/spatial_relations.py
"""
return (a_min <= b_max) and (b_min <= a_max)
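    # Illustrative values: range_overlap(0.0, 1.0, 0.5, 1.5) -> True (the ranges share
    # [0.5, 1.0]), while range_overlap(0.0, 1.0, 1.2, 2.0) -> False (disjoint); overlap()
    # simply requires this on both the x and y extents of the two footprints.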
def weakly_cont(self, rect1, rect2, prev=False):
"""Obj1 is weakly contained if the base of the object is surrounded
by Obj2
Modified from severin lemaignan underworlds client example :
see : https://github.com/severin-lemaignan/underworlds/blob/master/clients/spatial_relations.py
"""
(l1, b1), (r1, t1) = rect1
(l2, b2), (r2, t2) = rect2
if prev is False:
return (l1 - 2*EPSILON >= l2) and (b1 - 2*EPSILON >= b2) and (r1 - 2*EPSILON <= r2) and (t1 - 2*EPSILON <= t2)
else:
return (l1 + 2*EPSILON >= l2) and (b1 + 2*EPSILON >= b2) and (r1 + 2*EPSILON <= r2) and (t1 + 2*EPSILON <= t2)
def isabove(self, bb1, bb2, prev=False):
"""
For obj 1 to be above obj 2:
- the bottom of its bounding box must be higher that
the top of obj 2's bounding box
- the bounding box footprint of both objects must overlap
Modified from severin lemaignan underworlds client example :
see : https://github.com/severin-lemaignan/underworlds/blob/master/clients/spatial_relations.py
"""
bb1_min, _ = bb1
_, bb2_max = bb2
x1, y1, z1 = bb1_min
x2, y2, z2 = bb2_max
if z1 < z2 - 2 * EPSILON:
return False
return self.overlap(self.bb_footprint(bb1), self.bb_footprint(bb2))
def isin(self, bb1, bb2, prev=False):
""" Returns True if bb1 is in bb2.
To be 'in' bb1 is weakly contained by bb2 and the bottom of bb1 is lower
than the top of bb2 and higher than the bottom of bb2.
Modified from severin lemaignan underworlds client example :
see : https://github.com/severin-lemaignan/underworlds/blob/master/clients/spatial_relations.py
"""
bb1_min, _ = bb1
bb2_min, bb2_max = bb2
x1, y1, z1 = bb1_min
x2, y2, z2 = bb2_max
x3, y3, z3 = bb2_min
if prev is False:
if z1 > z2 - 2*EPSILON:
return False
if z1 < z3 + EPSILON:
return False
else:
if z1 > z2 - EPSILON :
return False
if z1 < z3 :
return False
return self.weakly_cont(self.bb_footprint(bb1), self.bb_footprint(bb2), prev)
def isontop(self, bb1, bb2, prev=False):
"""
For obj 1 to be on top of obj 2:
- obj1 must be above obj 2
- the bottom of obj 1 must be close to the top of obj 2
Modified from severin lemaignan underworlds client example :
see : https://github.com/severin-lemaignan/underworlds/blob/master/clients/spatial_relations.py
"""
bb1_min, _ = bb1
_, bb2_max = bb2
x1, y1, z1 = bb1_min
x2, y2, z2 = bb2_max
return z1 < z2 + 2 * EPSILON and self.isabove(bb1, bb2)
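# -----------------------------------------------------------------------------
# Illustrative, ROS-free sketch (not part of the original node): it evaluates the
# same bounding-box inequalities as isabove()/isontop() above on two hard-coded
# toy boxes, so the geometry can be sanity-checked without pybullet or a scene.
def _sketch_on_top_example():
    table = ([0.0, 0.0, 0.0], [1.0, 1.0, 0.75])     # (min corner, max corner), metres
    mug = ([0.40, 0.40, 0.76], [0.48, 0.48, 0.86])  # sits on the table top
    (mx1, my1, mz1), (mx2, my2, _) = mug
    (tx1, ty1, _), (tx2, ty2, tz2) = table
    # Footprint overlap on x and y, same test as overlap()/range_overlap() above.
    footprints_overlap = (mx1 <= tx2 and tx1 <= mx2) and (my1 <= ty2 and ty1 <= my2)
    above = mz1 >= tz2 - 2 * EPSILON and footprints_overlap
    on_top = above and mz1 < tz2 + 2 * EPSILON
    return above, on_top  # both True for these boxes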
if __name__ == '__main__':
rospy.init_node("physics_filter")
physics_filter = PhysicsFilter()
rospy.spin()
graphwalker/cli.py | spotify/python-graphwalker | 66 | 6631566 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Spotify AB
"""Tool for testing based on finite state machine graphs.
Graphwalker reads FSMs specified by graphs, plans paths, calls model
methods by name from graph labels and reports progress and results.
While conceptually derived from the Graphwalker project[gw],
(implemented in java) this is a complete reimplementation from that
initial concept.
"""
import argparse
import time
from graphwalker import execution
from graphwalker import graph
from graphwalker import planning
from graphwalker import reporting
from graphwalker import halting
epilog = """
Plugins are generally referenced in the form of "mod.Klass:a,b,ka=va,kb=vb",
for a class Klass in the module mod, instantiated with arguments a and b and
the keyword arguments ka and kb.
""".strip()
class ListAction(argparse.Action):
"""Print list of plugins."""
def choose_thing(self, option):
if 'report' in option:
name, things = 'Reporters', reporting.reporters
elif 'plan' in option:
name, things = 'Planners', planning.planners
elif 'stop' in option or 'cond' in option:
name, things = 'Stop Conditions', halting.conditions
return name, things
def __call__(self, parser, ns, values, option, **kw):
name, things = self.choose_thing(option)
print '%s:' % name
print
for x in things:
print ' ' + x.__name__
print ' ' + x.__doc__.split('\n')[0]
print
ns.done = True
def arg_parser():
parser = argparse.ArgumentParser(epilog=epilog)
a = parser.add_argument
a('--suite-name', '--suite', dest='suite', nargs=1)
a('--test-name', '--test', dest='test', nargs=1)
a('--reporter', '--reporters', default=[],
dest='reporters', nargs=1, action='append', metavar='R')
a('--planner', '--planners',
dest='planners', nargs=1, action='append', metavar='P')
a('--stopcond', '--halt', '--halter', '--stop', '--until',
default='Coverage', dest='stop', metavar='C')
a('--debugger', dest='debugger', nargs=1, metavar='D')
a('--debug', action='store_true')
a('--list-reporters', action=ListAction, nargs=0)
a('--list-planners', action=ListAction, nargs=0)
a('--list-stopcond', '--list-halter', action=ListAction, nargs=0)
a('--dry-run', '-n', dest='done', action='store_true')
a('modact', nargs='+', metavar='Model or Actor',
help="Need at least one of each")
return parser
def load_model_actor(ns):
model = graph.Graph.read(ns.modact[0])
for n in ns.modact[1:-1]:
model = model.combine(graph.Graph.read(n))
try:
model = model.combine(graph.Graph.read(ns.modact[-1]))
actor = 'graphwalker.dummy.Mute'
    except Exception:
        # the last argument could not be read as a graph, so treat it as the actor
        actor = ns.modact[-1]
return model, actor
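# The loop above treats every positional argument except the last as a graph to
# merge; the final argument is also tried as a graph and, if it cannot be read,
# is assumed to be the dotted path of the actor class instead. For example
# (hypothetical invocation): "cli.py model.graphml extra.graphml myapp.MyActor"
# would combine the two graphs and use myapp.MyActor as the actor.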
def run_context(ns, model, plan, reporter, stop, executor, context, **kw):
stop.start(model, context)
path = plan(model, stop, 'Start', context)
reporter.start_suite(ns.suite)
executor.run(ns.test, path, context)
reporter.end_suite()
def build(ns):
reporter = reporting.build(sum(ns.reporters, []))
ns.planners = ns.planners or [['Random']]
plan = planning.build(sum(ns.planners, []))
stop = halting.build(ns.stop)
model, actor = load_model_actor(ns)
debugger = ns.debug and ns.debugger
exe = execution.Executor(actor, reporter, debugger)
context = {
'suite': ns.suite, 'test': ns.test, 'ns': ns,
'model': model, 'actor': actor, 'debugger': debugger, 'executor': exe,
'plan': plan, 'stop': stop, 'reporter': reporter,
}
context['context'] = context
return context
def name_test(ns):
ns.suite = ns.suite or 'graphwalker'
ns.test = ns.test or (ns.modact and (
ns.modact[0].rsplit('/', 1)[-1].split('.')[0] + '-') +
time.strftime('%Y%m%d%H%M%S'))
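# Illustrative example (hypothetical values): with modact starting with
# "models/login.graphml" and no --test-name given, name_test() above produces a
# test name such as "login-20240101120000" and the default suite "graphwalker".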
def main(argv):
parser = arg_parser()
ns = parser.parse_args(argv[1:])
if getattr(ns, 'done', False):
return 0
name_test(ns)
context = build(ns)
return run_context(**context)
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| en | 0.851868 | #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2015 Spotify AB Tool for testing based on finite state machine graphs. Graphwalker reads FSMs specified by graphs, plans paths, calls model methods by name from graph labels and reports progress and results. While conceptually derived from the Graphwalker project[gw], (implemented in java) this is a complete reimplementation from that initial concept. Plugins are generally referenced in the form of "mod.Klass:a,b,ka=va,kb=vb", for a class Klass in the module mod, instantiated with arguments a and b and the keyword arguments ka and kb. Print list of plugins. | 2.730156 | 3 |
track2/icnet/model_icnet.py | omshinde/dfc2019 | 123 | 6631567 | # Minor modifications added by <NAME>, 2018
# - Original code adapted from https://github.com/aitorzip/Keras-ICNet
# - Keras implementation of ICNet: https://arxiv.org/abs/1704.08545
# - Added output summary of model with PNG illustration
# - Generalized for arbitrary numbers of image bands
# - Removed input image scaling since we're doing that outside the model
# - Replaced Conv2D with SeparableConv2D to reduce number of parameters
# from 6.74M to 3.82M for RGB inputs
# - Added dropout after activation layers - not sure if this helps now, but
# I'm hoping to use this to estimate uncertainty
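# Illustrative arithmetic (not from the original notes): for a single 3x3 layer
# mapping 64 to 128 channels, a regular Conv2D holds about 3*3*64*128 = 73,728
# weights, while a SeparableConv2D holds roughly 3*3*64 + 64*128 = 8,768
# (depthwise plus pointwise), which is the mechanism behind the network-wide
# 6.74M to 3.82M reduction quoted above.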
# MIT License
# Copyright (c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from keras.layers import Activation
from keras.layers import Lambda
from keras.layers import Conv2D, SeparableConv2D
from keras.layers import Add
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import ZeroPadding2D
from keras.layers import Input
from keras.layers import BatchNormalization, Dropout
from keras.models import Model
import keras.backend as K
import tensorflow as tf
from keras.utils import plot_model
from keras.layers import UpSampling2D
def build_icnet(height, width, bands, n_classes, weights_path=None, train=False):
inp = Input(shape=(height, width, bands))
dropout = 0.2
    # (1/2) Medium-resolution branch: the input is bilinearly downsampled by half before the shared trunk below
y = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='data_sub2')(inp)
y = SeparableConv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_1_3x3_s2')(y)
y = BatchNormalization(name='conv1_1_3x3_s2_bn')(y)
y = SeparableConv2D(32, 3, padding='same', activation='relu', name='conv1_2_3x3')(y)
y = BatchNormalization(name='conv1_2_3x3_s2_bn')(y)
y = SeparableConv2D(64, 3, padding='same', activation='relu', name='conv1_3_3x3')(y)
y = BatchNormalization(name='conv1_3_3x3_bn')(y)
y_ = MaxPooling2D(pool_size=3, strides=2, name='pool1_3x3_s2')(y)
y = SeparableConv2D(128, 1, name='conv2_1_1x1_proj')(y_)
y = BatchNormalization(name='conv2_1_1x1_proj_bn')(y)
y_ = SeparableConv2D(32, 1, activation='relu', name='conv2_1_1x1_reduce')(y_)
y_ = BatchNormalization(name='conv2_1_1x1_reduce_bn')(y_)
y_ = ZeroPadding2D(name='padding1')(y_)
y_ = SeparableConv2D(32, 3, activation='relu', name='conv2_1_3x3')(y_)
y_ = BatchNormalization(name='conv2_1_3x3_bn')(y_)
y_ = SeparableConv2D(128, 1, name='conv2_1_1x1_increase')(y_)
y_ = BatchNormalization(name='conv2_1_1x1_increase_bn')(y_)
y = Add(name='conv2_1')([y,y_])
y_ = Activation('relu', name='conv2_1/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(32, 1, activation='relu', name='conv2_2_1x1_reduce')(y_)
y = BatchNormalization(name='conv2_2_1x1_reduce_bn')(y)
y = ZeroPadding2D(name='padding2')(y)
y = SeparableConv2D(32, 3, activation='relu', name='conv2_2_3x3')(y)
y = BatchNormalization(name='conv2_2_3x3_bn')(y)
y = SeparableConv2D(128, 1, name='conv2_2_1x1_increase')(y)
y = BatchNormalization(name='conv2_2_1x1_increase_bn')(y)
y = Add(name='conv2_2')([y,y_])
y_ = Activation('relu', name='conv2_2/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(32, 1, activation='relu', name='conv2_3_1x1_reduce')(y_)
y = BatchNormalization(name='conv2_3_1x1_reduce_bn')(y)
y = ZeroPadding2D(name='padding3')(y)
y = SeparableConv2D(32, 3, activation='relu', name='conv2_3_3x3')(y)
y = BatchNormalization(name='conv2_3_3x3_bn')(y)
y = SeparableConv2D(128, 1, name='conv2_3_1x1_increase')(y)
y = BatchNormalization(name='conv2_3_1x1_increase_bn')(y)
y = Add(name='conv2_3')([y,y_])
y_ = Activation('relu', name='conv2_3/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(256, 1, strides=2, name='conv3_1_1x1_proj')(y_)
y = BatchNormalization(name='conv3_1_1x1_proj_bn')(y)
y_ = SeparableConv2D(64, 1, strides=2, activation='relu', name='conv3_1_1x1_reduce')(y_)
y_ = BatchNormalization(name='conv3_1_1x1_reduce_bn')(y_)
y_ = ZeroPadding2D(name='padding4')(y_)
y_ = SeparableConv2D(64, 3, activation='relu', name='conv3_1_3x3')(y_)
y_ = BatchNormalization(name='conv3_1_3x3_bn')(y_)
y_ = SeparableConv2D(256, 1, name='conv3_1_1x1_increase')(y_)
y_ = BatchNormalization(name='conv3_1_1x1_increase_bn')(y_)
y = Add(name='conv3_1')([y,y_])
z = Activation('relu', name='conv3_1/relu')(y)
z = Dropout(dropout)(z)
    # (1/4) Low-resolution branch: downsample by half again and continue through the deeper, dilated layers
y_ = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='conv3_1_sub4')(z)
y = SeparableConv2D(64, 1, activation='relu', name='conv3_2_1x1_reduce')(y_)
y = BatchNormalization(name='conv3_2_1x1_reduce_bn')(y)
y = ZeroPadding2D(name='padding5')(y)
y = SeparableConv2D(64, 3, activation='relu', name='conv3_2_3x3')(y)
y = BatchNormalization(name='conv3_2_3x3_bn')(y)
y = SeparableConv2D(256, 1, name='conv3_2_1x1_increase')(y)
y = BatchNormalization(name='conv3_2_1x1_increase_bn')(y)
y = Add(name='conv3_2')([y,y_])
y_ = Activation('relu', name='conv3_2/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(64, 1, activation='relu', name='conv3_3_1x1_reduce')(y_)
y = BatchNormalization(name='conv3_3_1x1_reduce_bn')(y)
y = ZeroPadding2D(name='padding6')(y)
y = SeparableConv2D(64, 3, activation='relu', name='conv3_3_3x3')(y)
y = BatchNormalization(name='conv3_3_3x3_bn')(y)
y = SeparableConv2D(256, 1, name='conv3_3_1x1_increase')(y)
y = BatchNormalization(name='conv3_3_1x1_increase_bn')(y)
y = Add(name='conv3_3')([y,y_])
y_ = Activation('relu', name='conv3_3/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(64, 1, activation='relu', name='conv3_4_1x1_reduce')(y_)
y = BatchNormalization(name='conv3_4_1x1_reduce_bn')(y)
y = ZeroPadding2D(name='padding7')(y)
y = SeparableConv2D(64, 3, activation='relu', name='conv3_4_3x3')(y)
y = BatchNormalization(name='conv3_4_3x3_bn')(y)
y = SeparableConv2D(256, 1, name='conv3_4_1x1_increase')(y)
y = BatchNormalization(name='conv3_4_1x1_increase_bn')(y)
y = Add(name='conv3_4')([y,y_])
y_ = Activation('relu', name='conv3_4/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(512, 1, name='conv4_1_1x1_proj')(y_)
y = BatchNormalization(name='conv4_1_1x1_proj_bn')(y)
y_ = SeparableConv2D(128, 1, activation='relu', name='conv4_1_1x1_reduce')(y_)
y_ = BatchNormalization(name='conv4_1_1x1_reduce_bn')(y_)
y_ = ZeroPadding2D(padding=2, name='padding8')(y_)
y_ = SeparableConv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_1_3x3')(y_)
y_ = BatchNormalization(name='conv4_1_3x3_bn')(y_)
y_ = SeparableConv2D(512, 1, name='conv4_1_1x1_increase')(y_)
y_ = BatchNormalization(name='conv4_1_1x1_increase_bn')(y_)
y = Add(name='conv4_1')([y,y_])
y_ = Activation('relu', name='conv4_1/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(128, 1, activation='relu', name='conv4_2_1x1_reduce')(y_)
y = BatchNormalization(name='conv4_2_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=2, name='padding9')(y)
y = SeparableConv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_2_3x3')(y)
y = BatchNormalization(name='conv4_2_3x3_bn')(y)
y = SeparableConv2D(512, 1, name='conv4_2_1x1_increase')(y)
y = BatchNormalization(name='conv4_2_1x1_increase_bn')(y)
y = Add(name='conv4_2')([y,y_])
y_ = Activation('relu', name='conv4_2/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(128, 1, activation='relu', name='conv4_3_1x1_reduce')(y_)
y = BatchNormalization(name='conv4_3_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=2, name='padding10')(y)
y = SeparableConv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_3_3x3')(y)
y = BatchNormalization(name='conv4_3_3x3_bn')(y)
y = SeparableConv2D(512, 1, name='conv4_3_1x1_increase')(y)
y = BatchNormalization(name='conv4_3_1x1_increase_bn')(y)
y = Add(name='conv4_3')([y,y_])
y_ = Activation('relu', name='conv4_3/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(128, 1, activation='relu', name='conv4_4_1x1_reduce')(y_)
y = BatchNormalization(name='conv4_4_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=2, name='padding11')(y)
y = SeparableConv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_4_3x3')(y)
y = BatchNormalization(name='conv4_4_3x3_bn')(y)
y = SeparableConv2D(512, 1, name='conv4_4_1x1_increase')(y)
y = BatchNormalization(name='conv4_4_1x1_increase_bn')(y)
y = Add(name='conv4_4')([y,y_])
y_ = Activation('relu', name='conv4_4/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(128, 1, activation='relu', name='conv4_5_1x1_reduce')(y_)
y = BatchNormalization(name='conv4_5_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=2, name='padding12')(y)
y = SeparableConv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_5_3x3')(y)
y = BatchNormalization(name='conv4_5_3x3_bn')(y)
y = SeparableConv2D(512, 1, name='conv4_5_1x1_increase')(y)
y = BatchNormalization(name='conv4_5_1x1_increase_bn')(y)
y = Add(name='conv4_5')([y,y_])
y_ = Activation('relu', name='conv4_5/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(128, 1, activation='relu', name='conv4_6_1x1_reduce')(y_)
y = BatchNormalization(name='conv4_6_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=2, name='padding13')(y)
y = SeparableConv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_6_3x3')(y)
y = BatchNormalization(name='conv4_6_3x3_bn')(y)
y = SeparableConv2D(512, 1, name='conv4_6_1x1_increase')(y)
y = BatchNormalization(name='conv4_6_1x1_increase_bn')(y)
y = Add(name='conv4_6')([y,y_])
y = Activation('relu', name='conv4_6/relu')(y)
y = Dropout(dropout)(y)
y_ = SeparableConv2D(1024, 1, name='conv5_1_1x1_proj')(y)
y_ = BatchNormalization(name='conv5_1_1x1_proj_bn')(y_)
y = SeparableConv2D(256, 1, activation='relu', name='conv5_1_1x1_reduce')(y)
y = BatchNormalization(name='conv5_1_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=4, name='padding14')(y)
y = SeparableConv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_1_3x3')(y)
y = BatchNormalization(name='conv5_1_3x3_bn')(y)
y = SeparableConv2D(1024, 1, name='conv5_1_1x1_increase')(y)
y = BatchNormalization(name='conv5_1_1x1_increase_bn')(y)
y = Add(name='conv5_1')([y,y_])
y_ = Activation('relu', name='conv5_1/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(256, 1, activation='relu', name='conv5_2_1x1_reduce')(y_)
y = BatchNormalization(name='conv5_2_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=4, name='padding15')(y)
y = SeparableConv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_2_3x3')(y)
y = BatchNormalization(name='conv5_2_3x3_bn')(y)
y = SeparableConv2D(1024, 1, name='conv5_2_1x1_increase')(y)
y = BatchNormalization(name='conv5_2_1x1_increase_bn')(y)
y = Add(name='conv5_2')([y,y_])
y_ = Activation('relu', name='conv5_2/relu')(y)
y_ = Dropout(dropout)(y_)
y = SeparableConv2D(256, 1, activation='relu', name='conv5_3_1x1_reduce')(y_)
y = BatchNormalization(name='conv5_3_1x1_reduce_bn')(y)
y = ZeroPadding2D(padding=4, name='padding16')(y)
y = SeparableConv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_3_3x3')(y)
y = BatchNormalization(name='conv5_3_3x3_bn')(y)
y = SeparableConv2D(1024, 1, name='conv5_3_1x1_increase')(y)
y = BatchNormalization(name='conv5_3_1x1_increase_bn')(y)
y = Add(name='conv5_3')([y,y_])
y = Activation('relu', name='conv5_3/relu')(y)
y = Dropout(dropout)(y)
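    # Pyramid pooling over the coarsest feature map (PSPNet-style): average-pool the
    # features over several grid sizes, resize each pooled map back to (h, w) with
    # bilinear interpolation, and sum them with the original features so the head
    # sees both local detail and image-level context.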
h, w = y.shape[1:3].as_list()
pool1 = AveragePooling2D(pool_size=(h,w), strides=(h,w), name='conv5_3_pool1')(y)
pool1 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool1_interp')(pool1)
    pool2 = AveragePooling2D(pool_size=(h//2,w//2), strides=(h//2,w//2), name='conv5_3_pool2')(y)
pool2 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool2_interp')(pool2)
    pool3 = AveragePooling2D(pool_size=(h//3,w//3), strides=(h//3,w//3), name='conv5_3_pool3')(y)
pool3 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool3_interp')(pool3)
    pool6 = AveragePooling2D(pool_size=(h//4,w//4), strides=(h//4,w//4), name='conv5_3_pool6')(y)
pool6 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(h,w)), name='conv5_3_pool6_interp')(pool6)
y = Add(name='conv5_3_sum')([y, pool1, pool2, pool3, pool6])
y = SeparableConv2D(256, 1, activation='relu', name='conv5_4_k1')(y)
y = BatchNormalization(name='conv5_4_k1_bn')(y)
aux_1 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='conv5_4_interp')(y)
y = ZeroPadding2D(padding=2, name='padding17')(aux_1)
y = SeparableConv2D(128, 3, dilation_rate=2, name='conv_sub4')(y)
y = BatchNormalization(name='conv_sub4_bn')(y)
y_ = SeparableConv2D(128, 1, name='conv3_1_sub2_proj')(z)
y_ = BatchNormalization(name='conv3_1_sub2_proj_bn')(y_)
y = Add(name='sub24_sum')([y,y_])
y = Activation('relu', name='sub24_sum/relu')(y)
aux_2 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub24_sum_interp')(y)
y = ZeroPadding2D(padding=2, name='padding18')(aux_2)
y_ = SeparableConv2D(128, 3, dilation_rate=2, name='conv_sub2')(y)
y_ = BatchNormalization(name='conv_sub2_bn')(y_)
    # (1) Full-resolution branch: a light three-convolution head applied directly to the raw input
y = SeparableConv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_sub1')(inp)
y = BatchNormalization(name='conv1_sub1_bn')(y)
y = SeparableConv2D(32, 3, strides=2, padding='same', activation='relu', name='conv2_sub1')(y)
y = BatchNormalization(name='conv2_sub1_bn')(y)
y = SeparableConv2D(64, 3, strides=2, padding='same', activation='relu', name='conv3_sub1')(y)
y = BatchNormalization(name='conv3_sub1_bn')(y)
y = SeparableConv2D(128, 1, name='conv3_sub1_proj')(y)
y = BatchNormalization(name='conv3_sub1_proj_bn')(y)
y = Add(name='sub12_sum')([y,y_])
y = Activation('relu', name='sub12_sum/relu')(y)
y = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub12_sum_interp')(y)
out = SeparableConv2D(n_classes, 1, activation='softmax', name='conv6_cls')(y)
if train:
# if training, incorporate cascade label guidance
aux_1 = SeparableConv2D(n_classes, 1, activation='softmax', name='sub4_out')(aux_1)
aux_2 = SeparableConv2D(n_classes, 1, activation='softmax', name='sub24_out')(aux_2)
model = Model(inputs=inp, outputs=[out, aux_2, aux_1])
print("ICNet Model:")
model.summary()
# plot_model(model, to_file='icnet.png', show_shapes=True)
else:
out = UpSampling2D(4)(out)
model = Model(inputs=inp, outputs=out)
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
return model
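# Illustrative usage sketch (assumed tile size and class count, not from the
# original file):
if __name__ == '__main__':
    # Build a training-mode model for 512x512 RGB tiles and 5 semantic classes.
    demo_model = build_icnet(512, 512, 3, 5, train=True)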
| en | 0.778989 | # Minor modifications added by <NAME>, 2018 # - Original code adapted from https://github.com/aitorzip/Keras-ICNet # - Keras implementation of ICNet: https://arxiv.org/abs/1704.08545 # - Added output summary of model with PNG illustration # - Generalized for arbitrary numbers of image bands # - Removed input image scaling since we're doing that outside the model # - Replaced Conv2D with SeparableConv2D to reduce number of parameters # from 6.74M to 3.82M for RGB inputs # - Added dropout after activation layers - not sure if this helps now, but # I'm hoping to use this to estimate uncertainty # MIT License # Copyright (c) 2018 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # (1/2) # (1/4) # (1) # if training, incorporate cascade label guidance # plot_model(model, to_file='icnet.png', show_shapes=True) | 2.151869 | 2 |
enaml/widgets/constraints_widget.py | ContinuumIO/enaml | 2 | 6631568 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import (
DefaultValue, Enum, Typed, List, Constant, ForwardTyped, observe
)
from casuarius import ConstraintVariable
from enaml.core.declarative import d_
from enaml.layout.ab_constrainable import ABConstrainable
from enaml.layout.layout_helpers import expand_constraints
from .widget import Widget, ProxyWidget
#: An atom enum which defines the allowable constraints strengths.
#: Clones will be made by selecting a new default via 'select'.
PolicyEnum = Enum('ignore', 'weak', 'medium', 'strong', 'required')
class ConstraintMember(Constant):
""" A custom Member class that generates a ConstraintVariable.
"""
__slots__ = ()
def __init__(self):
super(ConstraintMember, self).__init__()
mode = DefaultValue.MemberMethod_Object
self.set_default_value_mode(mode, "default")
def default(self, owner):
""" Create the constraint variable for the member.
"""
return ConstraintVariable(self.name)
class ProxyConstraintsWidget(ProxyWidget):
""" The abstract definition of a proxy ConstraintsWidget object.
"""
#: A reference to the ConstraintsWidget declaration.
declaration = ForwardTyped(lambda: ConstraintsWidget)
def request_relayout(self):
raise NotImplementedError
class ConstraintsWidget(Widget):
""" A Widget subclass which adds constraint information.
A ConstraintsWidget is augmented with symbolic constraint variables
which define a box model on the widget. This box model is used to
declare constraints between this widget and other components which
participate in constraints-based layout.
Constraints are added to a widget by assigning a list to the
'constraints' attribute. This list may contain raw LinearConstraint
objects (which are created by manipulating the symbolic constraint
    variables) or DeferredConstraints objects which generate these
LinearConstraint objects on-the-fly.
"""
#: The list of user-specified constraints or constraint-generating
#: objects for this component.
constraints = d_(List())
#: A constant symbolic object that represents the left boundary of
#: the widget.
left = ConstraintMember()
#: A constant symbolic object that represents the top boundary of
#: the widget.
top = ConstraintMember()
#: A constant symbolic object that represents the width of the
#: widget.
width = ConstraintMember()
#: A constant symbolic object that represents the height of the
#: widget.
height = ConstraintMember()
#: A constant symbolic object that represents the right boundary
#: of the component. This is computed as left + width.
right = Constant()
def _default_right(self):
return self.left + self.width
#: A constant symbolic object that represents the bottom boundary
#: of the component. This is computed as top + height.
bottom = Constant()
def _default_bottom(self):
return self.top + self.height
#: A constant symbolic object that represents the vertical center
    #: of the widget. This is computed as top + 0.5 * height.
v_center = Constant()
def _default_v_center(self):
return self.top + self.height / 2.0
#: A constant symbolic object that represents the horizontal center
#: of the widget. This is computed as left + 0.5 * width.
h_center = Constant()
def _default_h_center(self):
return self.left + self.width / 2.0
    #: How strongly a component hugs its width hint. Valid strengths
#: are 'weak', 'medium', 'strong', 'required' and 'ignore'. Default
#: is 'strong'. This can be overridden on a per-control basis to
#: specify a logical default for the given control.
hug_width = d_(PolicyEnum('strong'))
    #: How strongly a component hugs its height hint. Valid strengths
#: are 'weak', 'medium', 'strong', 'required' and 'ignore'. Default
#: is 'strong'. This can be overridden on a per-control basis to
#: specify a logical default for the given control.
hug_height = d_(PolicyEnum('strong'))
#: How strongly a component resists clipping its contents. Valid
#: strengths are 'weak', 'medium', 'strong', 'required' and 'ignore'.
#: The default is 'strong' for width.
resist_width = d_(PolicyEnum('strong'))
#: How strongly a component resists clipping its contents. Valid
#: strengths are 'weak', 'medium', 'strong', 'required' and 'ignore'.
#: The default is 'strong' for height.
resist_height = d_(PolicyEnum('strong'))
#: A reference to the ProxyConstraintsWidget object.
proxy = Typed(ProxyConstraintsWidget)
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe(('constraints', 'hug_width', 'hug_height', 'resist_width',
'resist_height'))
def _layout_invalidated(self, change):
""" An observer which will relayout the proxy widget.
"""
self.request_relayout()
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def request_relayout(self):
""" Request a relayout from the proxy widget.
This will invoke the 'request_relayout' method on an active
proxy. The proxy should collapse the requests as necessary.
"""
if self.proxy_is_active:
self.proxy.request_relayout()
def when(self, switch):
""" A method which returns `self` or None based on the truthness
of the argument.
This can be useful to easily turn off the effects of an object
in constraints-based layout.
Parameters
----------
switch : bool
A boolean which indicates whether this instance or None
should be returned.
Returns
-------
result : self or None
If 'switch' is boolean True, self is returned. Otherwise,
None is returned.
"""
if switch:
return self
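    # Illustrative use of when() (assuming the usual enaml layout helpers such
    # as hbox are in scope):
    #
    #     constraints = [hbox(label, field.when(show_field))]
    #
    # When show_field is False, field.when(...) returns None and that item is
    # effectively dropped from the generated layout constraints.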
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _collect_constraints(self):
""" The constraints to use for the component.
This will return the expanded list of constraints to use for
the component. It will not include the hard constraints.
"""
cns = self.constraints
if not cns:
cns = self._get_default_constraints()
cns += self._component_constraints()
return list(expand_constraints(self, cns))
def _hard_constraints(self):
""" The constraints required for the component.
These are constraints that must apply to the internal layout
computations of a component as well as that of containers which
may parent this component. By default, all components will have
their 'left', 'right', 'width', and 'height' symbols constrained
to >= 0. Subclasses which need to add more constraints should
reimplement this method.
"""
cns = [
self.left >= 0, self.top >= 0,
self.width >= 0, self.height >= 0,
]
return cns
def _component_constraints(self):
""" The required constraints for a particular component.
These are constraints which should be applied on top of any user
constraints and hard constraints. The default implementation
returns an empty list.
"""
return []
def _get_default_constraints(self):
""" The constraints to include if the user has none defined.
These are constraints to include if the user has not specified
their own in the 'constraints' list. The default implementation
returns an empty list.
"""
return []
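# Illustrative sketch (hypothetical subclass, not part of this module): concrete
# widgets can supply constraints by overriding the hooks above, e.g.
#
#     class SquareBox(ConstraintsWidget):
#         def _component_constraints(self):
#             return [self.width == self.height]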
ABConstrainable.register(ConstraintsWidget)
| en | 0.7739 | #------------------------------------------------------------------------------ # Copyright (c) 2013, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #------------------------------------------------------------------------------ #: An atom enum which defines the allowable constraints strengths. #: Clones will be made by selecting a new default via 'select'. A custom Member class that generates a ConstraintVariable. Create the constraint variable for the member. The abstract definition of a proxy ConstraintsWidget object. #: A reference to the ConstraintsWidget declaration. A Widget subclass which adds constraint information. A ConstraintsWidget is augmented with symbolic constraint variables which define a box model on the widget. This box model is used to declare constraints between this widget and other components which participate in constraints-based layout. Constraints are added to a widget by assigning a list to the 'constraints' attribute. This list may contain raw LinearConstraint objects (which are created by manipulating the symbolic constraint variables) or DeferredConstraints objects which generated these LinearConstraint objects on-the-fly. #: The list of user-specified constraints or constraint-generating #: objects for this component. #: A constant symbolic object that represents the left boundary of #: the widget. #: A constant symbolic object that represents the top boundary of #: the widget. #: A constant symbolic object that represents the width of the #: widget. #: A constant symbolic object that represents the height of the #: widget. #: A constant symbolic object that represents the right boundary #: of the component. This is computed as left + width. #: A constant symbolic object that represents the bottom boundary #: of the component. This is computed as top + height. #: A constant symbolic object that represents the vertical center #: of the width. This is computed as top + 0.5 * height. #: A constant symbolic object that represents the horizontal center #: of the widget. This is computed as left + 0.5 * width. #: How strongly a component hugs it's width hint. Valid strengths #: are 'weak', 'medium', 'strong', 'required' and 'ignore'. Default #: is 'strong'. This can be overridden on a per-control basis to #: specify a logical default for the given control. #: How strongly a component hugs it's height hint. Valid strengths #: are 'weak', 'medium', 'strong', 'required' and 'ignore'. Default #: is 'strong'. This can be overridden on a per-control basis to #: specify a logical default for the given control. #: How strongly a component resists clipping its contents. Valid #: strengths are 'weak', 'medium', 'strong', 'required' and 'ignore'. #: The default is 'strong' for width. #: How strongly a component resists clipping its contents. Valid #: strengths are 'weak', 'medium', 'strong', 'required' and 'ignore'. #: The default is 'strong' for height. #: A reference to the ProxyConstraintsWidget object. #-------------------------------------------------------------------------- # Observers #-------------------------------------------------------------------------- An observer which will relayout the proxy widget. #-------------------------------------------------------------------------- # Public API #-------------------------------------------------------------------------- Request a relayout from the proxy widget. 
This will invoke the 'request_relayout' method on an active proxy. The proxy should collapse the requests as necessary. A method which returns `self` or None based on the truthness of the argument. This can be useful to easily turn off the effects of an object in constraints-based layout. Parameters ---------- switch : bool A boolean which indicates whether this instance or None should be returned. Returns ------- result : self or None If 'switch' is boolean True, self is returned. Otherwise, None is returned. #-------------------------------------------------------------------------- # Private API #-------------------------------------------------------------------------- The constraints to use for the component. This will return the expanded list of constraints to use for the component. It will not include the hard constraints. The constraints required for the component. These are constraints that must apply to the internal layout computations of a component as well as that of containers which may parent this component. By default, all components will have their 'left', 'right', 'width', and 'height' symbols constrained to >= 0. Subclasses which need to add more constraints should reimplement this method. The required constraints for a particular component. These are constraints which should be applied on top of any user constraints and hard constraints. The default implementation returns an empty list. The constraints to include if the user has none defined. These are constraints to include if the user has not specified their own in the 'constraints' list. The default implementation returns an empty list. | 2.029515 | 2 |
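The box-model text above defines right, bottom, and the centers as linear combinations of left/top/width/height. A standalone sketch of those derived edges using kiwisolver (an illustration only, not enaml's actual layout machinery; the variable names are ours):
from kiwisolver import Solver, Variable

# Symbolic box variables, mirroring the ConstraintsWidget description above.
left, top = Variable("left"), Variable("top")
width, height = Variable("width"), Variable("height")
right, v_center = Variable("right"), Variable("v_center")

solver = Solver()
solver.addConstraint(right == left + width)            # right boundary = left + width
solver.addConstraint(v_center == top + 0.5 * height)   # vertical center = top + 0.5 * height
solver.addConstraint(width >= 0)
solver.addConstraint(height >= 0)
# Pin the box to concrete values (made-up numbers, for illustration only).
solver.addConstraint(left == 10)
solver.addConstraint(top == 20)
solver.addConstraint(width == 100)
solver.addConstraint(height == 40)
solver.updateVariables()
print(right.value(), v_center.value())  # 110.0 40.0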
DataSynthesizer/datatypes/utils/AttributeLoader.py | crangelsmith/synthetic-data-tutorial | 68 | 6631569 | from pandas import Series
from datatypes.DateTimeAttribute import DateTimeAttribute
from datatypes.FloatAttribute import FloatAttribute
from datatypes.IntegerAttribute import IntegerAttribute
from datatypes.SocialSecurityNumberAttribute import SocialSecurityNumberAttribute
from datatypes.StringAttribute import StringAttribute
from datatypes.utils.DataType import DataType
def parse_json(attribute_in_json):
name = attribute_in_json['name']
data_type = DataType(attribute_in_json['data_type'])
is_candidate_key = attribute_in_json['is_candidate_key']
is_categorical = attribute_in_json['is_categorical']
histogram_size = len(attribute_in_json['distribution_bins'])
if data_type is DataType.INTEGER:
attribute = IntegerAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
elif data_type is DataType.FLOAT:
attribute = FloatAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
elif data_type is DataType.DATETIME:
attribute = DateTimeAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
elif data_type is DataType.STRING:
attribute = StringAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
    elif data_type is DataType.SOCIAL_SECURITY_NUMBER:
attribute = SocialSecurityNumberAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
else:
raise Exception('Data type {} is unknown.'.format(data_type.value))
attribute.missing_rate = attribute_in_json['missing_rate']
attribute.min = attribute_in_json['min']
attribute.max = attribute_in_json['max']
attribute.distribution_bins = attribute_in_json['distribution_bins']
attribute.distribution_probabilities = attribute_in_json['distribution_probabilities']
return attribute
| from pandas import Series
from datatypes.DateTimeAttribute import DateTimeAttribute
from datatypes.FloatAttribute import FloatAttribute
from datatypes.IntegerAttribute import IntegerAttribute
from datatypes.SocialSecurityNumberAttribute import SocialSecurityNumberAttribute
from datatypes.StringAttribute import StringAttribute
from datatypes.utils.DataType import DataType
def parse_json(attribute_in_json):
name = attribute_in_json['name']
data_type = DataType(attribute_in_json['data_type'])
is_candidate_key = attribute_in_json['is_candidate_key']
is_categorical = attribute_in_json['is_categorical']
histogram_size = len(attribute_in_json['distribution_bins'])
if data_type is DataType.INTEGER:
attribute = IntegerAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
elif data_type is DataType.FLOAT:
attribute = FloatAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
elif data_type is DataType.DATETIME:
attribute = DateTimeAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
elif data_type is DataType.STRING:
attribute = StringAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
    elif data_type is DataType.SOCIAL_SECURITY_NUMBER:
attribute = SocialSecurityNumberAttribute(name, is_candidate_key, is_categorical, histogram_size, Series())
else:
raise Exception('Data type {} is unknown.'.format(data_type.value))
attribute.missing_rate = attribute_in_json['missing_rate']
attribute.min = attribute_in_json['min']
attribute.max = attribute_in_json['max']
attribute.distribution_bins = attribute_in_json['distribution_bins']
attribute.distribution_probabilities = attribute_in_json['distribution_probabilities']
return attribute
| none | 1 | 2.977364 | 3 |
|
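A minimal sketch of driving parse_json above with a hand-written attribute description; the import paths assume DataSynthesizer's own package layout and the field values are made up for illustration.
from datatypes.utils.DataType import DataType            # assumed package layout
from datatypes.utils.AttributeLoader import parse_json   # assumed package layout

# Hypothetical description; the keys mirror exactly what parse_json reads above.
attribute_description = {
    'name': 'age',
    'data_type': DataType.INTEGER.value,  # use the enum's own value string
    'is_candidate_key': False,
    'is_categorical': False,
    'missing_rate': 0.02,
    'min': 18,
    'max': 90,
    'distribution_bins': [18, 42, 66, 90],
    'distribution_probabilities': [0.4, 0.3, 0.2, 0.1],
}

attribute = parse_json(attribute_description)
print(type(attribute).__name__, attribute.min, attribute.max)  # IntegerAttribute 18 90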
sdb/commands/zfs/dbuf.py | alan-maguire/sdb | 0 | 6631570 | #
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import argparse
from typing import Iterable
import drgn
import sdb
from sdb.commands.walk import Walk
from sdb.commands.cast import Cast
class Dbuf(sdb.Locator, sdb.PrettyPrinter):
"""Iterate, filter, and pretty-print dbufs (dmu_buf_impl_t*)"""
names = ["dbuf"]
input_type = "dmu_buf_impl_t *"
output_type = "dmu_buf_impl_t *"
@classmethod
def _init_parser(cls, name: str) -> argparse.ArgumentParser:
parser = super()._init_parser(name)
parser.add_argument('-o',
'--object',
type=int,
help='filter: only dbufs of this object')
parser.add_argument('-l',
'--level',
type=int,
help='filter: only dbufs of this level')
parser.add_argument('-b',
'--blkid',
type=int,
help='filter: only dbufs of this blkid')
parser.add_argument(
'-d',
'--dataset',
type=str,
help='filter: only dbufs of this dataset name (or "poolname/_MOS")')
parser.add_argument('-H',
'--has-holds',
action='store_true',
help='filter: only dbufs that have nonzero holds')
return parser
@staticmethod
def DslDirName(dd: drgn.Object):
pname = ''
if dd.dd_parent:
pname = Dbuf.DslDirName(dd.dd_parent) + '/'
return pname + dd.dd_myname.string_().decode("utf-8")
@staticmethod
def DatasetName(ds: drgn.Object):
name = Dbuf.DslDirName(ds.ds_dir)
if not ds.ds_prev:
sn = ds.ds_snapname.string_().decode("utf-8")
if len(sn) == 0:
sn = '%UNKNOWN_SNAP_NAME%'
name += '@' + sn
return name
@staticmethod
def ObjsetName(os: drgn.Object):
if not os.os_dsl_dataset:
return '{}/_MOS'.format(
os.os_spa.spa_name.string_().decode("utf-8"))
return Dbuf.DatasetName(os.os_dsl_dataset)
def pretty_print(self, dbufs):
print("{:>20} {:>8} {:>4} {:>8} {:>5} {}".format(
"addr", "object", "lvl", "blkid", "holds", "os"))
for dbuf in dbufs:
print("{:>20} {:>8d} {:>4d} {:>8d} {:>5d} {}".format(
hex(dbuf), int(dbuf.db.db_object), int(dbuf.db_level),
int(dbuf.db_blkid), int(dbuf.db_holds.rc_count),
Dbuf.ObjsetName(dbuf.db_objset)))
def argfilter(self, db: drgn.Object) -> bool:
# self.args.object (and friends) may be set to 0, indicating a search
# for object 0 (the meta-dnode). Therefore we need to check
# `is not None` rather than just the truthiness of self.args.object.
if self.args.object is not None and db.db.db_object != self.args.object:
return False
if self.args.level is not None and db.db_level != self.args.level:
return False
if self.args.blkid is not None and db.db_blkid != self.args.blkid:
return False
if self.args.has_holds and db.db_holds.rc_count == 0:
return False
if self.args.dataset is not None and Dbuf.ObjsetName(
db.db_objset) != self.args.dataset:
return False
return True
def all_dnode_dbufs(self, dn: drgn.Object) -> Iterable[drgn.Object]:
yield from sdb.execute_pipeline([dn.dn_dbufs.address_of_()],
[Walk(), Cast(self.output_type)])
@sdb.InputHandler('dnode_t*')
def from_dnode(self, dn: drgn.Object) -> Iterable[drgn.Object]:
yield from filter(self.argfilter, self.all_dnode_dbufs(dn))
@staticmethod
def all_dbufs() -> Iterable[drgn.Object]:
hash_map = sdb.prog["dbuf_hash_table"].address_of_()
for i in range(hash_map.hash_table_mask):
dbuf = hash_map.hash_table[i]
while dbuf:
yield dbuf
dbuf = dbuf.db_hash_next
def no_input(self) -> Iterable[drgn.Object]:
yield from filter(self.argfilter, self.all_dbufs())
| #
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import argparse
from typing import Iterable
import drgn
import sdb
from sdb.commands.walk import Walk
from sdb.commands.cast import Cast
class Dbuf(sdb.Locator, sdb.PrettyPrinter):
"""Iterate, filter, and pretty-print dbufs (dmu_buf_impl_t*)"""
names = ["dbuf"]
input_type = "dmu_buf_impl_t *"
output_type = "dmu_buf_impl_t *"
@classmethod
def _init_parser(cls, name: str) -> argparse.ArgumentParser:
parser = super()._init_parser(name)
parser.add_argument('-o',
'--object',
type=int,
help='filter: only dbufs of this object')
parser.add_argument('-l',
'--level',
type=int,
help='filter: only dbufs of this level')
parser.add_argument('-b',
'--blkid',
type=int,
help='filter: only dbufs of this blkid')
parser.add_argument(
'-d',
'--dataset',
type=str,
help='filter: only dbufs of this dataset name (or "poolname/_MOS")')
parser.add_argument('-H',
'--has-holds',
action='store_true',
help='filter: only dbufs that have nonzero holds')
return parser
@staticmethod
def DslDirName(dd: drgn.Object):
pname = ''
if dd.dd_parent:
pname = Dbuf.DslDirName(dd.dd_parent) + '/'
return pname + dd.dd_myname.string_().decode("utf-8")
@staticmethod
def DatasetName(ds: drgn.Object):
name = Dbuf.DslDirName(ds.ds_dir)
if not ds.ds_prev:
sn = ds.ds_snapname.string_().decode("utf-8")
if len(sn) == 0:
sn = '%UNKNOWN_SNAP_NAME%'
name += '@' + sn
return name
@staticmethod
def ObjsetName(os: drgn.Object):
if not os.os_dsl_dataset:
return '{}/_MOS'.format(
os.os_spa.spa_name.string_().decode("utf-8"))
return Dbuf.DatasetName(os.os_dsl_dataset)
def pretty_print(self, dbufs):
print("{:>20} {:>8} {:>4} {:>8} {:>5} {}".format(
"addr", "object", "lvl", "blkid", "holds", "os"))
for dbuf in dbufs:
print("{:>20} {:>8d} {:>4d} {:>8d} {:>5d} {}".format(
hex(dbuf), int(dbuf.db.db_object), int(dbuf.db_level),
int(dbuf.db_blkid), int(dbuf.db_holds.rc_count),
Dbuf.ObjsetName(dbuf.db_objset)))
def argfilter(self, db: drgn.Object) -> bool:
# self.args.object (and friends) may be set to 0, indicating a search
# for object 0 (the meta-dnode). Therefore we need to check
# `is not None` rather than just the truthiness of self.args.object.
if self.args.object is not None and db.db.db_object != self.args.object:
return False
if self.args.level is not None and db.db_level != self.args.level:
return False
if self.args.blkid is not None and db.db_blkid != self.args.blkid:
return False
if self.args.has_holds and db.db_holds.rc_count == 0:
return False
if self.args.dataset is not None and Dbuf.ObjsetName(
db.db_objset) != self.args.dataset:
return False
return True
def all_dnode_dbufs(self, dn: drgn.Object) -> Iterable[drgn.Object]:
yield from sdb.execute_pipeline([dn.dn_dbufs.address_of_()],
[Walk(), Cast(self.output_type)])
@sdb.InputHandler('dnode_t*')
def from_dnode(self, dn: drgn.Object) -> Iterable[drgn.Object]:
yield from filter(self.argfilter, self.all_dnode_dbufs(dn))
@staticmethod
def all_dbufs() -> Iterable[drgn.Object]:
hash_map = sdb.prog["dbuf_hash_table"].address_of_()
for i in range(hash_map.hash_table_mask):
dbuf = hash_map.hash_table[i]
while dbuf:
yield dbuf
dbuf = dbuf.db_hash_next
def no_input(self) -> Iterable[drgn.Object]:
yield from filter(self.argfilter, self.all_dbufs())
| en | 0.828498 | # # Copyright 2019 Delphix # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pylint: disable=missing-docstring Iterate, filter, and pretty-print dbufs (dmu_buf_impl_t*) # self.args.object (and friends) may be set to 0, indicating a search # for object 0 (the meta-dnode). Therefore we need to check # `is not None` rather than just the truthiness of self.args.object. | 2.111146 | 2 |
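The Dbuf command above is essentially a set of generators plus one predicate (argfilter) applied with filter(). A toy, self-contained sketch of that shape, with a plain dataclass standing in for drgn objects (the names here are illustrative, not part of sdb):
from dataclasses import dataclass
from typing import Iterator, Optional

@dataclass
class ToyDbuf:          # stand-in for dmu_buf_impl_t
    object_id: int
    level: int
    holds: int

def all_toy_dbufs() -> Iterator[ToyDbuf]:
    # Producer: in sdb this role is played by walking the global dbuf hash table.
    yield ToyDbuf(5, 0, 1)
    yield ToyDbuf(5, 1, 0)
    yield ToyDbuf(7, 0, 2)

def make_filter(object_id: Optional[int] = None, has_holds: bool = False):
    # Predicate mirroring Dbuf.argfilter: None means "do not filter on this field".
    def pred(db: ToyDbuf) -> bool:
        if object_id is not None and db.object_id != object_id:
            return False
        if has_holds and db.holds == 0:
            return False
        return True
    return pred

if __name__ == "__main__":
    for db in filter(make_filter(object_id=5, has_holds=True), all_toy_dbufs()):
        print(db)   # only ToyDbuf(object_id=5, level=0, holds=1) survives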
image_gallery/migrations/0004_gallery_slug.py | tehfink/cmsplugin-image-gallery | 0 | 6631571 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-27 18:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('image_gallery', '0003_auto_20190120_2316'),
]
operations = [
migrations.AddField(
model_name='gallery',
name='slug',
field=models.SlugField(default='gallery', verbose_name='Slug'),
preserve_default=False,
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-27 18:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('image_gallery', '0003_auto_20190120_2316'),
]
operations = [
migrations.AddField(
model_name='gallery',
name='slug',
field=models.SlugField(default='gallery', verbose_name='Slug'),
preserve_default=False,
),
]
| en | 0.68825 | # -*- coding: utf-8 -*- # Generated by Django 1.11.18 on 2019-01-27 18:08 | 1.520075 | 2 |
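Because the AddField above backfills every existing Gallery with the literal slug 'gallery', a follow-up data migration is often added to derive slugs from existing data. A sketch of what that could look like; the 'title' field name and the migration ordering are assumptions, not taken from this app.
from django.db import migrations
from django.utils.text import slugify

def populate_slugs(apps, schema_editor):
    # Use the historical model so the migration stays valid as the app evolves.
    Gallery = apps.get_model('image_gallery', 'Gallery')
    for gallery in Gallery.objects.all():
        gallery.slug = slugify(gallery.title) or 'gallery'  # 'title' is assumed to exist
        gallery.save(update_fields=['slug'])

class Migration(migrations.Migration):
    dependencies = [
        ('image_gallery', '0004_gallery_slug'),
    ]
    operations = [
        migrations.RunPython(populate_slugs, migrations.RunPython.noop),
    ]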
toppra/algorithm/algorithm.py | Linjackffy/toppra | 0 | 6631572 | """
"""
import numpy as np
from ..constants import TINY
from toppra.interpolator import SplineInterpolator, AbstractGeometricPath
import toppra.interpolator as interpolator
import logging
logger = logging.getLogger(__name__)
class ParameterizationAlgorithm(object):
"""Base class for all parameterization algorithms.
All algorithms should have three attributes: `constraints`, `path`
and `gridpoints` and also implement the method
`compute_parameterization`.
Parameters
----------
constraint_list: list of `Constraint`
path: `AbstractGeometricPath`
The geometric path, or the trajectory to parameterize.
gridpoints: array, optional
        If not given, a gridpoint set is proposed automatically from the path (see `propose_gridpoints`).
"""
def __init__(self, constraint_list, path, gridpoints=None):
self.constraints = constraint_list # Attr
self.path = path # Attr
self._problem_data = {}
# Handle gridpoints
if gridpoints is None:
gridpoints = interpolator.propose_gridpoints(path, max_err_threshold=1e-3)
logger.info(
"No gridpoint specified. Automatically choose a gridpoint. See `propose_gridpoints`."
)
if (
path.path_interval[0] != gridpoints[0]
or path.path_interval[1] != gridpoints[-1]
):
raise ValueError("Invalid manually supplied gridpoints.")
self.gridpoints = np.array(gridpoints)
self._N = len(gridpoints) - 1 # Number of stages. Number of point is _N + 1
for i in range(self._N):
if gridpoints[i + 1] <= gridpoints[i]:
logger.fatal("Input gridpoints are not monotonically increasing.")
raise ValueError("Bad input gridpoints.")
@property
def problem_data(self):
"""Dict[str, Any]: Intermediate data obtained while solving the problem."""
return self._problem_data
def compute_parameterization(self, sd_start, sd_end):
"""Compute a path parameterization.
If fail, whether because there is no valid parameterization or
because of numerical error, the arrays returns should contain
np.nan.
Parameters
----------
sd_start: float
Starting path velocity. Must be positive.
sd_end: float
Goal path velocity. Must be positive.
return_data: bool, optional
If is True, also return matrix K which contains the controllable sets.
Returns
-------
sdd_vec: (_N,) array or None
Path accelerations.
sd_vec: (_N+1,) array None
Path velocities.
v_vec: (_N,) array or None
Auxiliary variables.
K: (N+1, 2) array
Return the controllable set if `return_data` is True.
"""
raise NotImplementedError
def compute_trajectory(self, sd_start=0, sd_end=0, return_data=False):
"""Compute the resulting joint trajectory and auxilliary trajectory.
If parameterization fails, return a tuple of None(s).
Parameters
----------
sd_start: float
Starting path velocity.
sd_end: float
Goal path velocity.
return_data: bool, optional
If true, return a dict containing the internal data.
Returns
-------
:class:`.AbstractGeometricPath`
Time-parameterized joint position trajectory. If unable to
parameterize, return None.
:class:`.AbstractGeometricPath`
Time-parameterized auxiliary variable trajectory. If
unable to parameterize or if there is no auxiliary
variable, return None.
"""
sdd_grid, sd_grid, v_grid, K = self.compute_parameterization(
sd_start, sd_end, return_data=True
)
# fail condition: sd_grid is None, or there is nan in sd_grid
if sd_grid is None or np.isnan(sd_grid).any():
return None, None
# Gridpoint time instances
t_grid = np.zeros(self._N + 1)
skip_ent = []
for i in range(1, self._N + 1):
sd_average = (sd_grid[i - 1] + sd_grid[i]) / 2
delta_s = self.gridpoints[i] - self.gridpoints[i - 1]
if sd_average > TINY:
delta_t = delta_s / sd_average
else:
delta_t = 5 # If average speed is too slow.
t_grid[i] = t_grid[i - 1] + delta_t
if delta_t < TINY: # if a time increment is too small, skip.
skip_ent.append(i)
t_grid = np.delete(t_grid, skip_ent)
scaling = self.gridpoints[-1] / self.path.duration
gridpoints = np.delete(self.gridpoints, skip_ent) / scaling
q_grid = self.path(gridpoints)
traj_spline = SplineInterpolator(
t_grid,
q_grid,
(
(1, self.path(0, 1) * sd_start),
(1, self.path(self.path.duration, 1) * sd_end),
),
)
if v_grid.shape[1] == 0:
v_spline = None
else:
v_grid_ = np.zeros((v_grid.shape[0] + 1, v_grid.shape[1]))
v_grid_[:-1] = v_grid
v_grid_[-1] = v_grid[-1]
v_grid_ = np.delete(v_grid_, skip_ent, axis=0)
v_spline = SplineInterpolator(t_grid, v_grid_)
self._problem_data.update(
{"sdd": sdd_grid, "sd": sd_grid, "v": v_grid, "K": K, "v_traj": v_spline}
)
if self.path.waypoints is not None:
t_waypts = np.interp(self.path.waypoints[0], gridpoints, t_grid)
self._problem_data.update({"t_waypts": t_waypts})
return traj_spline
| """
"""
import numpy as np
from ..constants import TINY
from toppra.interpolator import SplineInterpolator, AbstractGeometricPath
import toppra.interpolator as interpolator
import logging
logger = logging.getLogger(__name__)
class ParameterizationAlgorithm(object):
"""Base class for all parameterization algorithms.
All algorithms should have three attributes: `constraints`, `path`
and `gridpoints` and also implement the method
`compute_parameterization`.
Parameters
----------
constraint_list: list of `Constraint`
path: `AbstractGeometricPath`
The geometric path, or the trajectory to parameterize.
gridpoints: array, optional
        If not given, a gridpoint set is proposed automatically from the path (see `propose_gridpoints`).
"""
def __init__(self, constraint_list, path, gridpoints=None):
self.constraints = constraint_list # Attr
self.path = path # Attr
self._problem_data = {}
# Handle gridpoints
if gridpoints is None:
gridpoints = interpolator.propose_gridpoints(path, max_err_threshold=1e-3)
logger.info(
"No gridpoint specified. Automatically choose a gridpoint. See `propose_gridpoints`."
)
if (
path.path_interval[0] != gridpoints[0]
or path.path_interval[1] != gridpoints[-1]
):
raise ValueError("Invalid manually supplied gridpoints.")
self.gridpoints = np.array(gridpoints)
self._N = len(gridpoints) - 1 # Number of stages. Number of point is _N + 1
for i in range(self._N):
if gridpoints[i + 1] <= gridpoints[i]:
logger.fatal("Input gridpoints are not monotonically increasing.")
raise ValueError("Bad input gridpoints.")
@property
def problem_data(self):
"""Dict[str, Any]: Intermediate data obtained while solving the problem."""
return self._problem_data
def compute_parameterization(self, sd_start, sd_end):
"""Compute a path parameterization.
If fail, whether because there is no valid parameterization or
because of numerical error, the arrays returns should contain
np.nan.
Parameters
----------
sd_start: float
Starting path velocity. Must be positive.
sd_end: float
Goal path velocity. Must be positive.
return_data: bool, optional
If is True, also return matrix K which contains the controllable sets.
Returns
-------
sdd_vec: (_N,) array or None
Path accelerations.
sd_vec: (_N+1,) array None
Path velocities.
v_vec: (_N,) array or None
Auxiliary variables.
K: (N+1, 2) array
Return the controllable set if `return_data` is True.
"""
raise NotImplementedError
def compute_trajectory(self, sd_start=0, sd_end=0, return_data=False):
"""Compute the resulting joint trajectory and auxilliary trajectory.
If parameterization fails, return a tuple of None(s).
Parameters
----------
sd_start: float
Starting path velocity.
sd_end: float
Goal path velocity.
return_data: bool, optional
If true, return a dict containing the internal data.
Returns
-------
:class:`.AbstractGeometricPath`
Time-parameterized joint position trajectory. If unable to
parameterize, return None.
:class:`.AbstractGeometricPath`
Time-parameterized auxiliary variable trajectory. If
unable to parameterize or if there is no auxiliary
variable, return None.
"""
sdd_grid, sd_grid, v_grid, K = self.compute_parameterization(
sd_start, sd_end, return_data=True
)
# fail condition: sd_grid is None, or there is nan in sd_grid
if sd_grid is None or np.isnan(sd_grid).any():
return None, None
# Gridpoint time instances
t_grid = np.zeros(self._N + 1)
skip_ent = []
for i in range(1, self._N + 1):
sd_average = (sd_grid[i - 1] + sd_grid[i]) / 2
delta_s = self.gridpoints[i] - self.gridpoints[i - 1]
if sd_average > TINY:
delta_t = delta_s / sd_average
else:
delta_t = 5 # If average speed is too slow.
t_grid[i] = t_grid[i - 1] + delta_t
if delta_t < TINY: # if a time increment is too small, skip.
skip_ent.append(i)
t_grid = np.delete(t_grid, skip_ent)
scaling = self.gridpoints[-1] / self.path.duration
gridpoints = np.delete(self.gridpoints, skip_ent) / scaling
q_grid = self.path(gridpoints)
traj_spline = SplineInterpolator(
t_grid,
q_grid,
(
(1, self.path(0, 1) * sd_start),
(1, self.path(self.path.duration, 1) * sd_end),
),
)
if v_grid.shape[1] == 0:
v_spline = None
else:
v_grid_ = np.zeros((v_grid.shape[0] + 1, v_grid.shape[1]))
v_grid_[:-1] = v_grid
v_grid_[-1] = v_grid[-1]
v_grid_ = np.delete(v_grid_, skip_ent, axis=0)
v_spline = SplineInterpolator(t_grid, v_grid_)
self._problem_data.update(
{"sdd": sdd_grid, "sd": sd_grid, "v": v_grid, "K": K, "v_traj": v_spline}
)
if self.path.waypoints is not None:
t_waypts = np.interp(self.path.waypoints[0], gridpoints, t_grid)
self._problem_data.update({"t_waypts": t_waypts})
return traj_spline
| en | 0.587478 | Base class for all parameterization algorithms. All algorithms should have three attributes: `constraints`, `path` and `gridpoints` and also implement the method `compute_parameterization`. Parameters ---------- constraint_list: list of `Constraint` path: `AbstractGeometricPath` The geometric path, or the trajectory to parameterize. gridpoints: array, optional If not given, automatically generate a grid with 100 steps. # Attr # Attr # Handle gridpoints # Number of stages. Number of point is _N + 1 Dict[str, Any]: Intermediate data obtained while solving the problem. Compute a path parameterization. If fail, whether because there is no valid parameterization or because of numerical error, the arrays returns should contain np.nan. Parameters ---------- sd_start: float Starting path velocity. Must be positive. sd_end: float Goal path velocity. Must be positive. return_data: bool, optional If is True, also return matrix K which contains the controllable sets. Returns ------- sdd_vec: (_N,) array or None Path accelerations. sd_vec: (_N+1,) array None Path velocities. v_vec: (_N,) array or None Auxiliary variables. K: (N+1, 2) array Return the controllable set if `return_data` is True. Compute the resulting joint trajectory and auxilliary trajectory. If parameterization fails, return a tuple of None(s). Parameters ---------- sd_start: float Starting path velocity. sd_end: float Goal path velocity. return_data: bool, optional If true, return a dict containing the internal data. Returns ------- :class:`.AbstractGeometricPath` Time-parameterized joint position trajectory. If unable to parameterize, return None. :class:`.AbstractGeometricPath` Time-parameterized auxiliary variable trajectory. If unable to parameterize or if there is no auxiliary variable, return None. # fail condition: sd_grid is None, or there is nan in sd_grid # Gridpoint time instances # If average speed is too slow. # if a time increment is too small, skip. | 2.854184 | 3 |
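A sketch of how a concrete ParameterizationAlgorithm subclass is typically driven, assuming the standard toppra front-end (TOPPRA with joint velocity/acceleration constraints); the waypoints and limits below are made up.
import numpy as np
import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo

# Made-up 2-DOF geometric path through three waypoints.
waypoints = np.array([[0.0, 0.0], [0.5, 0.3], [1.0, 0.0]])
path = ta.SplineInterpolator(np.linspace(0, 1, 3), waypoints)

# Symmetric joint velocity/acceleration limits (illustrative values).
pc_vel = constraint.JointVelocityConstraint(np.array([[-1.0, 1.0], [-1.0, 1.0]]))
pc_acc = constraint.JointAccelerationConstraint(np.array([[-2.0, 2.0], [-2.0, 2.0]]))

instance = algo.TOPPRA([pc_vel, pc_acc], path)
traj = instance.compute_trajectory(sd_start=0, sd_end=0)
if traj is not None:
    print("parameterized duration:", traj.duration)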
tools/chrome_proxy/integration_tests/chrome_proxy_metrics_unittest.py | kjthegod/chromium | 1 | 6631573 | <filename>tools/chrome_proxy/integration_tests/chrome_proxy_metrics_unittest.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import unittest
from integration_tests import chrome_proxy_metrics as metrics
from integration_tests import network_metrics_unittest as network_unittest
from metrics import test_page_test_results
# Timeline events used in tests.
# An HTML not via proxy.
EVENT_HTML_PROXY = network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html1',
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(network_unittest.HTML_BODY)),
},
body=network_unittest.HTML_BODY)
# An HTML via proxy with the deprecated Via header.
EVENT_HTML_PROXY_DEPRECATED_VIA = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html2',
response_headers={
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(len(network_unittest.HTML_BODY)),
'Via': (metrics.CHROME_PROXY_VIA_HEADER_DEPRECATED +
',other-via'),
},
body=network_unittest.HTML_BODY))
# An image via proxy with Via header and it is cached.
EVENT_IMAGE_PROXY_CACHED = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True,
served_from_cache=True))
# An image fetched directly.
EVENT_IMAGE_DIRECT = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True))
# A safe-browsing malware response.
EVENT_MALWARE_PROXY = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.malware',
response_headers={
'X-Malware-Url': '1',
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
'Location': 'http://test.malware',
},
status=307))
# A proxied response that triggers a bypass (Chrome-Proxy: bypass=1, status 502).
EVENT_IMAGE_BYPASS = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Chrome-Proxy': 'bypass=1',
'Content-Type': 'text/html',
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
},
status=502))
# An image fetched directly.
EVENT_IMAGE_DIRECT = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True))
class ChromeProxyMetricTest(unittest.TestCase):
_test_proxy_info = {}
def _StubGetProxyInfo(self, info):
def stub(unused_tab, unused_url=''): # pylint: disable=W0613
return ChromeProxyMetricTest._test_proxy_info
metrics.GetProxyInfoFromNetworkInternals = stub
ChromeProxyMetricTest._test_proxy_info = info
def testChromeProxyResponse(self):
# An https non-proxy response.
resp = metrics.ChromeProxyResponse(
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='https://test.url',
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(network_unittest.HTML_BODY)),
'Via': 'some other via',
},
body=network_unittest.HTML_BODY))
self.assertFalse(resp.ShouldHaveChromeProxyViaHeader())
self.assertFalse(resp.HasChromeProxyViaHeader())
self.assertTrue(resp.IsValidByViaHeader())
# A proxied JPEG image response
resp = metrics.ChromeProxyResponse(
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True))
self.assertTrue(resp.ShouldHaveChromeProxyViaHeader())
self.assertTrue(resp.HasChromeProxyViaHeader())
self.assertTrue(resp.IsValidByViaHeader())
def testChromeProxyMetricForDataSaving(self):
metric = metrics.ChromeProxyMetric()
events = [
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED,
EVENT_IMAGE_DIRECT]
metric.SetEvents(events)
self.assertTrue(len(events), len(list(metric.IterResponses(None))))
results = test_page_test_results.TestPageTestResults(self)
metric.AddResultsForDataSaving(None, results)
results.AssertHasPageSpecificScalarValue('resources_via_proxy', 'count', 2)
results.AssertHasPageSpecificScalarValue('resources_from_cache', 'count', 1)
results.AssertHasPageSpecificScalarValue('resources_direct', 'count', 2)
def testChromeProxyMetricForHeaderValidation(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED,
EVENT_IMAGE_DIRECT])
results = test_page_test_results.TestPageTestResults(self)
missing_via_exception = False
try:
metric.AddResultsForHeaderValidation(None, results)
except metrics.ChromeProxyMetricException:
missing_via_exception = True
# Only the HTTP image response does not have a valid Via header.
self.assertTrue(missing_via_exception)
# Two events with valid Via headers.
metric.SetEvents([
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED])
metric.AddResultsForHeaderValidation(None, results)
results.AssertHasPageSpecificScalarValue('checked_via_header', 'count', 2)
def testChromeProxyMetricForBypass(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED,
EVENT_IMAGE_DIRECT])
results = test_page_test_results.TestPageTestResults(self)
bypass_exception = False
try:
metric.AddResultsForBypass(None, results)
except metrics.ChromeProxyMetricException:
bypass_exception = True
# Two of the first three events have Via headers.
self.assertTrue(bypass_exception)
# Use directly fetched image only. It is treated as bypassed.
metric.SetEvents([EVENT_IMAGE_DIRECT])
metric.AddResultsForBypass(None, results)
results.AssertHasPageSpecificScalarValue('bypass', 'count', 1)
def testChromeProxyMetricForCorsBypass(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_BYPASS,
EVENT_IMAGE_DIRECT])
results = test_page_test_results.TestPageTestResults(self)
metric.AddResultsForCorsBypass(None, results)
results.AssertHasPageSpecificScalarValue('cors_bypass', 'count', 1)
def testChromeProxyMetricForHTTPFallback(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA])
results = test_page_test_results.TestPageTestResults(self)
fallback_exception = False
info = {}
info['enabled'] = False
self._StubGetProxyInfo(info)
try:
metric.AddResultsForBypass(None, results)
except metrics.ChromeProxyMetricException:
fallback_exception = True
self.assertTrue(fallback_exception)
fallback_exception = False
info['enabled'] = True
info['proxies'] = [
'something.else.com:80',
metrics.PROXY_SETTING_DIRECT
]
self._StubGetProxyInfo(info)
try:
metric.AddResultsForBypass(None, results)
except metrics.ChromeProxyMetricException:
fallback_exception = True
self.assertTrue(fallback_exception)
info['enabled'] = True
info['proxies'] = [
metrics.PROXY_SETTING_HTTP,
metrics.PROXY_SETTING_DIRECT
]
self._StubGetProxyInfo(info)
metric.AddResultsForHTTPFallback(None, results)
def testChromeProxyMetricForSafebrowsing(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([EVENT_MALWARE_PROXY])
results = test_page_test_results.TestPageTestResults(self)
metric.AddResultsForSafebrowsing(None, results)
results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True)
# Clear results and metrics to test no response for safebrowsing
results = test_page_test_results.TestPageTestResults(self)
metric.SetEvents([])
metric.AddResultsForSafebrowsing(None, results)
results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True)
| <filename>tools/chrome_proxy/integration_tests/chrome_proxy_metrics_unittest.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import unittest
from integration_tests import chrome_proxy_metrics as metrics
from integration_tests import network_metrics_unittest as network_unittest
from metrics import test_page_test_results
# Timeline events used in tests.
# An HTML not via proxy.
EVENT_HTML_PROXY = network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html1',
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(network_unittest.HTML_BODY)),
},
body=network_unittest.HTML_BODY)
# An HTML via proxy with the deprecated Via header.
EVENT_HTML_PROXY_DEPRECATED_VIA = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html2',
response_headers={
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(len(network_unittest.HTML_BODY)),
'Via': (metrics.CHROME_PROXY_VIA_HEADER_DEPRECATED +
',other-via'),
},
body=network_unittest.HTML_BODY))
# An image via proxy with Via header and it is cached.
EVENT_IMAGE_PROXY_CACHED = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True,
served_from_cache=True))
# An image fetched directly.
EVENT_IMAGE_DIRECT = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True))
# A safe-browsing malware response.
EVENT_MALWARE_PROXY = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.malware',
response_headers={
'X-Malware-Url': '1',
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
'Location': 'http://test.malware',
},
status=307))
# A proxied response that triggers a bypass (Chrome-Proxy: bypass=1, status 502).
EVENT_IMAGE_BYPASS = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Chrome-Proxy': 'bypass=1',
'Content-Type': 'text/html',
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
},
status=502))
# An image fetched directly.
EVENT_IMAGE_DIRECT = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True))
class ChromeProxyMetricTest(unittest.TestCase):
_test_proxy_info = {}
def _StubGetProxyInfo(self, info):
def stub(unused_tab, unused_url=''): # pylint: disable=W0613
return ChromeProxyMetricTest._test_proxy_info
metrics.GetProxyInfoFromNetworkInternals = stub
ChromeProxyMetricTest._test_proxy_info = info
def testChromeProxyResponse(self):
# An https non-proxy response.
resp = metrics.ChromeProxyResponse(
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='https://test.url',
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(network_unittest.HTML_BODY)),
'Via': 'some other via',
},
body=network_unittest.HTML_BODY))
self.assertFalse(resp.ShouldHaveChromeProxyViaHeader())
self.assertFalse(resp.HasChromeProxyViaHeader())
self.assertTrue(resp.IsValidByViaHeader())
# A proxied JPEG image response
resp = metrics.ChromeProxyResponse(
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER,
'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True))
self.assertTrue(resp.ShouldHaveChromeProxyViaHeader())
self.assertTrue(resp.HasChromeProxyViaHeader())
self.assertTrue(resp.IsValidByViaHeader())
def testChromeProxyMetricForDataSaving(self):
metric = metrics.ChromeProxyMetric()
events = [
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED,
EVENT_IMAGE_DIRECT]
metric.SetEvents(events)
self.assertTrue(len(events), len(list(metric.IterResponses(None))))
results = test_page_test_results.TestPageTestResults(self)
metric.AddResultsForDataSaving(None, results)
results.AssertHasPageSpecificScalarValue('resources_via_proxy', 'count', 2)
results.AssertHasPageSpecificScalarValue('resources_from_cache', 'count', 1)
results.AssertHasPageSpecificScalarValue('resources_direct', 'count', 2)
def testChromeProxyMetricForHeaderValidation(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED,
EVENT_IMAGE_DIRECT])
results = test_page_test_results.TestPageTestResults(self)
missing_via_exception = False
try:
metric.AddResultsForHeaderValidation(None, results)
except metrics.ChromeProxyMetricException:
missing_via_exception = True
# Only the HTTP image response does not have a valid Via header.
self.assertTrue(missing_via_exception)
# Two events with valid Via headers.
metric.SetEvents([
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED])
metric.AddResultsForHeaderValidation(None, results)
results.AssertHasPageSpecificScalarValue('checked_via_header', 'count', 2)
def testChromeProxyMetricForBypass(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_PROXY_CACHED,
EVENT_IMAGE_DIRECT])
results = test_page_test_results.TestPageTestResults(self)
bypass_exception = False
try:
metric.AddResultsForBypass(None, results)
except metrics.ChromeProxyMetricException:
bypass_exception = True
# Two of the first three events have Via headers.
self.assertTrue(bypass_exception)
# Use directly fetched image only. It is treated as bypassed.
metric.SetEvents([EVENT_IMAGE_DIRECT])
metric.AddResultsForBypass(None, results)
results.AssertHasPageSpecificScalarValue('bypass', 'count', 1)
def testChromeProxyMetricForCorsBypass(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([EVENT_HTML_PROXY_DEPRECATED_VIA,
EVENT_IMAGE_BYPASS,
EVENT_IMAGE_DIRECT])
results = test_page_test_results.TestPageTestResults(self)
metric.AddResultsForCorsBypass(None, results)
results.AssertHasPageSpecificScalarValue('cors_bypass', 'count', 1)
def testChromeProxyMetricForHTTPFallback(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([
EVENT_HTML_PROXY,
EVENT_HTML_PROXY_DEPRECATED_VIA])
results = test_page_test_results.TestPageTestResults(self)
fallback_exception = False
info = {}
info['enabled'] = False
self._StubGetProxyInfo(info)
try:
metric.AddResultsForBypass(None, results)
except metrics.ChromeProxyMetricException:
fallback_exception = True
self.assertTrue(fallback_exception)
fallback_exception = False
info['enabled'] = True
info['proxies'] = [
'something.else.com:80',
metrics.PROXY_SETTING_DIRECT
]
self._StubGetProxyInfo(info)
try:
metric.AddResultsForBypass(None, results)
except metrics.ChromeProxyMetricException:
fallback_exception = True
self.assertTrue(fallback_exception)
info['enabled'] = True
info['proxies'] = [
metrics.PROXY_SETTING_HTTP,
metrics.PROXY_SETTING_DIRECT
]
self._StubGetProxyInfo(info)
metric.AddResultsForHTTPFallback(None, results)
def testChromeProxyMetricForSafebrowsing(self):
metric = metrics.ChromeProxyMetric()
metric.SetEvents([EVENT_MALWARE_PROXY])
results = test_page_test_results.TestPageTestResults(self)
metric.AddResultsForSafebrowsing(None, results)
results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True)
# Clear results and metrics to test no response for safebrowsing
results = test_page_test_results.TestPageTestResults(self)
metric.SetEvents([])
metric.AddResultsForSafebrowsing(None, results)
results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True)
| en | 0.868906 | # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Timeline events used in tests. # An HTML not via proxy. # An HTML via proxy with the deprecated Via header. # An image via proxy with Via header and it is cached. # An image fetched directly. # A safe-browsing malware response. # An HTML via proxy with the deprecated Via header. # An image fetched directly. # pylint: disable=W0613 # An https non-proxy response. # A proxied JPEG image response # Only the HTTP image response does not have a valid Via header. # Two events with valid Via headers. # Two of the first three events have Via headers. # Use directly fetched image only. It is treated as bypassed. # Clear results and metrics to test no response for safebrowsing | 2.051633 | 2 |
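The assertions above revolve around spotting the data-reduction proxy's entry in the Via header. A self-contained sketch of that check, outside the Chromium harness (the header value and helper below are illustrative, not the metrics module's real implementation):
CHROME_PROXY_VIA_VALUE = 'Chrome-Compression-Proxy'  # illustrative constant

def has_chrome_proxy_via_header(response_headers):
    # A response may carry several comma-separated Via entries; the proxy adds
    # one whose value ends with the well-known proxy name.
    via = response_headers.get('Via', '')
    return any(entry.strip().endswith(CHROME_PROXY_VIA_VALUE)
               for entry in via.split(','))

if __name__ == '__main__':
    print(has_chrome_proxy_via_header({'Via': '1.1 Chrome-Compression-Proxy'}))  # True
    print(has_chrome_proxy_via_header({'Via': 'some other via'}))                # False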
Phase5/script.py | DamianoP/AdaptiveMethods | 2 | 6631574 | import os
import sys
import subprocess
import json
import re
import IPython as ip
import pandas as pd
import matplotlib
import time
from matplotlib.patches import Rectangle
matplotlib.use('Agg')
import numpy as np
import matplotlib as mp
import seaborn as sb
import ck.kernel as ck
import matplotlib.pyplot as plt
from matplotlib import cm
from IPython.display import Image, display
from operator import itemgetter
np.set_printoptions(threshold='nan')
from joblib import load
def myPrint(string):
print(string)
newFile.write(string+"\n")
print("Please insert the name of the folder and the precisione for the experiments")
print("Inside the folder the script need the model")
print("Then the script will search for a nested folder called 'default' or 'uint8', and then inside this folder you must have the file 'data.csv' with the tensors, and the ranking file 'ranking.txt'")
if(len(sys.argv)>1):
folder =sys.argv[1]
else:
folder =raw_input("Name of folder (example dataset_DecisionTree_CV): ") #"alexnetDecisionTree"
if(len(sys.argv)>2):
precision =sys.argv[2]
else:
precision =raw_input("Insert the precision (default or uint8): ")
path =folder+"/"+precision
shapeCSV =path+"/data.csv"
rankingFile =path+"/ranking.txt"
fileName ="results" #raw_input("Name of dataset: ")
newFile =open(path+"/"+fileName+"_comparation.txt","w")
accuracy =0
tuner ="false"#raw_input("Tuner (false / true): ")#"false"
architecture ="midgard"#raw_input("Architecture (midgard / byfrost): ")#"midgard"
classifierName ="ML"#raw_input("Classifier name (for example: Decision tree): ")
images =1#int(raw_input("Image graph generation (digit 1 for yes, or 0 for not): "))#1 for generate the images, 0 for generate only the text file
modelName=folder
myPrint("loading model...")
clf = load(folder+"/"+modelName+".joblib")
myPrint("model loaded !")
#########
if(images==1):
os.system("rm -r "+path+"/img")
os.system("mkdir "+path+"/img")
#########
#PREPROCESSING
precisionText=precision
if(tuner=="false"):
tuner=0
else:
tuner=1
if (precision=="default"):
precision=0
else:
precision=1
if(architecture=="midgard"):
architecture=0
else:
architecture=1
#LOADING DATA
myPrint("Loading data in memory..")
rankings = [line.rstrip('\n') for line in open(rankingFile)]
for i in range(0,len(rankings)):
rankings[i]=eval(rankings[i])
myPrint("Ranking File loaded")
shapes = [line.rstrip('\n') for line in open(shapeCSV)]
myPrint("Shapes File loaded")
myPrint("Data loaded correctly")
predictedBest =""
predicted =0
localPredictedTime =0
globalPredictedTime =0
realBest =""
localBestTime =0
globalBestTime =0
# TIME FOR THE CALCULATION OF ALL THE DATASET
globalTimeConv =0
globalTimeDirectConv =0
globalTimeWinogradConv =0
# TIME FOR THE CALCULATION OF ALL THE SHAPE FOR EACH NETWORK
localTimeConv =0
localTimeDirectConv =0
localTimeWinogradConv =0
# TIME FOR THE CALCULATION OF ALL THE SHAPE FOR EACH NETWORK
localTimePredictedConv =0
localTimePredictedDirectConv =0
localTimePredictedWinogradConv =0
localPredConv =0
localPredDirect =0
localPredWinog =0
# TIME FOR THE CALCULATION OF ALL THE SHAPE FOR EACH NETWORK
globalTimePredictedConv =0
globalTimePredictedDirectConv =0
globalTimePredictedWinogradConv =0
globalPredConv =0
globalPredDirect =0
globalPredWinog =0
###################################################
globalBestTimeConv =0
globalBestTimedirectconv =0
globalBestTimeWinogradcon =0
globalCounterBestConv =0
globalCounterBestDirectconv =0
globalCounterBestWinogradconv =0
localBestTimeConv =0
localBestTimedirectconv =0
localBestTimeWinogradcon =0
localCounterBestConv =0
localCounterBestDirectconv =0
localCounterBestWinogradconv =0
#counter
nConv =0
nWinograd =0
nDirectConv =0
nPredicted =0
nShapes =0
nLocalPredicted =0
nLocalConv =0
nLocalDirectConv =0
nLocalWinograd =0
nCorrectPredicted =0
shapesNumber =0
localShapeCounter =0
myPrint("Preprocessing ended")
#END PREPROCESSING
currentNetwork="_empty"
ax=""
def autolabel(rects, text):
global ax
rect=rects[0]
height = rect.get_height()
ax.text(rect.get_x() + (rect.get_width()/2)-0.05, 1.01*height,text)
def generateImage(imgName,time1,time2,time3,predictedConv,predictedDirectConv,predictedWinograd,
stringPredicted,classifier,numConv,numDirect,numWinog,
numPred,numPredConv,numPredDirect,numPredWinog,
bestTimeConv,bestTimeDirect,bestTimeWinog,
nBestConv,nBestDirect,nBestWinog):
global ax,shapesNumber,localShapeCounter
imgName=str(imgName)
lcounter=0
if(imgName[0]!="["):
if(imgName!="global"):
if(localShapeCounter>1):
imgTitle=imgName+": "+str(localShapeCounter)+" convolution layers"
else:
imgTitle=imgName+": "+str(localShapeCounter)+" convolution layer"
lcounter=localShapeCounter
else:#else for the "global" images
if(shapesNumber>1):
imgTitle=imgName+": "+str(shapesNumber)+" shapes"
else:
imgTitle=imgName+": "+str(shapesNumber)+" shape"
lcounter=shapesNumber
else:
imgTitle=imgName
xLabels=[]
plt.figure(figsize=(10,10))
plt.rcParams.update({'font.size': 14})
if(time1=="null"):
time1=0
if(time2=="null"):
time2=0
if(time3=="null"):
time3=0
if(predictedConv=="null"):
predictedConv=0
if(predictedDirectConv=="null"):
predictedDirectConv=0
if(predictedWinograd=="null"):
predictedWinograd=0
if(bestTimeConv=="null"):
bestTimeConv=0
if(bestTimeDirect=="null"):
bestTimeDirect=0
if(bestTimeWinog=="null"):
bestTimeWinog=0
predictedTimeTotal=predictedConv+predictedDirectConv+predictedWinograd
bestTimeTotal=bestTimeConv+bestTimeDirect+bestTimeWinog
if(numConv!="null"):
convSTR="Conv" +"\n"+str(time1) +"\n"
if(numConv<=1):
convSTR+=str(numConv) +" layer"
else:
convSTR+=str(numConv) +" layers"
else:
convSTR="Conv" +"\n"+str(time1)
if(numDirect!="null"):
directSTR="Directconv" +"\n"+str(time2) +"\n"
if(numDirect<=1):
directSTR+=str(numDirect) +" layer"
else:
directSTR+=str(numDirect) +" layers"
else:
directSTR="Directconv"+"\n"+str(time2)
if(numWinog!="null"):
winogSTR="Winograd" +"\n"+str(time3) +"\n"
if(numWinog<=1):
winogSTR+=str(numWinog) +" layer"
else:
winogSTR+=str(numWinog) +" layers"
else:
winogSTR="Winograd" +"\n"+str(time3)
if(numPred!="null"):
predicSTR="Predicted" +"\n"+str(predictedTimeTotal) +"\n"
if(numPred<=1):
predicSTR+=str(numPred) +" layer"
else:
predicSTR+=str(numPred) +" layers"
predicSTR+="\n"+"("+str(numPredConv)+", "+str(numPredDirect)+", "+str(numPredWinog)+")"
else:
predicSTR="Predicted" +"\n"+str(predictedTimeTotal)
bestcount=nBestConv+nBestDirect+nBestWinog
bestSTR="Oracle" +"\n"+str(bestTimeTotal)+"\n"
if(bestcount<=1):
bestSTR+=str(bestcount) +" layer"
else:
bestSTR+=str(bestcount) +" layers"
bestSTR+="\n"+"("+str(nBestConv)+", "+str(nBestDirect)+", "+str(nBestWinog)+")"
ind = np.arange(5) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
    #time3= 10000 # TO REMOVE ######################
b1=[]
b1.append(time1)
b1.append(0)
b1.append(0)
b1.append(predictedConv)
b1.append(bestTimeConv)
b2=[]
b2.append(0)
b2.append(time2)
b2.append(0)
b2.append(predictedDirectConv)
b2.append(bestTimeDirect)
b3=[]
b3.append(0)
b3.append(0)
b3.append(time3)
b3.append(predictedWinograd)
b3.append(bestTimeWinog)
bottomValue=np.array(b1)+np.array(b2)
p1 = plt.bar(ind, b1, width)
p2 = plt.bar(ind, b2, width,bottom=b1)
p3 = plt.bar(ind, b3, width,bottom=bottomValue)
plt.ylabel('Execution Time (microseconds)')
if(precisionText=="default"):
precisionImage="fp32"
else:
precisionImage=precisionText
folderText=""
temp=folder.split("_")
for o in range(0,len(temp)):
if(o==0):
folderText+=temp[o]
else:
folderText+=","+temp[o]
plt.title(folderText+" "+precisionImage+"\n"+imgTitle)
plt.xticks(ind, (convSTR, directSTR, winogSTR, predicSTR, bestSTR))
if(imgName[0]!="["):
if(numConv!=lcounter):
plt.gca().get_xticklabels()[0].set_color("red")
if(numDirect!=lcounter):
plt.gca().get_xticklabels()[1].set_color("red")
if(numWinog!=lcounter):
plt.gca().get_xticklabels()[2].set_color("red")
if(numPred!=lcounter):
plt.gca().get_xticklabels()[3].set_color("red")
if(bestcount!=lcounter):
plt.gca().get_xticklabels()[4].set_color("red")
if(time1==0):
plt.gca().get_xticklabels()[0].set_color("red")
if(time2==0):
plt.gca().get_xticklabels()[1].set_color("red")
if(time3==0):
plt.gca().get_xticklabels()[2].set_color("red")
if(predictedTimeTotal==0):
plt.gca().get_xticklabels()[3].set_color("red")
if(bestTimeTotal==0):
plt.gca().get_xticklabels()[4].set_color("red")
extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
plt.legend((p1[0], p2[0], p3[0],extra), ('Conv', 'Directconv','Winograd',"Red = Failure"),loc='upper center', bbox_to_anchor=(0.96,1.167), fancybox=True, shadow=True,ncol=1)
plt.savefig(path+"/img/"+imgName+".png", format='png')
plt.cla()
plt.clf()
plt.close('all')
def middleReport():
global localTimeConv,localTimeDirectConv,localTimeWinogradConv,localBestTime,predictedBest,localPredictedTime,classifierName,currentNetwork,images,nLocalConv,nLocalPredicted,nLocalWinograd,nLocalDirectConv,localTimePredictedConv,localTimePredictedDirectConv,localTimePredictedWinogradConv,localPredConv,localPredDirect,localPredWinog,localCounterBestConv,localCounterBestDirectconv,localCounterBestWinogradconv,localBestTimeConv,localBestTimedirectconv,localBestTimeWinogradcon,localShapeCounter
myPrint("Results:")
myPrint("manual Conv time:" +str(localTimeConv))
myPrint("manual Directconv time:" +str(localTimeDirectConv))
myPrint("manual Winogradconv time:" +str(localTimeWinogradConv))
myPrint("best possible time:" +str(localBestTime))
myPrint("predicted time for the network:" +str(localPredictedTime))
myPrint("-----------------------------------------------------------")
myPrint("\n \n \n")
if (images==1):
generateImage(currentNetwork,localTimeConv,localTimeDirectConv,localTimeWinogradConv,
localTimePredictedConv,localTimePredictedDirectConv,localTimePredictedWinogradConv,"",
classifierName,nLocalConv,nLocalDirectConv,nLocalWinograd,nLocalPredicted,localPredConv,localPredDirect,localPredWinog,
localBestTimeConv,localBestTimedirectconv,localBestTimeWinogradcon,localCounterBestConv,localCounterBestDirectconv,localCounterBestWinogradconv) # image for the shape
localTimeConv =0
localTimeWinogradConv =0
localTimeDirectConv =0
localBestTime =0
localPredictedTime =0
predictedBest =""
nLocalPredicted =0
nLocalConv =0
nLocalDirectConv =0
nLocalWinograd =0
localPredConv =0
localPredDirect =0
localPredWinog =0
localTimePredictedConv =0
localTimePredictedDirectConv =0
localTimePredictedWinogradConv =0
localCounterBestConv=0
localCounterBestDirectconv=0
localCounterBestWinogradconv=0
localBestTimeConv=0
localBestTimedirectconv=0
localBestTimeWinogradcon=0
localShapeCounter=0
##################
#MAIN
##################
shapesNumber=len(shapes)-1
for i in range(1,len(shapes)):
shape=shapes[i].split(",")
if(len(shape[0])==0 and len(shape[1])==0 and len(shape[2])==0 ): #"skipping case : ',,,,,,,,' "
continue
if(len(shape[0])>0 and currentNetwork=="_empty"):
currentNetwork=shape[0]
myPrint("Analyzing "+shape[0])
if(len(shape[0])>0 and i>1 and currentNetwork!=shape[0]):
middleReport()
currentNetwork=shape[0]
myPrint("Analyzing "+shape[0])
nShapes +=1
workingShape =shape[3]+"-"+shape[1]+"-"+shape[2]+"-"+shape[4]+"-"+shape[5]+"-"+shape[6]+"-"+shape[7]
workingShapeARFF =shape[3]+","+shape[1]+","+shape[2]+","+shape[4]+","+shape[5]+","+shape[6]+","+shape[7]+","+str(tuner)+","+str(precision)+","+str(architecture)
print workingShapeARFF
workingShapeARFF =eval("[["+workingShapeARFF+"]]")
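    # workingShapeARFF now holds the seven shape fields from data.csv plus the
    # tuner/precision/architecture flags; eval() turned the comma-separated string
    # into the nested-list (1 sample x 10 features) layout expected by clf.predict.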
predictedBest =clf.predict(workingShapeARFF)[0]
convTime ="null"
directconvTime ="null"
winogradconvTime ="null"
predictedTimeShape ="null"
finded =False
localShapeCounter +=1
bestShapeConv =0
bestShapeDirect =0
bestShapeWinog =0
cB =0
dB =0
wB =0
for j in range(0,len(rankings)):
if(rankings[j][0][0]==workingShape):
timeList=rankings[j][1]
for h in range(0,len(rankings[j][1])):
if(rankings[j][1][h][0][0]==predictedBest):
predictedTimeShape =float(rankings[j][1][h][1][0])
localPredictedTime +=float(rankings[j][1][h][1][0])
globalPredictedTime +=float(rankings[j][1][h][1][0])
nPredicted +=1
nLocalPredicted +=1
if(rankings[j][1][h][0][0]=="conv"):
localTimePredictedConv +=float(rankings[j][1][h][1][0])
globalTimePredictedConv +=float(rankings[j][1][h][1][0])
globalPredConv +=1
localPredConv +=1
if(rankings[j][1][h][0][0]=="winogradconv"):
localTimePredictedWinogradConv +=float(rankings[j][1][h][1][0])
globalTimePredictedWinogradConv +=float(rankings[j][1][h][1][0])
globalPredWinog +=1
localPredWinog +=1
if(rankings[j][1][h][0][0]=="directconv"):
localTimePredictedDirectConv +=float(rankings[j][1][h][1][0])
globalTimePredictedDirectConv +=float(rankings[j][1][h][1][0])
globalPredDirect +=1
localPredDirect +=1
if(h==0):
realBest =rankings[j][1][h][0][0]
localBestTime +=float(rankings[j][1][h][1][0])
globalBestTime +=float(rankings[j][1][h][1][0])
# QUI
if(realBest=="conv"):
globalBestTimeConv +=float(rankings[j][1][h][1][0])
localBestTimeConv +=float(rankings[j][1][h][1][0])
globalCounterBestConv +=1
localCounterBestConv +=1
bestShapeConv =float(rankings[j][1][h][1][0])
cB =1
elif(realBest=="directconv"):
globalBestTimedirectconv +=float(rankings[j][1][h][1][0])
localBestTimedirectconv +=float(rankings[j][1][h][1][0])
globalCounterBestDirectconv +=1
localCounterBestDirectconv +=1
bestShapeDirect =float(rankings[j][1][h][1][0])
dB =1
elif(realBest=="winogradconv"):
globalBestTimeWinogradcon +=float(rankings[j][1][h][1][0])
localBestTimeWinogradcon +=float(rankings[j][1][h][1][0])
globalCounterBestWinogradconv +=1
localCounterBestWinogradconv +=1
bestShapeWinog =float(rankings[j][1][h][1][0])
wB =1
if(rankings[j][1][h][0][0]=="conv"):
convTime =float(rankings[j][1][h][1][0])
localTimeConv +=float(rankings[j][1][h][1][0])
globalTimeConv +=float(rankings[j][1][h][1][0])
nConv +=1
nLocalConv +=1
finded=True
elif (rankings[j][1][h][0][0]=="winogradconv"):
winogradconvTime =float(rankings[j][1][h][1][0])
localTimeWinogradConv +=float(rankings[j][1][h][1][0])
globalTimeWinogradConv +=float(rankings[j][1][h][1][0])
nWinograd +=1
nLocalWinograd +=1
finded=True
elif (rankings[j][1][h][0][0]=="directconv"):
directconvTime =float(rankings[j][1][h][1][0])
localTimeDirectConv +=float(rankings[j][1][h][1][0])
globalTimeDirectConv +=float(rankings[j][1][h][1][0])
nDirectConv +=1
nLocalDirectConv +=1
finded=True
else:
continue
#rankings.remove(rankings[j])
break
if(finded==False):
myPrint(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
myPrint("Analyzing: "+str(workingShape))
myPrint("manual conv time: " +str(convTime))
myPrint("manual directconv time: " +str(directconvTime))
myPrint("manual winogradconvTime time: "+str(winogradconvTime))
myPrint("predicted best: " +str(predictedBest))
myPrint("\n")
cPred=0
wPred=0
dPred=0
x=0
y=0
z=0
if(predictedBest=="conv"):
cPred=convTime
x=1
if(predictedBest=="directconv"):
dPred=directconvTime
y=1
if(predictedBest=="winogradconv"):
wPred=winogradconvTime
z=1
h=x+y+z
if (images==1):
generateImage(workingShapeARFF,convTime,directconvTime,winogradconvTime,cPred,dPred,wPred,"",classifierName,"null","null","null",h,x,y,z,bestShapeConv,bestShapeDirect,bestShapeWinog,cB,dB,wB) # image for the shape
if(predictedBest==realBest):
nCorrectPredicted+=1
# SHAPE + time 1, time 2, time 3, time predicted, predicted method as string
middleReport() #last shape
staticMethod=[]
if(nConv==nShapes):
staticMethod.append([globalTimeConv,"Conv"])
if(nDirectConv==nShapes):
staticMethod.append([globalTimeDirectConv,"Directconv"])
if(nWinograd==nShapes):
staticMethod.append([globalTimeWinogradConv,"Winograd"])
if(len(staticMethod)==0):
speedUp="+inf"
staticMethod.append(["0","none"])
else:
staticMethod.sort(key=itemgetter(0))
speedUp= round(((float(staticMethod[0][0])/float(globalPredictedTime))*float(100))-100,2)
if(speedUp>=0):
speedUp="+"+str(speedUp)
accuracy=round(float(nCorrectPredicted)/float(nShapes)*float(100),2)
myPrint("\n")
myPrint("\n")
myPrint("-----------------------------------------------------------")
myPrint("\n")
myPrint("Final Report:")
myPrint("If you run all the dataset you will get this time:")
myPrint("Manual with conv:" +str(globalTimeConv) + " | " + str(nConv) +" experiments successfully achieved on "+str(nShapes) )
myPrint("Manual with directconv:" +str(globalTimeDirectConv) + " | " + str(nDirectConv) +" experiments successfully achieved on "+str(nShapes) )
myPrint("Manual with winogradconv:" +str(globalTimeWinogradConv)+ " | " + str(nWinograd) +" experiments successfully achieved on "+str(nShapes) )
myPrint("With dynamic prediction of the algorithm:" +str(globalPredictedTime) + " | " + str(nPredicted) +" experiments successfully achieved on "+str(nShapes) )
myPrint("Best possible time:" +str(globalBestTime) )
myPrint("Best static method:" +str(staticMethod[0][0]) + " | with "+str(staticMethod[0][1]))
myPrint("Accuracy:"+str(accuracy)+"%")
myPrint("SpeedUp:" +str(speedUp)+"%")
myPrint(time.strftime("%d/%m/%Y %H:%M:%S"))
if (images==1):
generateImage("global",globalTimeConv,globalTimeDirectConv,globalTimeWinogradConv,globalTimePredictedConv,globalTimePredictedDirectConv,globalTimePredictedWinogradConv,"",classifierName,nConv,nDirectConv,nWinograd,nPredicted,globalPredConv,globalPredDirect,globalPredWinog, globalBestTimeConv,globalBestTimedirectconv,globalBestTimeWinogradcon,globalCounterBestConv,globalCounterBestDirectconv,globalCounterBestWinogradconv)
myPrint("\n")
myPrint("Done!")
newFile.close()
if(len(sys.argv)>3):
subprocess.Popen(["python","script.py",folder,sys.argv[3]])
else:
sys.exit()
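# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: the nested indexing
# used above (rankings[j][0][0], rankings[j][1][h][0][0], rankings[j][1][h][1][0])
# implies that each entry loaded from ranking.txt has roughly the layout shown
# below. The shape key and timings here are invented purely for illustration.
# ---------------------------------------------------------------------------
example_ranking_entry = [
    ["64-224-224-3-7-7-2"],            # [0][0] -> the shape key
    [                                  # [1]    -> (method, time) pairs, fastest first
        [["conv"], [1234.5]],          # [1][h][0][0] -> method, [1][h][1][0] -> time
        [["winogradconv"], [1500.2]],
        [["directconv"], [1893.7]],
    ],
]
# The speed-up in the final report is (best static time / predicted time) * 100 - 100,
# e.g. 4628.4 / 4100.0 * 100 - 100 gives roughly +12.89%.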
| en | 0.384333 | #"alexnetDecisionTree" #raw_input("Name of dataset: ") #"false" #"midgard" #int(raw_input("Image graph generation (digit 1 for yes, or 0 for not): "))#1 for generate the images, 0 for generate only the text file ######### ######### #PREPROCESSING #LOADING DATA # TIME FOR THE CALCULATION OF ALL THE DATASET # TIME FOR THE CALCULATION OF ALL THE SHAPE FOR EACH NETWORK # TIME FOR THE CALCULATION OF ALL THE SHAPE FOR EACH NETWORK # TIME FOR THE CALCULATION OF ALL THE SHAPE FOR EACH NETWORK ################################################### #counter #END PREPROCESSING #else for the "global" images # the x locations for the groups # the width of the bars: can also be len(x) sequence #time3= 10000 # DA LEVARE ###################### # image for the shape ################## #MAIN ################## #"skipping case : ',,,,,,,,' " # QUI #rankings.remove(rankings[j]) # image for the shape # SHAPE + time 1, time 2, time 3, time predicted, predicted method as string #last shape | 2.530672 | 3 |
replay/management/commands/record.py | Liliai/django-replay | 1 | 6631575 | """Record Management Command
TODO
* Add support for "runserver" options.
"""
import itertools
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
# pylint: disable=import-outside-toplevel
from django.conf import settings
iterator = itertools.chain(
('replay.middleware.RecorderMiddleware',),
settings.MIDDLEWARE,
)
settings.MIDDLEWARE = tuple(iterator)
call_command('runserver', *args, **options)
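# Hypothetical usage sketch, not part of the original file: the command is
# expected to be started like runserver (e.g. `python manage.py record 8000`;
# the exact invocation depends on the host project). The middleware prepend it
# performs amounts to plain tuple concatenation:
import itertools
existing_middleware = ('django.middleware.common.CommonMiddleware',)  # assumed example value
patched = tuple(itertools.chain(('replay.middleware.RecorderMiddleware',), existing_middleware))
assert patched[0] == 'replay.middleware.RecorderMiddleware'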
| en | 0.609655 | Record Management Command TODO * Add support for "runserver" options. # pylint: disable=import-outside-toplevel | 2.069059 | 2 |
predictions/utils/future.py | SimonMolinsky/pygda-hydra | 1 | 6631576 | <filename>predictions/utils/future.py
import pandas as pd
def set_future_series(forecasted_values, series_name, last_date, steps_ahead, frequency):
"""
Function sets future predictions.
:param forecasted_values: array of predictions,
:param series_name: name of the forecasted series,
:param last_date: the last observation time,
:param steps_ahead: how many steps ahead of predictions,
:param frequency: frequency of time steps.
:return: Series with predicted values and time index.
"""
new_index = pd.date_range(start=last_date,
periods=steps_ahead + 1,
freq=frequency)
new_index = new_index[1:]
y_pred = pd.Series(forecasted_values, index=new_index)
y_pred.name = series_name
y_pred.index.freq = frequency
return y_pred
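# Example usage with made-up values (illustrative only, not part of the module):
if __name__ == '__main__':
    forecast = [10.5, 11.2, 9.8]
    last_observed = pd.Timestamp('2021-06-30')
    y_pred = set_future_series(forecast, 'sales', last_observed,
                               steps_ahead=3, frequency='D')
    print(y_pred)  # Series named 'sales', indexed 2021-07-01 .. 2021-07-03, daily frequency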
| en | 0.805858 | Function sets future predictions. :param forecasted_values: array of predictions, :param series_name: name of the forecasted series, :param last_date: the last observation time, :param steps_ahead: how many steps ahead of predictions, :param frequency: frequency of time steps. :return: Series with predicted values and time index. | 3.465598 | 3 |
src/assets/spanish/year7.py | pz325/one-page-a-day | 0 | 6631577 | # -*- coding: utf-8 -*-
# pylint: disable=C0301
'''
contents of Zhenglin's Spanish year 7
'''
ZHENGLIN_YEAR7 = {
'title': 'Spanish Year 7',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections':[
{
'title': 'Numbers 1 - 10',
'exercises': [
('uno', '1'),
('dos', '2'),
('tres', '3'),
('cuatro', '4'),
('cinco', '5'),
('seis', '6'),
('siete', '7'),
('ocho', '8'),
('nueve', '9'),
('diez', '10'),
('números', 'numbers')
]
}, # a section object
{
'title': 'Family',
'exercises': [
('En mi familia, hay...', 'In my family, there is/are...'),
('y', 'and'),
('mi', 'my (singular noun)'),
('mis', 'my (plural noun)'),
('los abuelos', 'grandparents'),
('el abuelo', 'grandfather'),
('la abuela', 'grandmother'),
('los padres', 'parents'),
('el padre', 'the father'),
('la madre', 'the mother'),
('el padrastro', 'step dad'),
('la madrastra', 'step mother'),
('el marido', 'husband'),
('la esposa??', 'wife'),
('los parientes', 'the relatives'),
('la tía', 'aunt'),
('el tío', 'uncle'),
('los tíos', 'uncles'),
('las tías', 'aunts'),
('el primo', 'male cousin'),
('la prima', 'female cousin'),
('los primos', 'the cousins'),
('las primas', 'only girl cousins'),
('los niños', 'the children'),
('el hermano', 'the brother'),
('la hermana', 'the sister'),
('el hermanastro', 'step brother'),
('la hermanastra', 'step sister'),
('el hijo único', 'the only son'),
('la hija única', 'the only daughter'),
('casado', 'married (male)'),
('casada', 'married (female)'),
('divorciado', 'divorced (male)'),
('divorciada', 'divorced (female)'),
('el nieto', 'the grandson'),
('la nieta', 'the granddaughter'),
]
}, # a section object
{
'title': 'Talking about my family',
'exercises': [
('Tengo', 'I have'),
('No', '(I) do not (have)'),
('Se llama', '(He/She) is called'),
('Se llaman', '(They are) called'),
('el/un', 'the/a (singular nouns, masculine)'),
('la/una', 'the/a (singular nouns, feminine)'),
('los/unos', 'the/some (plural nouns masculine and feminine)'),
('las/unas', 'the/some (plural nouns feminine only)'),
('unas frases sobre la familia', 'some phrases about the family'),
('El tío se llama Ted', 'The uncle is called Ted'),
('Tengo un hermano', 'I have a brother'),
('No tengo una hermana', 'I do not have a sister'),
('Los abuelos se llaman Fred y Phil', 'The grandfathers are called Fred and Phil'),
('¡Hola! Tengo un hermano pero no tengo hermanas', 'Hello! I have a brother but I do not have sisters'),
('Soy', 'I am'),
('de', 'of/from'),
('más largas', 'longer'),
('El hermano de mi madre es mi tío', 'My mother\'s brother is my uncle'),
('Los padres de mi padre son mis abuelos', 'My father\'s parents are my grandparents'),
]
}, # a section object
{
'title': 'Numbers 11-31',
'exercises': [
('once', '11'),
('doce', '12'),
('trece', '13'),
('catorce', '14'),
('quince', '15'),
('dieciséis', '16'),
('diecisiete', '17'),
('dieciocho', '18'),
('diecinueve', '19'),
('veinte', '20'),
('veintiuno', '21'),
('veintidós', '22'),
('veintitrés', '23'),
('veinticuatro', '24'),
('veinticinco', '25'),
('veintiséis', '26'),
('veintisiete', '27'),
('veintiocho', '28'),
('veintinueve', '29'),
('treinta', '30'),
('treinta y uno', '31')
]
}, # a section object
{
'title': 'Mi Vida Loca',
'exercises': [
('loca', 'crazy'),
('vida', 'life'),
('también', 'also'),
('perdón', 'excuse me'),
('gracias', 'thank you'),
('adiós', 'good bye'),
('hola', 'hello'),
('¿Cómo se llama...?', 'What is the name of ...?'),
('¿Cómo te llamas?', 'What is your name?'),
('la calle', 'the street'),
('eres', 'are you (?)'),
('De nada', 'You are welcome'),
('No entiendo', 'I do not understand'),
('entiendo', 'I get it'),
('el amigo / la amiga', 'the friend'),
('sí', 'yes'),
('si', 'if'),
('nunca', 'never'),
('un piso', 'a flat'),
('dormitorio', 'bedroom'),
('salir', 'to go out / to leave'),
('quiero', 'I want'),
('un cafe', 'a coffee'),
('con leche', 'with milk'),
('una tostada', 'a piece of toast'),
('frío', 'cold'),
('fría', 'cool'),
('cerveza', 'beer'),
('caliente', 'warm/hot')
]
}, # a section object
{
'title': 'Los meses',
'exercises': [
('meses', 'months'),
('enero', 'January'),
('febrero', 'February'),
('marzo', 'March'),
('abril', 'April'),
('mayo', 'May'),
('junio', 'June'),
('julio', 'July'),
('agosto', 'August'),
('septiembre/setiembre', 'September'),
('octubre', 'October'),
('noviembre', 'November'),
('diciembre', 'December'),
('Feliz Cumpleaños', 'Happy birthday')
]
},
]
}
'''
contents of ZOOM espanol 1
'''
ZOOM_ESPANOL_1 = {
'title': 'Zoom Espanol 1',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections': [
{
'title': 'O Me presento',
'exercises': [
('¡Hola!', 'Hello!'),
('Saludos', 'Greetings'),
('Adiós', 'Goodbye'),
('Hasta luego', 'See you later'),
('Hasta pronto', 'See you soon'),
('Soy ana', 'I am Ana'),
('Me llamo Federico', 'My name is Federico'),
('escucha', 'listen'),
('repite', 'repeat'),
('habla', 'speak'),
('lee', 'read'),
('escribe', 'write'),
('pregunta', 'ask'),
('indica', 'point to'),
('contesta', 'answer'),
('mira', 'look'),
('empareja', 'match'),
('¿Cómo te llamas?', 'What\'s your name?'),
('Se llama Olivia', 'Her name is Olivia'),
('¿Qué tal?', 'How are you?'),
('Buenos días', 'Good morning'),
('Buenas tardes', 'Good afternoon'),
('Buenas noches', 'Good night'),
('uno', 'one'),
('dos', 'two'),
('tres', 'three'),
('cuatro', 'four'),
('cinco', 'five'),
('seis', 'six'),
('siete', 'seven'),
('ocho', 'eight'),
('nueve', 'nine'),
('diez', 'ten'),
('once', 'eleven'),
('doce', 'twelve'),
('trece', 'thirteen'),
('catorce', 'fourteen'),
('quince', 'fifteen'),
('dieciséis', 'sixteen'),
('diecisiete', 'seventeen'),
('dieciocho', 'eighteen'),
('diecinueve', 'nineteen'),
('veinte', 'twenty'),
('el abuelo', 'grandfather'),
('la abuela', 'grandmother'),
('los abuelos', 'grandparents'),
('mi padre', 'my father'),
('mi madre', 'my mother'),
('mis padres', 'my parents'),
('el hermano', 'brother'),
('la hermana', 'sister'),
('el tío', 'uncle'),
('la tía', 'aunt'),
('el primo', 'cousin(m)'),
('la prima', 'cousin(f)'),
('el hermanastro', 'stepbrother'),
('la hermanastra', 'stepsister'),
('el padrastro', 'stepfather'),
('la madrastra', 'stepmother'),
('Soy hijo único', 'I am an only child (m)'),
('Soy hija única', 'I am an only child (f)'),
('la familia', 'the family'),
('¿Cuántos años tienes?', 'How old are you?'),
('Tengo X años', 'I am X years old'),
('treinta', 'thirty'),
('cuarenta', 'forty'),
('cincuenta', 'fifty'),
('sesenta', 'sixty'),
('setenta', 'seventy'),
('ochenta', 'eighty'),
('noventa', 'ninety'),
('cien', 'a hundred'),
('gente y números', 'people and numbers'),
('hay', 'there is, there are'),
('no hay', 'there is not, there are not'),
('ser', 'to be'),
('tener', 'to have'),
('llamarse', 'to be called')
]
}, # a section object
{
'title': '1A Me describo',
'exercises': [
('Cumpleaños y fiestas', 'birthdays and festivals'),
('enero', 'January'),
('febrero', 'February'),
('marzo', 'March'),
('abril', 'April'),
('mayo', 'May'),
('junio', 'June'),
('julio', 'July'),
('agosto', 'August'),
('septiembre/setiembre', 'September'),
('octubre', 'October'),
('noviembre', 'November'),
('diciembre', 'December'),
('lunes', 'Monday'),
('martes', 'Tuesday'),
('miércoles', 'Wednesday'),
('jueves', 'Thursday'),
('viernes', 'Friday'),
('sábado', 'Saturday'),
('domingo', 'Sunday'),
('mis mascotas', 'my pets'),
('un ratón', 'a mouse'),
('un pájaro', 'a bird'),
('un gato', 'a cat'),
('una rata', 'a rat'),
('una tortuga', 'a turtle / tortoise'),
('un perro', 'a dog'),
('una araña', 'a spider'),
('un pez', 'a fish'),
('un conejo', 'a rabbit'),
('un caballo', 'a horse'),
('una cobaya', 'a guinea pig'),
('una serpiente', 'a snake'),
('blanco/a', 'white'),
('negro/a', 'black'),
('rojo/a', 'red'),
('azul', 'blue'),
('verde', 'green'),
('amarillo/a', 'yellow'),
('naranja', 'orange'),
('gris', 'grey'),
('marrón', 'brown'),
('rosa', 'pink'),
('morado/a', 'purple'),
('lenguas y nacionalidades', 'languages and nationalities'),
('inglés / inglesa', 'English'),
('escocés / escocesa', 'Scottish'),
('irlandés / irlandesa', 'Irish'),
('galés / galesa', 'Welsh'),
('francés / francesa', 'French'),
('español(a)', 'Spanish'),
('portugués / portuguesa', 'Portuguese'),
('italiano/a', 'Italian'),
('¿Cómo eres?', 'What are you like?'),
('Tengo...', 'I have...'),
('el pelo', 'hair'),
('largo', 'long'),
('corto', 'short'),
('liso', 'straight'),
('rizado', 'curly'),
('ondulado', 'wavy'),
('de punta', 'spiky'),
('los ojos', 'eyes'),
('bigote', 'moustache'),
('barba', 'beard'),
('pecas', 'freckles'),
('Llevo gafas', 'I wear glasses'),
('Soy...', 'I am...'),
('alto/a', 'tall'),
('bajo/a', 'short'),
('delgado/a', 'slim'),
('gordo/a', 'fat'),
('de talla mediana', 'medium size'),
('ordenado/a', 'tidy'),
('desordenado/a', 'untidy'),
('simpático/a', 'friendly'),
('antipático/a', 'unfriendly'),
('paciente', 'patient'),
('impaciente', 'impatient'),
('estudioso/a', 'studious'),
('perezoso/a', 'lazy'),
('testarudo/a', 'stubborn'),
('extrovertido/a', 'outgoing'),
('tímido/a', 'shy'),
('inteligente', 'intelligent'),
('bobo/a', 'silly'),
('maduro/a', 'mature'),
('inmaduro/a', 'immature'),
('el chico', 'the boy'),
('la chica', 'the girl'),
('alguien', 'someone'),
('un poco', 'a little'),
('bastante', 'quite'),
('muy', 'very'),
('demasiado', 'too'),
('y', 'and'),
('pero', 'but'),
('también', 'also'),
('sin embargo', 'however'),
('casa', 'home')
]
}, # a section object
{
'title': '1B El insti',
'exercises': [
('la educación física', 'PE'),
('el español', 'Spanish'),
('el inglés', 'English'),
('la geografía', 'geography'),
('la historia', 'history'),
('la informática', 'ICT'),
('la tecnología', 'design and technology'),
('las ciencias', 'science'),
('las matemáticas', 'mathematics'),
('fácil', 'easy'),
('difícil', 'difficult'),
('útil', 'useful'),
('aburrido/a', 'boring'),
('divertido/a', 'fun, amusing'),
('interesante', 'interesting'),
('un poco', 'a little'),
('bastante', 'fairly, quite'),
('tan', 'so'),
('muy', 'very'),
('demasiado', 'too (much)'),
('para mí', 'for me'),
('pero', 'but'),
('me gusta', 'I like'),
('te gusta', 'you like'),
('correcto', 'true'),
('mentira', 'false'),
('la hora y el horario', 'time and timetable'),
('Es la una', 'It is one o\'clock'),
('... y cinco', '... five past one'),
('... y cuarto', '... a quarter past one'),
('... y veinte', '... twenty past one'),
('... y media', '... half past one'),
('Son las dos', 'It is two o\'clock'),
('... menos veinticinco', 'twenty-five to two'),
('... menos cuarto', '... a quarter to two'),
('... menos diez', '... ten to two'),
('Es el mediodía', 'It is midday'),
('Es la medianoche', 'It is midnight'),
('las instalaciones', 'school buildings'),
('el aula', 'the classroom'),
('el gimnasio', 'the gym'),
('el laboratorio', 'the laboratory'),
('el patio', 'the playground'),
('la biblioteca', 'the library'),
('la oficina', 'the office'),
('la oficina del director', 'the headteacher\'s office'),
('grande', 'large'),
('pequeño/a', 'small'),
('moderno/a', 'modern'),
('antiguo/a', 'old'),
('bonito/a', 'attractive'),
('feo/a', 'ugly'),
('cómodo/a', 'comfortable'),
('hay', 'there is / there are'),
('leer un libro', 'to read a book'),
('comer un bocadillo', 'to eat a sandwich'),
('estudiar ciencias', 'to study science'),
('charlar con amigos', 'to chat with friends'),
('practicar deporte', 'to play sport'),
('escribir cartas', 'to write letters'),
('la ropa', 'clothes'),
('llevar', 'to wear'),
('un jersey', 'a jersey'),
('una camisa', 'a shirt'),
('una camiseta', 'a T-shirt'),
('una corbata', 'a tie'),
('una falda', 'a skirt'),
('una sudadera', 'a sweatshirt'),
('unas zapatillas', 'trainers'),
('unos calcetines', 'socks'),
('unos pantalones', 'trousers'),
('unos vaqueros', 'jeans'),
('unos zapatos', 'shoes'),
('incómodo/a', 'uncomfortable'),
('elegante', 'stylish'),
('práctico/a', 'practical'),
('formal', 'formal'),
('ridículo/a', 'ridiculous'),
('informal', 'informal')
]
}, # a section object
{
'title': '2A Mi semana',
'exercises': [
('el tiempo', 'the weather'),
('hace buen tiempo', 'it is fine / it is a nice day'),
('hace mal tiempo', 'it is bad weather / it is not a nice day'),
('hace sol', 'it is sunny'),
('hace calor', 'it is hot'),
('hace frío', 'it is cold'),
('hace viento', 'it is windy'),
('hay tormenta', 'it is stormy'),
('hay niebla', 'it is foggy'),
('hay nubes', 'it is cloudy'),
('llueve', 'it is raining'),
('nieva', 'it is snowing'),
('la primavera', 'spring'),
('el verano', 'summer'),
('el otoño', 'autumn'),
('el invierno', 'winter'),
('jugar a / al / a la', 'to play'),
('el fútbol', 'football'),
('el baloncesto', 'basketball'),
('el ciclismo', 'cycling'),
('el atletismo', 'athletics'),
('el boxeo', 'boxing'),
('la pelota vasca', 'pelota'),
('el voleibol', 'volleyball'),
('tiempo libre', 'free time'),
('ver la tele', 'to watch TV'),
('salir con amigos', 'to go out with friends'),
('tocar la guitarra', 'to play the guitar'),
('ir al cine', 'to go to the cinema'),
('montar a caballo', 'to ride a horse'),
('bailar en la disco', 'to dance in a disco'),
('jugar al ajedrez', 'to play chess'),
('jugar con videojuegos', 'to play computer games'),
('me apasiona', 'I love'),
('me aburre', 'it is boring'),
('me molesta', 'it annoys me'),
('me fastidia', 'it gets on my nerves'),
('navegar por internet', 'to surf the net'),
('poder', 'to be able'),
('preferir', 'to prefer'),
('prefiero', 'I prefer'),
('querer', 'to like / want'),
('si', 'if'),
('sí', 'yes'),
('por la mañana', 'in the morning'),
('levantarse', 'to get up'),
('lavarse', 'to get washed'),
('me lavo (los dientes)', 'I clean (my teeth)'),
('ducharse', 'to have a shower'),
('cepillarse', 'to brush'),
('me cepillo (el pelo)', 'I brush my hair'),
('peinarse', 'to comb / do hair'),
('ponerse', 'to put on (clothes)'),
('desayunar', 'to have breakfast'),
('despertarse', 'to wake up'),
('vestirse', 'to dress'),
('almorzar(ue)', 'to have lunch'),
('por la tarde', 'in the afternoon'),
('a las trece horas', 'at 13:00 hours'),
('descansar', 'to relax'),
('merendar(ie)', 'to have a snack'),
('pasear al perro', 'to walk the dog'),
('hacer los deberes', 'to do homework'),
('cenar', 'to have supper'),
('acostarse(ue)', 'to go to bed'),
('dormirse(ue)', 'to fall asleep'),
('ir', 'to go'),
('hacer compras', 'to do the shopping'),
('la piscina', 'swimming pool'),
('nadar', 'to swim'),
('el fin de semana', '(at) the weekend'),
('hasta las diez', 'until ten oclock'),
('tarde', 'late'),
('temprano', 'early'),
('de acuerdo', 'agreed'),
('montar en bicicleta', 'to ride a bike'),
('el sábado', 'on Saturday'),
('los sábados', 'on Saturdays'),
('la cocina', 'kitchen'),
('las tostadas', 'toast'),
('los cereales', 'cereal'),
('en tren', 'by train'),
('frente a', 'opposite'),
('el mar', 'the sea'),
('pasarlo bomba', 'to have a great time')
]
}, # a section object
{
'title': '2B Donde vivo yo',
'exercises': [
('Vivo en ...', 'I live in ...'),
('Está en ...', 'It is in ...'),
('la montaña', 'the mountains'),
('la costa', 'the coast'),
('el campo', 'the countryside'),
('la ciudad', 'the city'),
('un pueblo', 'a town'),
('una aldea', 'a village'),
('un barrio', 'a neighbourhood'),
('las afueras', 'the outskirts'),
('¿Dónde está?', 'Where is it?'),
('siempre', 'always'),
('todos los días', 'every day'),
('a menudo', 'often'),
('a veces', 'sometimes'),
('nunca', 'never'),
('una vez a la semana', 'once a week'),
('dos veces a la semana', 'twice a week'),
('un supermercado', 'a supermarket'),
('un parque', 'a park'),
('una estación', 'a station'),
('un banco', 'a bank'),
('un museo', 'a museum'),
('una catedral', 'a cathedral'),
('un zoo', 'a zoo'),
('un colegio', 'a school'),
('un cine', 'a cinema'),
('un parque de atracciones', 'a theme park'),
('la bolera', 'the bowling alley'),
('el polideportivo', 'the sports centre'),
('la piscina', 'the swimming pool'),
('la oficina de Correos', 'the post office'),
('el ayuntamiento', 'the town hall'),
('la iglesia', 'the church'),
('los grandes almacenes', 'the department store'),
('la parada de autobús', 'the bus stop'),
('las tiendas', 'the shops'),
('Sigue/Siga', 'Carry on'),
('Todo recto', 'Straight ahead'),
('Tuerce/Tuerza', 'Turn'),
('Cruza/Cruce', 'Cross'),
('Toma/Tome', 'Take'),
('el puente', 'the bridge'),
('la primera/segunda/tercera', 'the first/second/third'),
('la calle', 'the street'),
('a la derecha', 'on the right'),
('a la izquierda', 'on the left'),
('Mi casa', 'My house'),
('un sótano', 'a basement'),
('la planta baja', 'the ground floor'),
('la primera planta', 'the first floor'),
('el ático', 'the attic'),
('una entrada', 'an entrance hall'),
('las escaleras', 'the stairs'),
('una cocina', 'a kitchen'),
('un salón', 'a living room'),
('un comedor', 'a dining room'),
('un dormitorio', 'a bedroom'),
('un cuarto de baño', 'a bathroom'),
('una ducha', 'a shower'),
('un aseo', 'a toilet'),
('un despacho', 'an office'),
('un jardín', 'a garden'),
('un balcón', 'a balcony'),
('una piscina', 'a swimming pool'),
('Mi dormitorio', 'My bedroom'),
('un armario', 'a wardrobe'),
('una cama', 'a bed'),
('un escritorio', 'a desk'),
('unas estanterías', 'a bookcase / some shelves'),
('una mesita de noche', 'a bedside table'),
('una alfombra', 'a rug'),
('una cómoda', 'a chest of drawers'),
('una silla', 'a chair'),
('unas cortinas', 'curtains'),
('una puerta', 'a door'),
('una ventana', 'a window'),
('una lámpara', 'a lamp'),
('delante de', 'in front of'),
('enfrente de', 'facing'),
('detrás de', 'behind'),
('encima de', 'on / on top of'),
('debajo de', 'under'),
('entre', 'between'),
('al lado de', 'next to'),
('cerca de', 'near to'),
('lejos de', 'far from'),
]
}, # a section object
{
'title': 'Me gusta comer ...',
'exercises': [
('Es la hora de comer', 'It\'s time to eat'),
('la cena', 'evening meal'),
('la comida', 'midday meal'),
('la merienda', '(afternoon) snack'),
('a eso de', 'at about'),
('el chocolate', 'chocolate'),
('el pan de ajo', 'garlic bread'),
('la carne', 'meat'),
('las verduras', 'vegetables'),
('los cereales', 'cereal'),
('los churros', 'churros'),
('un bocadillo de queso', 'cheese sandwich'),
('un paquete de patatas fritas', 'packet of crisps'),
('un pollo asado', 'roast chicken'),
('una paella', 'paella'),
('una pizza', 'pizza'),
('Comida sana', 'Healthy food'),
('el atún', 'tuna'),
('los mariscos', 'shellfish'),
('el pescado', 'fish'),
('el salmón', 'salmon'),
('las gambas', 'prawns'),
('los calamares', 'squid'),
('un melocotón', 'peach'),
('un plátano', 'banana'),
('una ensalada verde', 'green salad'),
('una manzana', 'apple'),
('una naranja', 'orange'),
('contiene mucha grasa', 'it contains a lot of fat'),
('contiene mucho azúcar', 'it contains a lot of sugar'),
('es (muy)', 'it\'s (very)'),
('es sano/a', 'it\'s healthy'),
('es malsano/a', 'it\'s unhealthy'),
('es soso/a', 'it\'s bland'),
('es delicioso/a', 'it\'s delicious'),
('son (muy)', 'they are (very)'),
('son sanos/as', 'they are healthy'),
('son malsanos/as', 'they are unhealthy'),
('son sosos/as', 'they are bland'),
('son deliciosos/as', 'they are delicious'),
('¡Tengo hambre!', 'I\'m hungry!'),
('una coca-cola', 'a coca-cola'),
('una fanta naranja', 'fizzy orange'),
('un café solo', 'black coffee'),
('un café con leche', 'white coffee'),
('un vaso de vino tinto', 'a glass of red wine'),
('un vaso de vino blanco', 'a glass of white wine'),
('una cerveza', 'a beer'),
('un agua mineral con gas', 'sparkling mineral water'),
('un agua mineral sin gas', 'still mineral water'),
('tengo sed', 'I\'m thirsty'),
('para comer', 'to eat'),
('para beber', 'to drink'),
('¡Oiga, camarero!', 'Waiter!'),
('una cuchara', 'a spoon'),
('un tenedor', 'a fork'),
('un cuchillo', 'a knife'),
('¿Dónde está ...?', 'Where is ...?'),
('pedí', 'I asked for'),
('hey', 'there is'),
('una mosca', 'a fly'),
('lo siento', 'I\'m sorry'),
('lo traigo', 'I\'ll bring it'),
('en seguida', 'at once'),
('traigo otro', 'I\'ll bring another'),
('Me encanta la comida', 'I love food'),
('el arroz', 'rice'),
('la pasta', 'pasta'),
('las especias', 'spices'),
('es muy / es poco', 'it\'s very / it\'s not very'),
('contiene mucho', 'it contains a lot (of)'),
('contiene poco', 'it contains little (not a lot of)'),
('utiliza mucho', 'it uses a lot (of)'),
('utiliza poco', 'it uses little (not a lot of)'),
]
}, # a section object
]
}
'''
contents of spelling bee
'''
SPELLING_BEE = {
'title': 'Spelling Bee',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections':[
{
'title': 'Stage 1',
'exercises': [
('bienvenido', 'welcome'),
('y', 'and'),
('pero', 'but'),
('con', 'with'),
('sin', 'without'),
('para', 'in order to'),
('si', 'if'),
('primero', 'first'),
('segundo', 'second'),
('tercero', 'third'),
('un hermano', 'a brother'),
('una hermana', 'a sister'),
('grande', 'big'),
('pequeño', 'small'),
('hay', 'there is/are'),
('ayer', 'yesterday'),
('hoy', 'today'),
('mañana', 'tomorrow'),
('porque', 'because'),
('¿quién?', 'who?'),
('¿dónde?', 'where?'),
('¿cómo?', 'how?'),
('¿por qué?', 'why?'),
('¿qué?', 'what?'),
('¿cuánto?', 'how much/many?')
]
}, # a section object
{
'title': 'Stage 2',
'exercises': [
('lunes', 'Monday'),
('martes', 'Tuesday'),
('miércoles', 'Wednesday'),
('jueves', 'Thursday'),
('viernes', 'Friday'),
('sábado', 'Saturday'),
('domingo', 'Sunday'),
('durante', 'during'),
('antes', 'before'),
('después', 'after'),
('ahora', 'now'),
('la mañana', 'the morning'),
('la tarde', 'the afternoon / the evening'),
('la noche', 'the night'),
('un día', 'a day'),
('una semana', 'a week'),
('un mes', 'a month'),
('un año', 'a year'),
('temprano', 'early'),
('tarde', 'late'),
('nunca', 'never'),
('a veces', 'sometimes'),
('a menudo', 'often'),
('luego', 'next')
]
}, # a section object
{
'title': 'Stage 3',
'exercises': [
('enero', 'January'),
('febrero', 'February'),
('marzo', 'March'),
('abril', 'April'),
('mayo', 'May'),
('junio', 'June'),
('julio', 'July'),
('agosto', 'August'),
('septiembre', 'September'),
('octubre', 'October'),
('noviembre', 'November'),
('diciembre', 'December'),
('muy', 'very'),
('bastante', 'quite'),
('mucho', 'a lot'),
('poco', 'a little'),
('interesante', 'interesting'),
('rápido/a', 'fast'),
('divertido/a', 'fun'),
('fantástico/a', 'brilliant'),
('fatal', 'rubbish/awful'),
('agradable', 'pleasant'),
('difícil', 'difficult'),
('fácil', 'easy'),
('pues', 'then')
]
}, # a section object
{
'title': 'Stage 4',
'exercises': [
('tener', 'to have'),
('ayudar', 'to help'),
('ir', 'to go'),
('jugar', 'to play'),
('hacer', 'to do / to make'),
('llegar', 'to arrive'),
('odiar', 'to hate'),
('ganar', 'to win'),
('perder', 'to lose'),
('nadar', 'to swim'),
('trabajar', 'to work'),
('beber', 'to drink'),
('comer', 'to eat'),
('llamar', 'to call'),
('un ordenador', 'a computer'),
('un móvil', 'a mobile phone'),
('un libro', 'a book'),
('un bolígrafo', 'a pen'),
('entrada', 'entrance'),
('salida', 'exit'),
('quizás', 'maybe'),
('sobre', 'on'),
('debajo', 'under'),
('delante', 'in front'),
('detrás', 'behind')
]
}, # a section object
]
}
'''
contents from tutor
'''
TUTOR = {
'title': 'Tutor Year7',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections':[
{
'title': 'Vocabulary',
'exercises': [
('alumnos', 'students'),
('estuche', 'pencil case'),
('sacapuntas', 'sharpener'),
('regla', 'ruler'),
('pelota', 'ball'),
('ventana', 'window'),
('dar miedo', 'scare'),
('coche', 'car'),
('barco', 'boat / ship'),
('obra de teatro', 'a play'),
('semáforo', 'traffic lights'),
('la derecha', 'to the right / the right'),
('la izquierda', 'to the left / the left'),
('queso', 'cheese'),
('cero', 'zero'),
('la gente', 'people'),
('girar', 'to turn'),
('gota', 'drop'),
('bolos', 'bowling'),
('billar', 'pool'),
('tenis', 'tennis'),
('fútbol', 'football'),
('remo', 'rowing'),
('bucear', 'diving'),
('patinar', 'rollerskating'),
('windsurf', 'windsurfing'),
('loro', 'parrot'),
('marisco', 'sea food'),
('chiringuito', 'restaurant next to beach or sea'),
('vistas', 'sight'),
('creer', 'to believe'),
('maravillosa', 'wonderful'),
('dinero de bolsillo', 'pocket money'),
('sobre', 'about'),
('dar', 'to give'),
('eso', 'that'),
('entonces', 'then'),
('paso la aspiradora', 'to vacuum'),
('mala', 'bad'),
('suerte', 'luck'),
('gastar', 'to spend'),
('suelen', 'usually'),
('películas', 'movies'),
('algún', 'some'),
('ver', 'to watch'),
('tocar', '(instead of jugar) to play (instruments)'),
('país', 'country'),
('Francia', 'France'),
('cálidos', 'warm (weather)'),
('atracciones', 'rides (in a theme park)'),
('¿Qué hora es? / ¿Tienes hora? / ¿Me puede decir la hora?', 'What is the time?'),
('madrugada', 'early morning, dawn'),
('Noruega', 'Norway'),
('Reino Unido', 'U.K.'),
('abren', 'open'),
('cierran', 'closed'),
('levantarse', 'to wake up'),
('navegar', 'to sail, to surf'),
('barrio', 'neighbourhood'),
('tomar', 'to drink'),
('basura', 'junk food'),
('duele', 'ache'),
('doler', 'to hurt, have pain'),
('encuentra', 'found, located'),
('comunicación', 'communication'),
('abogada', 'lawyer'),
('despacho', 'office'),
('empresariales', 'business'),
('la profesión', 'profession, job'),
('médico', 'doctor'),
('¿Que haces?', 'What do you do?'),
('¿A qué te dedicas? / ¿En qué trabajas?', 'What\'s your job?'),
('liada', 'I\'m busy'),
('saber', 'to know'),
('siempre', 'always'),
('pues', 'because ...'),
('más', 'more'),
('claro', 'of couse'),
('verdad', 'true'),
('relajado', 'relaxed'),
('estupendo', 'wonderful'),
('oir', 'to hear'),
('todavía', 'yet, still'),
('seguro', 'for sure'),
('decir', 'to say'),
('algo', 'something'),
('pesado', 'heavy'),
('pesadísimo', 'very annoying, very heavy'),
('alguna', 'sometime, any'),
('venir', 'to come'),
('sorpresa', 'surprise'),
('venga', 'ok'),
('vemos', 'see you'),
]
}
]
}
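# Hypothetical helper, not part of the original file, showing one way these
# course dictionaries (ZHENGLIN_YEAR7, ZOOM_ESPANOL_1, SPELLING_BEE, TUTOR)
# could be consumed, e.g. printing a section as simple flash cards:
def print_section(course, section_title):
    for section in course['sections']:
        if section['title'] == section_title:
            for spanish, english in section['exercises']:
                print(u'{0} -> {1}'.format(spanish, english))
# Example: print_section(ZHENGLIN_YEAR7, 'Family')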
| # -*- coding: utf-8 -*-
# pylint: disable=C0301
'''
contents of Zhenglin's Spanish year 7
'''
ZHENGLIN_YEAR7 = {
'title': 'Spanish Year 7',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections':[
{
'title': 'Numbers 1 - 10',
'exercises': [
('uno', '1'),
('dos', '2'),
('tres', '3'),
('cuatro', '4'),
('cinco', '5'),
('seis', '6'),
('siete', '7'),
('ocho', '8'),
('nueve', '9'),
('diez', '10'),
('números', 'numbers')
]
}, # a section object
{
'title': 'Family',
'exercises': [
('En mi familia, hay...', 'In my family, there is/are...'),
('y', 'and'),
('mi', 'my (singular noun)'),
('mis', 'my (plural noun)'),
('los abuelos', 'grandparents'),
('el abuelo', 'grandfather'),
('la abuela', 'grandmother'),
('los padres', 'parents'),
('el padre', 'the father'),
('la madre', 'the mother'),
('el padrastro', 'step dad'),
('la madrastra', 'step mother'),
('el marido', 'husband'),
('la esposa??', 'wife'),
('los parientes', 'the relatives'),
('la tía', 'aunt'),
('el tío', 'uncle'),
('los tíos', 'uncles'),
('las tías', 'aunts'),
('el primo', 'male cousin'),
('la prima', 'female cousin'),
('los primos', 'the cousins'),
('las primas', 'only girl cousins'),
('los niños', 'the children'),
('el hermano', 'the brother'),
('la hermana', 'the sister'),
('el hermanastro', 'step brother'),
('la hermanastra', 'step sister'),
('el hijo único', 'the only son'),
('la hija única', 'the only daughter'),
('casado', 'married (male)'),
('casada', 'married (female)'),
('divorciado', 'divorced (male)'),
('divorciada', 'divorced (female)'),
('el nieto', 'the grandson'),
('la nieta', 'the granddaughter'),
]
}, # a section object
{
'title': 'Talking about my family',
'exercises': [
('Tengo', 'I have'),
('No', '(I) do not (have)'),
('Se llama', '(He/She) is called'),
('Se llaman', '(They are) called'),
('el/un', 'the/a (singular nouns, masculine)'),
('la/una', 'the/a (singular nouns, feminine)'),
('los/unos', 'the/some (plural nouns masculine and feminine)'),
('las/unas', 'the/some (plural nouns feminine only)'),
('unas frases sobre la familia', 'some phrases about the family'),
('El tío se llama Ted', 'The uncle is called Ted'),
('Tengo un hermano', 'I have a brother'),
('No tengo una hermana', 'I do not have a sister'),
('Los abuelos se llaman Fred y Phil', 'The grandfathers are called Fred and Phil'),
('¡Hola! Tengo un hermano pero no tengo hermanas', 'Hello! I have a brother but I do not have sisters'),
('Soy', 'I am'),
('de', 'of/from'),
('más largas', 'longer'),
('El hermano de mi madre es mi tío', 'My mother\'s brother is my uncle'),
('Los padres de mi padre son mis abuelos', 'My father\'s parents are my grandparents'),
]
}, # a section object
{
'title': 'Numbers 11-31',
'exercises': [
('once', '11'),
('doce', '12'),
('trece', '13'),
('catorce', '14'),
('quince', '15'),
('dieciséis', '16'),
('diecisiete', '17'),
('dieciocho', '18'),
('diecinueve', '19'),
('veinte', '20'),
('veintiuno', '21'),
('veintidós', '22'),
('veintitrés', '23'),
('veinticuatro', '24'),
('veinticinco', '25'),
('veintiséis', '26'),
('veintisiete', '27'),
('veintiocho', '28'),
('veintinueve', '29'),
('treinta', '30'),
('treinta y uno', '31')
]
}, # a section object
{
'title': 'Mi Vida Loca',
'exercises': [
('loca', 'crazy'),
('vida', 'life'),
('también', 'also'),
('perdón', 'excuse me'),
('gracias', 'thank you'),
('adiós', 'good bye'),
('hola', 'hello'),
('¿Cómo se llama...?', 'What is the name of ...?'),
('¿Cómo te llamas?', 'What is your name?'),
('la calle', 'the street'),
('eres', 'are you (?)'),
('De nada', 'You are welcome'),
('No entiendo', 'I do not understand'),
('entiendo', 'I get it'),
('el amigo / la amiga', 'the friend'),
('sí', 'yes'),
('si', 'if'),
('nunca', 'never'),
('un piso', 'a flat'),
('dormitorio', 'bedroom'),
('salir', 'to go out / to leave'),
('quiero', 'I want'),
('un cafe', 'a coffee'),
('con leche', 'with milk'),
('una tostada', 'a piece of toast'),
('frío', 'cold'),
('fría', 'cool'),
('cerveza', 'beer'),
('caliente', 'warm/hot')
]
}, # a section object
{
'title': 'Los meses',
'exercises': [
('meses', 'months'),
('enero', 'January'),
('febrero', 'February'),
('marzo', 'March'),
('abril', 'April'),
('mayo', 'May'),
('junio', 'June'),
('julio', 'July'),
('agosto', 'August'),
('septiembre/setiembre', 'September'),
('octubre', 'October'),
('noviembre', 'November'),
('diciembre', 'December'),
('Feliz Cumpleaños', 'Happy birthday')
]
},
]
}
'''
contents of ZOOM espanol 1
'''
ZOOM_ESPANOL_1 = {
'title': 'Zoom Espanol 1',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections': [
{
'title': 'O Me presento',
'exercises': [
('¡Hola!', 'Hello!'),
('Saludos', 'Greetings'),
('Adiós', 'Goodbye'),
('Hasta luego', 'See you later'),
('Hasta pronto', 'See you soon'),
('Soy ana', 'I am Ana'),
('Me llamo Federico', 'My name is Federico'),
('escucha', 'listen'),
('repite', 'repeat'),
('habla', 'speak'),
('lee', 'read'),
('escribe', 'write'),
('pregunta', 'ask'),
('indica', 'point to'),
('contesta', 'answer'),
('mira', 'look'),
('empareja', 'match'),
('¿Cómo te llamas?', 'What\'s your name?'),
('Se llama Olivia', 'Her name is Olivia'),
('¿Qué tal?', 'How are you?'),
('Buenos días', 'Good morning'),
('Buenas tardes', 'Good afternoon'),
('Buenas noches', 'Good night'),
('uno', 'one'),
('dos', 'two'),
('tres', 'three'),
('cuatro', 'four'),
('cinco', 'five'),
('seis', 'six'),
('siete', 'seven'),
('ocho', 'eight'),
('nueve', 'nine'),
('diez', 'ten'),
('once', 'eleven'),
('doce', 'twelve'),
('trece', 'thirteen'),
('catorce', 'fourteen'),
('quince', 'fifteen'),
('dieciséis', 'sixteen'),
('diecisiete', 'seventeen'),
('dieciocho', 'eighteen'),
('diecinueve', 'nineteen'),
('veinte', 'twenty'),
('el abuelo', 'grandfather'),
('la abuela', 'grandmother'),
('los abuelos', 'grandparents'),
('mi padre', 'my father'),
('mi madre', 'my mother'),
('mis padres', 'my parents'),
('el hermano', 'brother'),
('la hermana', 'sister'),
('el tío', 'uncle'),
('la tía', 'aunt'),
('el primo', 'cousin(m)'),
('la prima', 'cousin(f)'),
('el hermanastro', 'stepbrother'),
('la hermanastra', 'stepsister'),
('el padrastro', 'stepfather'),
('la madrastra', 'stepmother'),
('Soy hijo único', 'I am an only child (m)'),
('Soy hija única', 'I am an only child (f)'),
('la familia', 'the family'),
('¿Cuántos años tienes?', 'How old are you?'),
('Tengo X años', 'I am X years old'),
('treinta', 'thirty'),
('cuarenta', 'forty'),
('cincuenta', 'fifty'),
('sesenta', 'sixty'),
('setenta', 'seventy'),
            ('ochenta', 'eighty'),
('noventa', 'ninety'),
('cien', 'a hundred'),
('gente y números', 'people and numbers'),
('hay', 'there is, there are'),
('no hay', 'there is not, there are not'),
('ser', 'to be'),
('tener', 'to have'),
('llamarse', 'to be called')
]
}, # a section object
{
'title': '1A Me describo',
'exercises': [
('Cumpleaños y fiestas', 'birthdays and festivals'),
('enero', 'January'),
('febrero', 'February'),
            ('marzo', 'March'),
('abril', 'April'),
('mayo', 'May'),
('junio', 'June'),
('julio', 'July'),
('agosto', 'August'),
('septiembre/setiembre', 'September'),
('octubre', 'October'),
('noviembre', 'November'),
('diciembre', 'December'),
('lunes', 'Monday'),
('martes', 'Tuesday'),
('miércoles', 'Wednesday'),
('jueves', 'Thursday'),
('viernes', 'Friday'),
('sábado', 'Saturday'),
('domingo', 'Sunday'),
('mis mascotas', 'my pets'),
('un ratón', 'a mouse'),
('un pájaro', 'a bird'),
('un gato', 'a cat'),
('una rata', 'a rat'),
('una tortuga', 'a turtle / tortoise'),
('un perro', 'a dog'),
('una araña', 'a spider'),
('un pez', 'a fish'),
('un conejo', 'a rabbit'),
('un caballo', 'a horse'),
('una cobaya', 'a guinea pig'),
('una serpiente', 'a snake'),
('blanco/a', 'white'),
('negro/a', 'black'),
('rojo/a', 'red'),
('azul', 'blue'),
('verde', 'green'),
('amarillo/a', 'yellow'),
('naranja', 'orange'),
('gris', 'grey'),
('marrón', 'brown'),
('rosa', 'pink'),
('morado/a', 'purple'),
('lenguas y nacionalidades', 'languages and nationalities'),
('inglés / inglesa', 'English'),
('escocés / escocesa', 'Scottish'),
('irlandés / irlandesa', 'Irish'),
('galés / galesa', 'Welsh'),
('francés / francesa', 'French'),
('español(a)', 'Spanish'),
('portugués / portuguesa', 'Portuguese'),
('italiano/a', 'Italian'),
('¿Cómo eres?', 'What are you like?'),
('Tengo...', 'I have...'),
('el pelo', 'hair'),
('largo', 'long'),
('corto', 'short'),
('liso', 'straight'),
('rizado', 'curly'),
('ondulado', 'wavy'),
('de punta', 'spiky'),
('los ojos', 'eyes'),
('bigote', 'moustache'),
('barba', 'beard'),
('pecas', 'freckles'),
('Llevo gafas', 'I wear glasses'),
('Soy...', 'I am...'),
('alto/a', 'tall'),
('bajo/a', 'short'),
('delgado/a', 'slim'),
('gordo/a', 'fat'),
('de talla mediana', 'medium size'),
('ordenado/a', 'tidy'),
('desordenado/a', 'untidy'),
('simpático/a', 'friendly'),
('antipático/a', 'unfriendly'),
('paciente', 'patient'),
('impaciente', 'impatient'),
('estudioso/a', 'studious'),
('perezoso/a', 'lazy'),
('testarudo/a', 'stubborn'),
('extrovertido/a', 'outgoing'),
('tímido/a', 'shy'),
('inteligente', 'intelligent'),
('bobo/a', 'silly'),
('maduro/a', 'mature'),
('inmaduro/a', 'immature'),
('el chico', 'the boy'),
('la chica', 'the girl'),
('alguien', 'someone'),
('un poco', 'a little'),
('bastante', 'quite'),
('muy', 'very'),
('demasiado', 'too'),
('y', 'and'),
('pero', 'but'),
('también', 'also'),
('sin embargo', 'however'),
('casa', 'home')
]
}, # a section object
{
'title': '1B El insti',
'exercises': [
('la educación física', 'PE'),
('el español', 'Spanish'),
('el inglés', 'English'),
('la geografía', 'geography'),
('la historia', 'history'),
('la informática', 'ICT'),
('la tecnología', 'design and technology'),
('las ciencias', 'science'),
('las matemáticas', 'mathematics'),
('fácil', 'easy'),
            ('difícil', 'difficult'),
('útil', 'useful'),
('aburrido/a', 'boring'),
('divertido/a', 'fun, amusing'),
('interesante', 'interesting'),
('un poco', 'a little'),
('bastante', 'fairly, quite'),
('tan', 'so'),
('muy', 'very'),
('demasiado', 'too (much)'),
('para mí', 'for me'),
('pero', 'but'),
('me gusta', 'I like'),
('te gusta', 'you like'),
('correcto', 'true'),
('mentira', 'false'),
('la hora y el horario', 'time and timetable'),
('Es la una', 'It is one o\'clock'),
('... y cinco', '... five past one'),
('... y cuarto', '... a quarter past one'),
('... y veinte', '... twenty past one'),
('... y media', '... half past one'),
('Son las dos', 'It is two o\'clock'),
            ('... menos veinticinco', '... twenty-five to two'),
('... menos cuarto', '... a quarter to two'),
('... menos diez', '... ten to two'),
            ('Es el mediodía', 'It is midday'),
('Es la medianoche', 'It is midnight'),
('las instalaciones', 'school buildings'),
('el aula', 'the classroom'),
('el gimnasio', 'the gym'),
('el laboratorio', 'the laboratory'),
('el patio', 'the playground'),
('la biblioteca', 'the library'),
('la oficina', 'the office'),
            ('la oficina del director', 'the headteacher\'s office'),
('grande', 'large'),
('pequeño/a', 'small'),
('moderno/a', 'modern'),
('antiguo/a', 'old'),
('bonito/a', 'attractive'),
('feo/a', 'ugly'),
('cómodo/a', 'comfortable'),
('hay', 'there is / there are'),
('leer un libro', 'to read a book'),
('comer un bocadillo', 'to eat a sandwich'),
('estudiar ciencias', 'to study science'),
('charlar con amigos', 'to chat with friends'),
('practicar deporte', 'to play sport'),
('escribir cartas', 'to write letters'),
('la ropa', 'clothes'),
('llevar', 'to wear'),
('un jersey', 'a jersey'),
('una camisa', 'a shirt'),
            ('una camiseta', 'a T-shirt'),
('una corbata', 'a tie'),
('una falda', 'a skirt'),
('una sudadera', 'a sweatshirt'),
('unas zapatillas', 'trainers'),
('unos calcetines', 'socks'),
('unos pantalones', 'trousers'),
('unos vaqueros', 'jeans'),
('unos zapatos', 'shoes'),
('incómodo/a', 'uncomfortable'),
('elegante', 'stylish'),
('práctico/a', 'practical'),
('formal', 'formal'),
('ridículo/a', 'ridiculous'),
('informal', 'informal')
]
}, # a section object
{
'title': '2A Mi semana',
'exercises': [
('el tiempo', 'the weather'),
('hace buen tiempo', 'it is fine / it is a nice day'),
            ('hace mal tiempo', 'it is bad weather / it is not a nice day'),
('hace sol', 'it is sunny'),
('hace calor', 'it is hot'),
('hace frío', 'it is cold'),
('hace viento', 'it is windy'),
('hay tormenta', 'it is stormy'),
('hay niebla', 'it is foggy'),
('hay nubes', 'it is cloudy'),
('llueve', 'it is raining'),
('nieva', 'it is snowing'),
('la primavera', 'spring'),
('el verano', 'summer'),
('el otoño', 'autumn'),
('el invierno', 'winter'),
('jugar a / al / a la', 'to play'),
('el fútbol', 'football'),
('el baloncesto', 'basketball'),
('el ciclismo', 'cycling'),
('el atletismo', 'athletics'),
('el boxeo', 'boxing'),
('la pelota vasca', 'pelota'),
('el voleibol', 'volleyball'),
('tiempo libre', 'free time'),
('ver la tele', 'to watch TV'),
('salir con amigos', 'to go out with friends'),
('tocar la guitarra', 'to play the guitar'),
('ir al cine', 'to go to the cinema'),
('montar a caballo', 'to ride a horse'),
('bailar en la disco', 'to dance in a disco'),
('jugar al ajedrez', 'to play chess'),
('jugar con videojuegos', 'to play computer games'),
('me apasiona', 'I love'),
('me aburre', 'it is boring'),
('me molesta', 'it annoys me'),
('me fastidia', 'it gets on my nerves'),
('navegar por internet', 'to surf the net'),
('poder', 'to be able'),
('preferir', 'to prefer'),
('prefiero', 'I prefer'),
('querer', 'to like / want'),
('si', 'if'),
('sí', 'yes'),
('por la mañana', 'in the morning'),
('levantarse', 'to get up'),
('lavarse', 'to get washed'),
('me lavo (los dientes)', 'I clean (my teeth)'),
('ducharse', 'to have a shower'),
('cepillarse', 'to brush'),
('me cepillo (el pelo)', 'I brush my hair'),
('peinarse', 'to comb / do hair'),
('ponerse', 'to put on (clothes)'),
('desayunar', 'to have breakfast'),
('despertarse', 'to wake up'),
('vestirse', 'to dress'),
('almorzar(ue)', 'to have lunch'),
('por la tarde', 'in the afternoon'),
('a las trece horas', 'at 13:00 hours'),
('descansar', 'to relax'),
('merendar(ie)', 'to have a snack'),
('pasear al perro', 'to walk the dog'),
('hacer los deberes', 'to do homework'),
            ('cenar', 'to have supper'),
('acostarse(ue)', 'to go to bed'),
('dormirse(ue)', 'to fall asleep'),
('ir', 'to go'),
('hacer compras', 'to do the shopping'),
('la piscina', 'swimming pool'),
('nadar', 'to swim'),
('el fin de semana', '(at) the weekend'),
            ('hasta las diez', 'until ten o\'clock'),
('tarde', 'late'),
('temprano', 'early'),
('de acuerdo', 'agreed'),
('montar en bicicleta', 'to ride a bike'),
('el sábado', 'on Saturday'),
('los sábados', 'on Saturdays'),
('la cocina', 'kitchen'),
('las tostadas', 'toast'),
('los cereales', 'cereal'),
('en tren', 'by train'),
('frente a', 'opposite'),
('el mar', 'the sea'),
('pasarlo bomba', 'to have a great time')
]
}, # a section object
{
'title': '2B Donde vivo yo',
'exercises': [
('Vivo en ...', 'I live in ...'),
('Está en ...', 'It is in ...'),
('la montaña', 'the mountains'),
('la costa', 'the coast'),
('el campo', 'the countryside'),
('la ciudad', 'the city'),
('un pueblo', 'a town'),
('una aldea', 'a village'),
('un barrio', 'a neighbourhood'),
('las afueras', 'the outskirts'),
('¿Dónde está?', 'Where is it?'),
('siempre', 'always'),
('todos los días', 'every day'),
('a menudo', 'often'),
('a veces', 'sometimes'),
('nunca', 'never'),
('una vez a la semana', 'once a week'),
('dos veces a la semana', 'twice a week'),
('un supermercado', 'a supermarket'),
('un parque', 'a park'),
('una estación', 'a station'),
('un banco', 'a bank'),
('un museo', 'a museum'),
('una catedral', 'a cathedral'),
('un zoo', 'a zoo'),
('un colegio', 'a school'),
('un cine', 'a cinema'),
('un parque de atracciones', 'a theme park'),
('la bolera', 'the bowling alley'),
('el polideportivo', 'the sports centre'),
('la piscina', 'the swimming pool'),
('la oficina de Correos', 'the post office'),
('el ayuntamiento', 'the town hall'),
('la iglesia', 'the church'),
('los grandes almacenes', 'the department store'),
('la parada de autobús', 'the bus stop'),
('las tiendas', 'the shops'),
('Sigue/Siga', 'Carry on'),
('Todo recto', 'Straight ahead'),
('Tuerce/Tuerza', 'Turn'),
('Cruza/Cruce', 'Cross'),
('Toma/Tome', 'Take'),
('el puente', 'the bridge'),
            ('la primera/segunda/tercera', 'the first/second/third'),
('la calle', 'the street'),
('a la derecha', 'on the right'),
('a la izquierda', 'on the left'),
('Mi casa', 'My house'),
('un sótano', 'a basement'),
('la planta baja', 'the ground floor'),
('la primera planta', 'the first floor'),
('el ático', 'the attic'),
('una entrada', 'an entrance hall'),
('las escaleras', 'the stairs'),
('una cocina', 'a kitchen'),
('un salón', 'a living room'),
('un comedor', 'a dining room'),
('un dormitorio', 'a bedroom'),
('un cuarto de baño', 'a bathroom'),
('una ducha', 'a shower'),
('un aseo', 'a toilet'),
('un despacho', 'an office'),
('un jardín', 'a garden'),
('un balcón', 'a balcony'),
('una piscina', 'a swimming pool'),
('Mi dormitorio', 'My bedroom'),
('un armario', 'a wardrobe'),
('una cama', 'a bed'),
('un escritorio', 'a desk'),
('unas estanterías', 'a bookcase / some shelves'),
            ('una mesita de noche', 'a bedside table'),
('una alfombra', 'a rug'),
('una cómoda', 'a chest of drawers'),
('una silla', 'a chair'),
('unas cortinas', 'curtains'),
('una puerta', 'a door'),
('una ventana', 'a window'),
('una lámpara', 'a lamp'),
('delante de', 'in front of'),
('enfrente de', 'facing'),
('detrás de', 'behind'),
('encima de', 'on / on top of'),
('debajo de', 'under'),
('entre', 'between'),
('al lado de', 'next to'),
('cerca de', 'near to'),
('lejos de', 'far from'),
]
}, # a section object
{
'title': 'Me gusta comer ...',
'exercises': [
('Es la hora de comer', 'It\'s time to eat'),
('la cena', 'evening meal'),
('la comida', 'midday meal'),
('la merienda', '(afternoon) snack'),
('a eso de', 'at about'),
('el chocolate', 'chocolate'),
('el pan de ajo', 'garlic bread'),
('la carne', 'meat'),
('las verduras', 'vegetables'),
('los cereales', 'cereal'),
('los churros', 'churros'),
('un bocadillo de queso', 'cheese sandwich'),
('un paquete de patatas fritas', 'packet of crisps'),
('un pollo asado', 'roast chicken'),
('una paella', 'paella'),
('una pizza', 'pizza'),
('Comida sana', 'Healthy food'),
('el atún', 'tuna'),
('los mariscos', 'shellfish'),
('el pescado', 'fish'),
('el salmón', 'salmon'),
('las gambas', 'prawns'),
('los calamares', 'squid'),
('un melocotón', 'peach'),
('un plátano', 'banana'),
('una ensalada verde', 'green salad'),
('una manzana', 'apple'),
('una naranja', 'orange'),
('contiene mucha grasa', 'it contains a lot of fat'),
('contiene mucho azúcar', 'it contains a lot of sugar'),
('es (muy)', 'it\'s (very)'),
('es sano/a', 'it\'s healthy'),
('es malsano/a', 'it\'s unhealthy'),
('es soso/a', 'it\'s bland'),
('es delicioso/a', 'it\'s delicious'),
('son (muy)', 'they are (very)'),
('son sanos/as', 'they are healthy'),
('son malsanos/as', 'they are unhealthy'),
('son sosos/as', 'they are bland'),
('son deliciosos/as', 'they are delicious'),
('¡Tengo hambre!', 'I\'m hungry!'),
('una coca-cola', 'a coca-cola'),
('una fanta naranja', 'fizzy orange'),
('un café solo', 'black coffee'),
('un café con leche', 'white coffee'),
('un vaso de vino tinto', 'a glass of red wine'),
('un vaso de vino blanco', 'a glass of white wine'),
('una cerveza', 'a beer'),
('un agua mineral con gas', 'sparkling mineral water'),
('un agua mineral sin gas', 'still mineral water'),
('tengo sed', 'I\'m thirsty'),
('para comer', 'to eat'),
('para beber', 'to drink'),
('¡Oiga, camarero!', 'Waiter!'),
('una cuchara', 'a spoon'),
('un tenedor', 'a fork'),
('un cuchillo', 'a knife'),
('¿Dónde está ...?', 'Where is ...?'),
('pedí', 'I asked for'),
            ('hay', 'there is'),
('una mosca', 'a fly'),
('lo siento', 'I\'m sorry'),
('lo traigo', 'I\'ll bring it'),
('en seguida', 'at once'),
('traigo otro', 'I\'ll bring another'),
('Me encanta la comida', 'I love food'),
('el arroz', 'rice'),
('la pasta', 'pasta'),
('las especias', 'spices'),
('es muy / es poco', 'it\'s very / it\'s not very'),
('contiene mucho', 'it contains a lot (of)'),
('contiene poco', 'it contains little (not a lot of)'),
('utiliza mucho', 'it uses a lot (of)'),
('utiliza poco', 'it uses little (not a lot of)'),
]
}, # a section object
]
}
'''
contents of spelling bee
'''
SPELLING_BEE = {
'title': 'Spelling Bee',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections':[
{
'title': 'Stage 1',
'exercises': [
('bienvenido', 'welcome'),
('y', 'and'),
('pero', 'but'),
('con', 'with'),
('sin', 'without'),
('para', 'in order to'),
('si', 'if'),
('primero', 'first'),
('segundo', 'second'),
('tercero', 'third'),
('un hermano', 'a brother'),
('una hermana', 'a sister'),
('grande', 'big'),
('pequeño', 'small'),
('hay', 'there is/are'),
('ayer', 'yesterday'),
('hoy', 'today'),
('mañana', 'tomorrow'),
('porque', 'because'),
('¿quién?', 'who?'),
('¿dónde?', 'where?'),
('¿cómo?', 'how?'),
('¿por qué?', 'why?'),
('¿qué?', 'what?'),
('¿cuánto?', 'how much/many?')
]
}, # a section object
{
'title': 'Stage 2',
'exercises': [
('lunes', 'Monday'),
('martes', 'Tuesday'),
('miércoles', 'Wednesday'),
('jueves', 'Thursday'),
('viernes', 'Friday'),
('sábado', 'Saturday'),
('domingo', 'Sunday'),
('durante', 'during'),
('antes', 'before'),
('después', 'after'),
('ahora', 'now'),
('la mañana', 'the morning'),
('la tarde', 'the afternoon / the evening'),
('la noche', 'the night'),
('un día', 'a day'),
('una semana', 'a week'),
('un mes', 'a month'),
('un año', 'a year'),
('temprano', 'early'),
('tarde', 'late'),
('nunca', 'never'),
('a veces', 'sometimes'),
('a menudo', 'often'),
('luego', 'next')
]
}, # a section object
{
'title': 'Stage 3',
'exercises': [
('enero', 'January'),
('febrero', 'February'),
('marzo', 'March'),
('abril', 'April'),
('mayo', 'May'),
('junio', 'June'),
('julio', 'July'),
('agosto', 'August'),
('septiembre', 'September'),
('octubre', 'October'),
('noviembre', 'November'),
('diciembre', 'December'),
('muy', 'very'),
('bastante', 'quite'),
('mucho', 'a lot'),
('poco', 'a little'),
('interesante', 'interesting'),
('rápido/a', 'fast'),
('divertido/a', 'fun'),
('fantástico/a', 'brilliant'),
('fatal', 'rubbish/awful'),
('agradable', 'pleasant'),
('difícil', 'difficult'),
('fácil', 'easy'),
('pues', 'then')
]
}, # a section object
{
'title': 'Stage 4',
'exercises': [
('tener', 'to have'),
('ayudar', 'to help'),
('ir', 'to go'),
('jugar', 'to play'),
            ('hacer', 'to do / to make'),
('llegar', 'to arrive'),
('odiar', 'to hate'),
('ganar', 'to win'),
('perder', 'to lose'),
('nadar', 'to swim'),
('trabajar', 'to work'),
('beber', 'to drink'),
('comer', 'to eat'),
('llamar', 'to call'),
('un ordenador', 'a computer'),
('un móvil', 'a mobile phone'),
('un libro', 'a book'),
('un bolígrafo', 'a pen'),
('entrada', 'entrance'),
('salida', 'exit'),
('quizás', 'maybe'),
('sobre', 'on'),
('debajo', 'under'),
('delante', 'in front'),
('detrás', 'behind')
]
}, # a section object
]
}
'''
contents from tutor
'''
TUTOR = {
'title': 'Tutor Year7',
'user': 'Zhenglin',
'template': 'table',
'subject': 'spanish',
'sections':[
{
'title': 'Vocabulary',
'exercises': [
('alumnos', 'students'),
('estuche', 'pencil case'),
('sacapuntas', 'sharpener'),
('regla', 'ruler'),
('pelota', 'ball'),
('ventana', 'window'),
('dar miedo', 'scare'),
('coche', 'car'),
('barco', 'boat / ship'),
('obra de teatro', 'a play'),
('semáforo', 'traffic lights'),
('la derecha', 'to the right / the right'),
('la izquierda', 'to the left / the left'),
('queso', 'cheese'),
('cero', 'zero'),
('la gente', 'people'),
('girar', 'to turn'),
('gota', 'drop'),
('bolos', 'bowling'),
('billar', 'pool'),
('tenis', 'tennis'),
('fútbol', 'football'),
('remo', 'rowing'),
('bucear', 'diving'),
('patinar', 'rollerskating'),
('windsurf', 'windsurfing'),
('loro', 'parrot'),
('marisco', 'sea food'),
('chiringuito', 'restaurant next to beach or sea'),
            ('vistas', 'views / sights'),
('creer', 'to believe'),
('maravillosa', 'wonderful'),
('dinero de bolsillo', 'pocket money'),
('sobre', 'about'),
('dar', 'to give'),
('eso', 'that'),
('entonces', 'then'),
            ('paso la aspiradora', 'I vacuum'),
('mala', 'bad'),
('suerte', 'luck'),
('gastar', 'to spend'),
('suelen', 'usually'),
('películas', 'movies'),
('algún', 'some'),
('ver', 'to watch'),
('tocar', '(instead of jugar) to play (instruments)'),
('país', 'country'),
('Francia', 'France'),
('cálidos', 'warm (weather)'),
('atracciones', 'rides (in a theme park)'),
            ('¿Qué hora es? / ¿Tienes hora? / ¿Me puede decir la hora?', 'What is the time?'),
('madrugada', 'early morning, dawn'),
('Noruega', 'Norway'),
('Reino Unido', 'U.K.'),
('abren', 'open'),
            ('cierran', '(they) close'),
            ('levantarse', 'to get up'),
('navegar', 'to sail, to surf'),
('barrio', 'neighbourhood'),
('tomar', 'to drink'),
('basura', 'junk food'),
('duele', 'ache'),
('doler', 'to hurt, have pain'),
('encuentra', 'found, located'),
('comunicación', 'communication'),
('abogada', 'lawyer'),
('despacho', 'office'),
('empresariales', 'business'),
('la profesión', 'profession, job'),
('médico', 'doctor'),
            ('¿Qué haces?', 'What do you do?'),
('¿A qué te dedicas? / ¿En qué trabajas?', 'What\'s your job?'),
('liada', 'I\'m busy'),
('saber', 'to know'),
('siempre', 'always'),
('pues', 'because ...'),
('más', 'more'),
            ('claro', 'of course'),
('verdad', 'true'),
('relajado', 'relaxed'),
('estupendo', 'wonderful'),
            ('oír', 'to hear'),
            ('todavía', 'yet, still'),
('seguro', 'for sure'),
('decir', 'to say'),
('algo', 'something'),
('pesado', 'heavy'),
('pesadísimo', 'very annoying, very heavy'),
('alguna', 'sometime, any'),
('venir', 'to come'),
('sorpresa', 'surprise'),
('venga', 'ok'),
('vemos', 'see you'),
]
}
]
}
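# The four workbooks above appear to share one schema: a dict with 'title',
# 'user', 'template' and 'subject' strings plus a 'sections' list, where each
# section is {'title': str, 'exercises': [(prompt, answer), ...]}.
# A consumer could iterate over them like this (illustrative sketch only):
#
#     for section in TUTOR['sections']:
#         for prompt, answer in section['exercises']:
#             print(prompt, '->', answer)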
| en | 0.475138 | # -*- coding: utf-8 -*- # pylint: disable=C0301 contents of Zhenglin's Spanish year 7 # a section object # a section object # a section object # a section object # a section object contents of ZOOM espanol 1 # a section object # a section object # a section object # a section object # a section object # a section object contents of spelling bee # a section object # a section object # a section object # a section object contents from tutor | 2.44469 | 2 |
discopy/drawing.py | Doomsk/discopy | 1 | 6631578 | <reponame>Doomsk/discopy
# -*- coding: utf-8 -*-
"""
Drawing module.
"""
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
import networkx as nx
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
COLORS = {
'red': '#e8a5a5',
'green': '#d8f8d8',
'blue': '#776ff3',
'yellow': '#f7f700',
'black': '#000000',
}
def diagram_to_nx(diagram, scale=(1, 1), pad=(0, 0)):
"""
Builds a networkx graph, called by :meth:`Diagram.draw`.
Returns
-------
graph, positions, labels : tuple
where:
* :code:`graph` is a networkx graph with nodes for inputs, outputs,
boxes and wires,
* :code:`positions` is a dict from nodes to pairs of floats,
* :code:`labels` is a dict from nodes to strings.
"""
graph, pos, labels = nx.DiGraph(), dict(), dict()
def add_node(node, position, label=None):
graph.add_node(node)
pos.update({node: position})
if label is not None:
labels.update({node: label})
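    # add_box places one node for the box itself plus one wire node per input
    # and output, then returns the scan of open wires with the box's domain
    # wires replaced by its codomain wires.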
def add_box(scan, box, off, depth, x_pos):
node = 'wire_box_{}'.format(depth)\
if getattr(box, "draw_as_wire", False) else 'box_{}'.format(depth)
add_node(node, (x_pos, len(diagram) - depth - .5),
getattr(box, "drawing_name", box.name))
for i, _ in enumerate(box.dom):
wire, position = 'wire_dom_{}_{}'.format(depth, i), (
pos[scan[off + i]][0], len(diagram) - depth - .25)
add_node(wire, position, str(box.dom[i]))
graph.add_edge(scan[off + i], wire)
graph.add_edge(wire, node)
for i, _ in enumerate(box.cod):
wire, position = 'wire_cod_{}_{}'.format(depth, i), (
x_pos - len(box.cod[1:]) / 2 + i, len(diagram) - depth - .75)
add_node(wire, position, str(box.cod[i]))
graph.add_edge(node, wire)
return scan[:off] + ['wire_cod_{}_{}'.format(depth, i)
for i, _ in enumerate(box.cod)]\
+ scan[off + len(box.dom):]
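    # make_space picks the x coordinate for the next box and, when neighbouring
    # nodes would overlap it, shifts the already-placed nodes left or right to
    # make room before returning that coordinate.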
def make_space(scan, box, off):
if not scan:
return 0
half_width = len(box.cod[:-1]) / 2 + 1
if not box.dom:
if not off:
x_pos = pos[scan[0]][0] - half_width
elif off == len(scan):
x_pos = pos[scan[-1]][0] + half_width
else:
right = pos[scan[off + len(box.dom)]][0]
x_pos = (pos[scan[off - 1]][0] + right) / 2
else:
right = pos[scan[off + len(box.dom) - 1]][0]
x_pos = (pos[scan[off]][0] + right) / 2
if off and pos[scan[off - 1]][0] > x_pos - half_width:
limit = pos[scan[off - 1]][0]
pad = limit - x_pos + half_width
for node, position in pos.items():
if position[0] <= limit:
pos[node] = (pos[node][0] - pad, pos[node][1])
if off + len(box.dom) < len(scan)\
and pos[scan[off + len(box.dom)]][0] < x_pos + half_width:
limit = pos[scan[off + len(box.dom)]][0]
pad = x_pos + half_width - limit
for node, position in pos.items():
if position[0] >= limit:
pos[node] = (pos[node][0] + pad, pos[node][1])
return x_pos
def scale_and_pad(pos):
widths, heights = zip(*pos.values())
min_width, min_height = min(widths), min(heights)
pos = {n: ((x - min_width) * scale[0] + pad[0],
(y - min_height) * scale[1] + pad[1])
for n, (x, y) in pos.items()}
for depth, box in enumerate(diagram.boxes):
if "box_{}".format(depth) in pos:
for i, _ in enumerate(box.dom):
node = "wire_dom_{}_{}".format(depth, i)
pos[node] = (
pos[node][0], pos[node][1] - .25 * (scale[1] - 1))
for i, _ in enumerate(box.cod):
node = "wire_cod_{}_{}".format(depth, i)
pos[node] = (
pos[node][0], pos[node][1] + .25 * (scale[1] - 1))
return pos
for i, _ in enumerate(diagram.dom):
add_node('input_{}'.format(i),
(i, len(diagram.boxes[:-1]) + 1), str(diagram.dom[i]))
scan = ['input_{}'.format(i) for i, _ in enumerate(diagram.dom)]
for depth, (box, off) in enumerate(zip(diagram.boxes, diagram.offsets)):
x_pos = make_space(scan, box, off)
scan = add_box(scan, box, off, depth, x_pos)
for i, _ in enumerate(diagram.cod):
add_node('output_{}'.format(i),
(pos[scan[i]][0], 0), str(diagram.cod[i]))
graph.add_edge(scan[i], 'output_{}'.format(i))
return graph, scale_and_pad(pos), labels
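# Illustrative note: the triple returned by diagram_to_nx can also be rendered
# directly with networkx, e.g.
#
#     graph, pos, labels = diagram_to_nx(diagram)
#     nx.draw_networkx(graph, pos, labels=labels)
#
# Node names follow the 'input_{i}', 'box_{depth}', 'wire_dom/cod_{depth}_{i}'
# and 'output_{i}' conventions used above.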
def save_tikz(commands, path=None, baseline=0, options=None):
"""
Save a list of tikz commands.
"""
options = "baseline=(O.base)" if options is None\
else "baseline=(O.base), " + options
begin = ["\\begin{{tikzpicture}}[{}]\n".format(options),
"\\node (O) at (0, {}) {{}};\n".format(baseline)]
end = ["\\end{tikzpicture}\n"]
with open(path, 'w+') as file:
file.writelines(begin + commands + end)
def draw_text(axis, text, i, j, to_tikz=False, **params):
"""
Draws `text` on `axis` as position `(i, j)`.
If `to_tikz`, axis is a list of tikz commands, else it's a matplotlib axis.
`params` get passed to matplotlib.
"""
if to_tikz:
options = ""
if params.get("verticalalignment", "center") == "top": # wire labels
options += "right"
if 'fontsize' in params and params['fontsize'] is not None:
options += (", " if options else "") +\
"scale={}".format(params['fontsize'])
axis.append(
"\\node [{}] () at ({}, {}) {{{}}};\n".format(options, i, j, text))
else:
params['fontsize'] = params.get('fontsize', None) or 12
axis.text(i, j, text, **params)
def draw_polygon(axis, *points, to_tikz=False, color='#ffffff'):
"""
Draws a polygon from a list of points.
"""
if to_tikz:
axis.append("\\draw {};\n".format(" -- ".join(
"({}, {})".format(*x) for x in points + points[:1])))
else:
codes = [Path.MOVETO]
codes += len(points[1:]) * [Path.LINETO] + [Path.CLOSEPOLY]
path = Path(points + points[:1], codes)
axis.add_patch(PathPatch(path, facecolor=color))
def draw_wire(axis, source, target,
bend_out=False, bend_in=False, to_tikz=False):
"""
Draws a wire from source to target using a Bezier curve.
"""
mid = (target[0], source[1]) if bend_out else (source[0], target[1])
if to_tikz == "controls":
cmd = "\\draw {} .. controls {} .. {};\n"
axis.append(cmd.format(*("({}, {})".format(*point)
for point in [source, mid, target])))
elif to_tikz:
out = -90 if not bend_out or source[0] == target[0]\
else (180 if source[0] > target[0] else 0)
inp = 90 if not bend_in or source[0] == target[0]\
else (180 if source[0] < target[0] else 0)
cmd = "\\draw [out={}, in={}] {{}} to {{}};\n".format(out, inp)
axis.append(cmd.format(*("({}, {})".format(*point)
for point in [source, target])))
else:
path = Path([source, mid, target],
[Path.MOVETO, Path.CURVE3, Path.CURVE3])
axis.add_patch(PathPatch(path, facecolor='none'))
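# Note: the matplotlib branch above draws each wire as a quadratic Bezier
# (Path.CURVE3) whose single control point is `mid`, i.e. the corner of the
# source/target bounding box, which gives the L-shaped bend; the tikz branch
# approximates the same shape with [out=..., in=...] angles.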
def draw(diagram, axis=None, data=None, **params):
"""
Draws a diagram, see :meth:`monoidal.Diagram.draw`.
"""
asymmetry = params.get('asymmetry',
.25 * any(box.is_dagger for box in diagram.boxes))
scale, pad = params.get('scale', (1, 1)), params.get('pad', (0, 0))
graph, positions, labels =\
diagram_to_nx(diagram, scale, pad) if data is None else data
colors = {getattr(box, 'color', 'red') for box in diagram.boxes}
shapes = {getattr(box, 'shape', 'o') for box in diagram.boxes}
spiders = {(color, shape): ["box_{}".format(i)
for i, box in enumerate(diagram.boxes)
if getattr(box, "draw_as_spider", False)
if getattr(box, "color", "red") == color
if getattr(box, "shape", "o") == shape]
for color in colors
for shape in shapes}
def draw_nodes(axis, nodes, color, shape):
if params.get('to_tikz', False):
cmd = "\\node [circle, fill={}] () ".format(color)
cmd += "at ({pos[0]}, {pos[1]}) {{{label}}};\n"
for node in nodes:
lab = labels[node]\
if params.get('draw_box_labels', True) else ""
axis.append(cmd.format(label=lab, pos=positions[node]))
else:
nx.draw_networkx_nodes(
graph, positions, nodelist=spiders[(color, shape)],
node_color=COLORS[color], node_shape=shape, ax=axis)
if params.get('draw_box_labels', True):
nx.draw_networkx_labels(
graph, positions,
{n: l for n, l in labels.items() if n in nodes})
def draw_box(axis, box, depth):
node = 'box_{}'.format(depth)
if node not in graph.nodes():
return
if not box.dom and not box.cod: # pragma: no cover
left, right = positions[node][0], positions[node][0]
elif not box.dom:
left, right = (
positions['wire_cod_{}_{}'.format(depth, i)][0]
for i in [0, len(box.cod) - 1])
elif not box.cod:
left, right = (
positions['wire_dom_{}_{}'.format(depth, i)][0]
for i in [0, len(box.dom) - 1])
else:
top_left, top_right = (
positions['wire_dom_{}_{}'.format(depth, i)][0]
for i in [0, len(box.dom) - 1])
bottom_left, bottom_right = (
positions['wire_cod_{}_{}'.format(depth, i)][0]
for i in [0, len(box.cod) - 1])
left = min(top_left, bottom_left)
right = max(top_right, bottom_right)
height = positions[node][1] - .25
left, right = left - .25 * scale[0], right + .25 * scale[0]
draw_polygon(
axis, (left, height),
(right + (asymmetry if box.is_dagger else 0), height),
(right + (0 if box.is_dagger else asymmetry), height + .5),
(left, height + .5),
to_tikz=params.get('to_tikz', False),
color=params.get('color', '#ffffff'))
if params.get('draw_box_labels', True):
draw_text(axis, labels[node], *positions[node],
to_tikz=params.get('to_tikz', False),
ha='center', va='center',
fontsize=params.get('fontsize', None))
def draw_wires(axis):
for case in ['input', 'wire_cod']:
for node in [n for n in graph.nodes if n[:len(case)] == case]:
i, j = positions[node]
if params.get('draw_types', True):
if node in labels.keys():
pad_i, pad_j = params.get('textpad', (.1, .1))
draw_text(
axis, labels[node],
i + pad_i, j - (0 if case == 'input' else pad_j),
to_tikz=params.get('to_tikz', False),
fontsize=params.get('fontsize_types',
params.get('fontsize', None)),
verticalalignment='top')
for source, target in graph.edges():
if "box" in [source[:3], target[:3]] and not any(
v in sum(spiders.values(), []) for v in [source, target]):
continue
draw_wire(axis, positions[source], positions[target],
bend_out='box' in source, bend_in='box' in target,
to_tikz=params.get('to_tikz', False))
if axis is None:
axis = [] if params.get('to_tikz', False)\
else plt.subplots(figsize=params.get('figsize', None))[1]
draw_wires(axis)
for color, shape in spiders.keys():
draw_nodes(axis, spiders[(color, shape)], color, shape)
for depth, box in enumerate(diagram.boxes):
if getattr(box, "draw_as_spider", False):
continue
draw_box(axis, box, depth)
if params.get('to_tikz', False):
if 'path' in params:
save_tikz(axis, params['path'], baseline=len(diagram) / 2 or .5,
options=params.get('tikz_options', None))
else:
plt.margins(*params.get('margins', (.05, .05)))
plt.subplots_adjust(
top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
axis.set_aspect(params.get('aspect', 'auto'))
plt.axis("off")
if 'path' in params:
plt.savefig(params['path'])
plt.close()
if params.get('show', True):
plt.show()
return axis
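# Minimal usage sketch (assuming the discopy API used in the doctests further
# below, e.g. Ty and Box; not part of this module):
#
#     from discopy import Ty, Box
#     f = Box('f', Ty('x'), Ty('y'))
#     draw(f, to_tikz=True, path='f.tikz')  # or draw(f) to show via matplotlib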
def to_gif(diagram, *diagrams, path=None,
timestep=500, loop=False, **params): # pragma: no cover
"""
Draws a sequence of diagrams.
"""
steps, frames = (diagram, ) + diagrams, []
path = path or os.path.basename(NamedTemporaryFile(
suffix='.gif', prefix='tmp_', dir='.').name)
with TemporaryDirectory() as directory:
for i, _diagram in enumerate(steps):
tmp_path = os.path.join(directory, '{}.png'.format(i))
_diagram.draw(path=tmp_path, **params)
frames.append(Image.open(tmp_path))
if loop:
frames = frames + frames[::-1]
frames[0].save(path, format='GIF', append_images=frames[1:],
save_all=True, duration=timestep,
**{'loop': 0} if loop else {})
try:
# pylint: disable=import-outside-toplevel
from IPython.display import HTML
return HTML('<img src="{}">'.format(path))
except ImportError:
return '<img src="{}">'.format(path)
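# For example (sketch): to_gif(step0, step1, step2, path='rewrite.gif',
# timestep=250, loop=True) would save an animation cycling through the steps.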
def pregroup_draw(words, cups, **params):
"""
Draws pregroup words and cups.
>>> from discopy import *
>>> s, n = Ty('s'), Ty('n')
>>> Alice, Bob = Word('Alice', n), Word('Bob', n)
>>> loves = Word('loves', n.r @ s @ n.l)
>>> sentence = Alice @ loves @ Bob >> Cup(n, n.r) @ Id(s) @ Cup(n.l, n)
>>> words, *cups = sentence.foliation().boxes
>>> pregroup_draw(words, cups, to_tikz=True, fontsize=2)
\\node [scale=2] () at (1.1, -0.2) {n};
\\draw (0.0, 0) -- (2.0, 0) -- (1.0, 1) -- (0.0, 0);
\\node [scale=2] () at (1.0, 0.1) {Alice};
\\node [scale=2] () at (3.1, -0.2) {n.r};
\\node [scale=2] () at (3.6, -0.2) {s};
\\node [scale=2] () at (4.1, -0.2) {n.l};
\\draw (2.5, 0) -- (4.5, 0) -- (3.5, 1) -- (2.5, 0);
\\node [scale=2] () at (3.5, 0.1) {loves};
\\node [scale=2] () at (6.1, -0.2) {n};
\\draw (5.0, 0) -- (7.0, 0) -- (6.0, 1) -- (5.0, 0);
\\node [scale=2] () at (6.0, 0.1) {Bob};
\\draw [out=-90, in=180] (1.0, 0) to (2.0, -1);
\\draw [out=-90, in=0] (3.0, 0) to (2.0, -1);
\\draw [out=-90, in=180] (4.0, 0) to (5.0, -1);
\\draw [out=-90, in=0] (6.0, 0) to (5.0, -1);
\\draw [out=-90, in=90] (3.5, 0) to (3.5, -2);
\\node [scale=2] () at (3.6, -1.5) {s};
"""
textpad = params.get('textpad', (.1, .2))
textpad_words = params.get('textpad_words', (0, .1))
space = params.get('space', .5)
width = params.get('width', 2.)
fontsize = params.get('fontsize', None)
def draw_triangles(axis, words):
scan = []
for i, word in enumerate(words.boxes):
for j, _ in enumerate(word.cod):
x_wire = (space + width) * i\
+ (width / (len(word.cod) + 1)) * (j + 1)
scan.append(x_wire)
if params.get('draw_types', True):
draw_text(axis, str(word.cod[j]),
x_wire + textpad[0], -textpad[1],
fontsize=params.get('fontsize_types', fontsize),
to_tikz=params.get('to_tikz', False))
draw_polygon(
axis, ((space + width) * i, 0),
((space + width) * i + width, 0),
((space + width) * i + width / 2, 1),
color='none', to_tikz=params.get('to_tikz', False))
draw_text(axis, str(word),
(space + width) * i + width / 2 + textpad_words[0],
textpad_words[1], ha='center', fontsize=fontsize,
to_tikz=params.get('to_tikz', False))
return scan
def draw_cups_and_wires(axis, cups, scan):
for j, off in [(j, off)
for j, s in enumerate(cups) for off in s.offsets]:
middle = (scan[off] + scan[off + 1]) / 2
draw_wire(axis, (scan[off], 0), (middle, - j - 1),
bend_in=True, to_tikz=params.get('to_tikz', False))
draw_wire(axis, (scan[off + 1], 0), (middle, - j - 1),
bend_in=True, to_tikz=params.get('to_tikz', False))
scan = scan[:off] + scan[off + 2:]
for i, _ in enumerate(cups[-1].cod if cups else words.cod):
label = str(cups[-1].cod[i]) if cups else ""
draw_wire(axis, (scan[i], 0), (scan[i], - (len(cups) or 1) - 1),
to_tikz=params.get('to_tikz', False))
if params.get('draw_types', True):
draw_text(axis, label,
scan[i] + textpad[0], - (len(cups) or 1) - space,
fontsize=params.get('fontsize_types', fontsize),
to_tikz=params.get('to_tikz', False))
axis = [] if params.get('to_tikz', False)\
else plt.subplots(figsize=params.get('figsize', None))[1]
scan = draw_triangles(axis, words.normal_form())
draw_cups_and_wires(axis, cups, scan)
if params.get('to_tikz', False):
if 'path' in params:
save_tikz(axis, params['path'],
options=params.get('tikz_options', None))
else:
print(''.join(axis).strip())
else:
plt.margins(*params.get('margins', (.05, .05)))
plt.subplots_adjust(
top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.axis('off')
axis.set_xlim(0, (space + width) * len(words.boxes) - space)
axis.set_ylim(- len(cups) - space, 1)
axis.set_aspect(params.get('aspect', 'auto'))
if 'path' in params.keys():
plt.savefig(params['path'])
plt.close()
plt.show()
def equation(*diagrams, symbol="=", space=1, **params):
"""
>>> from discopy import *
>>> x = Ty('x')
>>> diagrams = Id(x.r).transpose(left=True), Id(x.l).transpose()
>>> equation(*diagrams, to_tikz=True)
\\node [right] () at (0.1, 2.0) {x};
\\node [right] () at (1.1, 1.15) {x.r};
\\node [right] () at (2.1, 1.15) {x};
\\draw [out=-90, in=90] (0, 2.0) to (0, 0.75);
\\draw [out=180, in=90] (1.5, 1.5) to (1.0, 1.25);
\\draw [out=0, in=90] (1.5, 1.5) to (2.0, 1.25);
\\draw [out=-90, in=90] (1.0, 1.25) to (1.0, 0.75);
\\draw [out=-90, in=90] (2.0, 1.25) to (2.0, 0.0);
\\draw [out=-90, in=180] (0, 0.75) to (0.5, 0.5);
\\draw [out=-90, in=0] (1.0, 0.75) to (0.5, 0.5);
\\node [] () at (3.0, 1.0) {=};
\\node [right] () at (6.1, 2.0) {x};
\\node [right] () at (4.1, 1.15) {x};
\\node [right] () at (5.1, 1.15) {x.l};
\\draw [out=-90, in=90] (6.0, 2.0) to (6.0, 0.75);
\\draw [out=180, in=90] (4.5, 1.5) to (4.0, 1.25);
\\draw [out=0, in=90] (4.5, 1.5) to (5.0, 1.25);
\\draw [out=-90, in=90] (4.0, 1.25) to (4.0, 0.0);
\\draw [out=-90, in=90] (5.0, 1.25) to (5.0, 0.75);
\\draw [out=-90, in=180] (5.0, 0.75) to (5.5, 0.5);
\\draw [out=-90, in=0] (6.0, 0.75) to (5.5, 0.5);
"""
axis, pad, max_height = None, 0, max(map(len, diagrams))
scale_x, scale_y = params.get('scale', (1, 1))
path = params.pop("path", None)
for i, diagram in enumerate(diagrams):
scale = (scale_x, scale_y * max_height / (len(diagram) or 1))
graph, positions, labels = diagram_to_nx(
diagram, scale=scale, pad=(pad, 0))
axis = diagram.draw(axis=axis, data=(graph, positions, labels),
show=False, **params)
widths = {x for x, _ in positions.values()}
min_width, max_width = min(widths), max(widths)
pad += max_width - min_width + space
if i < len(diagrams) - 1:
draw_text(axis, symbol, pad, scale_y * max_height / 2,
to_tikz=params.get('to_tikz', False))
pad += space
if params.get('to_tikz', False):
if path is not None:
save_tikz(axis, path, baseline=max_height / 2,
options=params.get('tikz_options', None))
else:
print(''.join(axis).strip())
else:
if path is not None:
plt.savefig(path)
plt.close()
plt.show()
ThirdParty/pybluez2-macos_fix/macos/_lightblue.py | zhaocy14/SmartWalker | 2 | 6631579 | <gh_stars>1-10
# Copyright (c) 2009 <NAME>. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Mac OS X main module implementation.
import types
import warnings
import Foundation
import AppKit
import objc
from objc import super
from . import _IOBluetooth
from . import _LightAquaBlue
from . import _lightbluecommon
from . import _macutil
from . import _bluetoothsockets
# public attributes
__all__ = ("finddevices", "findservices", "finddevicename",
"selectdevice", "selectservice",
"gethostaddr", "gethostclass",
"socket",
"advertise", "stopadvertise")
# details of advertised services
__advertised = {}
def finddevices(getnames=True, length=10):
inquiry = _SyncDeviceInquiry()
inquiry.run(getnames, length)
devices = inquiry.getfounddevices()
return devices
def findservices(addr=None, name=None, servicetype=None):
if servicetype not in (_lightbluecommon.RFCOMM, _lightbluecommon.OBEX, None):
raise ValueError("servicetype must be RFCOMM, OBEX or None, was %s" % \
servicetype)
if addr is None:
try:
founddevices = finddevices()
except _lightbluecommon.BluetoothError as e:
msg = "findservices() failed, " +\
"error while finding devices: " + str(e)
raise _lightbluecommon.BluetoothError(msg)
#print founddevices
addresses = [dev[0] for dev in founddevices]
else:
addresses = [addr]
services = []
for devaddr in addresses:
iobtdevice = _IOBluetooth.IOBluetoothDevice.withAddressString_(devaddr)
if not iobtdevice and addr is not None:
msg = "findservices() failed, " +\
"failed to find " + devaddr
raise _lightbluecommon.BluetoothError(msg)
elif not iobtdevice:
continue
try:
lastseen = iobtdevice.getLastServicesUpdate()
if lastseen is None or lastseen.timeIntervalSinceNow() < -2:
# perform SDP query to update known services.
# wait at least a few seconds between service discovery cos
# sometimes it doesn't work if doing updates too often.
# In future should have option to not do updates.
serviceupdater = _SDPQueryRunner.alloc().init()
try:
serviceupdater.query(iobtdevice) # blocks until updated
except _lightbluecommon.BluetoothError as e:
msg = "findservices() couldn't get services for %s: %s" % \
(iobtdevice.getNameOrAddress(), str(e))
warnings.warn(msg)
# or should I use cached services instead of warning?
# but sometimes the cached ones are totally wrong.
# if searching for RFCOMM, exclude OBEX services
if servicetype == _lightbluecommon.RFCOMM:
uuidbad = _macutil.PROTO_UUIDS.get(_lightbluecommon.OBEX)
else:
uuidbad = None
filtered = _searchservices(iobtdevice, name=name,
uuid=_macutil.PROTO_UUIDS.get(servicetype),
uuidbad=uuidbad)
#print "unfiltered:", iobtdevice.getServices()
services.extend([_getservicetuple(s) for s in filtered])
finally:
# close baseband connection (not sure if this is necessary, but
# sometimes the transport connection seems to stay open?)
iobtdevice.closeConnection()
return services
def finddevicename(address, usecache=True):
if not _lightbluecommon._isbtaddr(address):
raise TypeError("%s is not a valid bluetooth address" % str(address))
if address == gethostaddr():
return _gethostname()
device = _IOBluetooth.IOBluetoothDevice.withAddressString_(address)
if usecache:
name = device.getName()
if name is not None:
return name
# do name request with timeout of 10 seconds
result = device.remoteNameRequest_withPageTimeout_(None, 10000)
if result == _macutil.kIOReturnSuccess:
return device.getName()
raise _lightbluecommon.BluetoothError(
"Could not find device name for %s" % address)
### local device ###
def gethostaddr():
addr = _LightAquaBlue.BBLocalDevice.getAddressString()
if addr is not None:
# PyObjC returns all strings as unicode, but the address doesn't need
# to be unicode cos it's just hex values
return _macutil.formatdevaddr(addr)
raise _lightbluecommon.BluetoothError("Cannot read local device address")
def gethostclass():
cod = _LightAquaBlue.BBLocalDevice.getClassOfDevice()
if cod != -1:
return int(cod)
raise _lightbluecommon.BluetoothError("Cannot read local device class")
def _gethostname():
name = _LightAquaBlue.BBLocalDevice.getName()
if name is not None:
return name
raise _lightbluecommon.BluetoothError("Cannot read local device name")
### socket ###
def socket(proto=_lightbluecommon.RFCOMM):
return _bluetoothsockets._getsocketobject(proto)
### advertising services ###
def advertise(name, sock, servicetype, uuid=None):
if not isinstance(name, str):
raise TypeError("name must be string, was %s" % type(name))
# raises exception if socket is not bound
boundchannelID = sock._getport()
# advertise the service
if servicetype == _lightbluecommon.RFCOMM or servicetype == _lightbluecommon.OBEX:
try:
result, finalchannelID, servicerecordhandle = _LightAquaBlue.BBServiceAdvertiser\
.addRFCOMMServiceDictionary_withName_UUID_channelID_serviceRecordHandle_(
_LightAquaBlue.BBServiceAdvertiser.serialPortProfileDictionary(),
name, uuid, None, None)
except:
result, finalchannelID, servicerecordhandle = _LightAquaBlue.BBServiceAdvertiser\
.addRFCOMMServiceDictionary_withName_UUID_channelID_serviceRecordHandle_(
_LightAquaBlue.BBServiceAdvertiser.serialPortProfileDictionary(),
name, uuid)
else:
raise ValueError("servicetype must be either RFCOMM or OBEX")
if result != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
result, "Error advertising service")
if boundchannelID and boundchannelID != finalchannelID:
msg = "socket bound to unavailable channel (%d), " % boundchannelID +\
"use channel value of 0 to bind to dynamically assigned channel"
raise _lightbluecommon.BluetoothError(msg)
# note service record handle, so that the service can be stopped later
__advertised[id(sock)] = servicerecordhandle
def stopadvertise(sock):
if sock is None:
raise TypeError("Given socket is None")
servicerecordhandle = __advertised.get(id(sock))
if servicerecordhandle is None:
raise _lightbluecommon.BluetoothError("no service advertised")
result = _LightAquaBlue.BBServiceAdvertiser.removeService_(servicerecordhandle)
if result != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
result, "Error stopping advertising of service")
### GUI ###
def selectdevice():
from . import _IOBluetoothUI
gui = _IOBluetoothUI.IOBluetoothDeviceSelectorController.deviceSelector()
# try to bring GUI to foreground by setting it as floating panel
# (if this is called from pyobjc app, it would automatically be in foreground)
try:
gui.window().setFloatingPanel_(True)
except:
pass
# show the window and wait for user's selection
response = gui.runModal() # problems here if transferring a lot of data??
if response == AppKit.NSRunStoppedResponse:
results = gui.getResults()
if len(results) > 0: # should always be > 0, but check anyway
devinfo = _getdevicetuple(results[0])
# sometimes the baseband connection stays open which causes
            # problems with connections ... so close it here, see if this fixes
# it
dev = _IOBluetooth.IOBluetoothDevice.withAddressString_(devinfo[0])
if dev.isConnected():
dev.closeConnection()
return devinfo
# user cancelled selection
return None
def selectservice():
from . import _IOBluetoothUI
gui = _IOBluetoothUI.IOBluetoothServiceBrowserController.serviceBrowserController_(
_macutil.kIOBluetoothServiceBrowserControllerOptionsNone)
# try to bring GUI to foreground by setting it as floating panel
# (if this is called from pyobjc app, it would automatically be in foreground)
try:
gui.window().setFloatingPanel_(True)
except:
pass
# show the window and wait for user's selection
response = gui.runModal()
if response == AppKit.NSRunStoppedResponse:
results = gui.getResults()
if len(results) > 0: # should always be > 0, but check anyway
serviceinfo = _getservicetuple(results[0])
# sometimes the baseband connection stays open which causes
# problems with connections ... so close it here, see if this fixes
# it
dev = _IOBluetooth.IOBluetoothDevice.deviceWithAddressString_(serviceinfo[0])
if dev.isConnected():
dev.closeConnection()
return serviceinfo
# user cancelled selection
return None
### classes ###
class _SDPQueryRunner(Foundation.NSObject):
"""
Convenience class for performing a synchronous or asynchronous SDP query
on an IOBluetoothDevice.
"""
@objc.python_method
def query(self, device, timeout=10.0):
# do SDP query
err = device.performSDPQuery_(self)
if err != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(err, self._errmsg(device))
# performSDPQuery_ is async, so block-wait
self._queryresult = None
if not _macutil.waituntil(lambda: self._queryresult is not None,
timeout):
raise _lightbluecommon.BluetoothError(
"Timed out getting services for %s" % \
device.getNameOrAddress())
# query is now complete
if self._queryresult != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
self._queryresult, self._errmsg(device))
def sdpQueryComplete_status_(self, device, status):
# can't raise exception during a callback, so just keep the err value
self._queryresult = status
_macutil.interruptwait()
sdpQueryComplete_status_ = objc.selector(
sdpQueryComplete_status_, signature=b"v@:@i") # accept object, int
@objc.python_method
def _errmsg(self, device):
return "Error getting services for %s" % device.getNameOrAddress()
class _SyncDeviceInquiry:
def __init__(self):
super().__init__()
self._inquiry = _AsyncDeviceInquiry.alloc().init()
self._inquiry.cb_completed = self._inquirycomplete
self._inquiring = False
def run(self, getnames, duration):
if self._inquiring:
raise _lightbluecommon.BluetoothError(
"Another inquiry in progress")
# set inquiry attributes
self._inquiry.updatenames = getnames
self._inquiry.length = duration
# start the inquiry
err = self._inquiry.start()
if err != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
err, "Error starting device inquiry")
# if error occurs during inquiry, set _inquiryerr to the error code
self._inquiryerr = _macutil.kIOReturnSuccess
# wait until the inquiry is complete
self._inquiring = True
_macutil.waituntil(lambda: not self._inquiring)
        # if error occurred during inquiry, raise exception
if self._inquiryerr != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(self._inquiryerr,
"Error during device inquiry")
def getfounddevices(self):
# return as list of device-info tuples
return [_getdevicetuple(device) for device in \
self._inquiry.getfounddevices()]
def _inquirycomplete(self, err, aborted):
if err != 188: # no devices found
self._inquiryerr = err
self._inquiring = False
_macutil.interruptwait()
def __del__(self):
self._inquiry.__del__()
super().__del__()
# Wrapper around IOBluetoothDeviceInquiry, with python callbacks that you can
# set to receive callbacks when the inquiry is started or stopped, or when it
# finds a device.
#
# This discovery doesn't block, so it could be used in a PyObjC application
# that is running an event loop.
#
# Properties:
# - 'length': the inquiry length (seconds)
# - 'updatenames': whether to update device names during the inquiry
# (i.e. perform remote name requests, which will take a little longer)
#
class _AsyncDeviceInquiry(Foundation.NSObject):
# NSObject init, not python __init__
def init(self):
try:
attr = _IOBluetooth.IOBluetoothDeviceInquiry
except AttributeError:
raise ImportError("Cannot find IOBluetoothDeviceInquiry class " +\
"to perform device discovery. This class was introduced in " +\
"Mac OS X 10.4, are you running an earlier version?")
self = super().init()
self._inquiry = \
_IOBluetooth.IOBluetoothDeviceInquiry.inquiryWithDelegate_(self)
# callbacks
self.cb_started = None
self.cb_completed = None
self.cb_founddevice = None
return self
# length property
@objc.python_method
def _setlength(self, length):
self._inquiry.setInquiryLength_(length)
length = property(
lambda self: self._inquiry.inquiryLength(),
_setlength)
# updatenames property
@objc.python_method
def _setupdatenames(self, update):
self._inquiry.setUpdateNewDeviceNames_(update)
updatenames = property(
lambda self: self._inquiry.updateNewDeviceNames(),
_setupdatenames)
# returns error code
def start(self):
return self._inquiry.start()
# returns error code
def stop(self):
return self._inquiry.stop()
# returns list of IOBluetoothDevice objects
def getfounddevices(self):
return self._inquiry.foundDevices()
def __del__(self):
super().dealloc()
#
# delegate methods follow (these are called by the internal
# IOBluetoothDeviceInquiry object when inquiry events occur)
#
# - (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender
# device:(IOBluetoothDevice*)device;
def deviceInquiryDeviceFound_device_(self, inquiry, device):
if self.cb_founddevice:
self.cb_founddevice(device)
deviceInquiryDeviceFound_device_ = objc.selector(
deviceInquiryDeviceFound_device_, signature=b"v@:@@")
# - (void)deviceInquiryComplete:error:aborted;
def deviceInquiryComplete_error_aborted_(self, inquiry, err, aborted):
if self.cb_completed:
self.cb_completed(err, aborted)
deviceInquiryComplete_error_aborted_ = objc.selector(
deviceInquiryComplete_error_aborted_, signature=b"v@:@iZ")
# - (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender;
def deviceInquiryStarted_(self, inquiry):
if self.cb_started:
self.cb_started()
# - (void)deviceInquiryDeviceNameUpdated:device:devicesRemaining:
def deviceInquiryDeviceNameUpdated_device_devicesRemaining_(self, sender,
device,
devicesRemaining):
pass
# - (void)deviceInquiryUpdatingDeviceNamesStarted:devicesRemaining:
def deviceInquiryUpdatingDeviceNamesStarted_devicesRemaining_(self, sender,
devicesRemaining):
pass
### utility methods ###
def _searchservices(device, name=None, uuid=None, uuidbad=None):
"""
Searches the given IOBluetoothDevice using the specified parameters.
Returns an empty list if the device has no services.
uuid should be IOBluetoothSDPUUID object.
"""
if not isinstance(device, _IOBluetooth.IOBluetoothDevice):
raise ValueError("device must be IOBluetoothDevice, was %s" % \
type(device))
services = []
allservices = device.getServices()
if uuid:
gooduuids = (uuid, )
else:
gooduuids = ()
if uuidbad:
baduuids = (uuidbad, )
else:
baduuids = ()
if allservices is not None:
for s in allservices:
if gooduuids and not s.hasServiceFromArray_(gooduuids):
continue
if baduuids and s.hasServiceFromArray_(baduuids):
continue
if name is None or s.getServiceName() == name:
services.append(s)
return services
def _getdevicetuple(iobtdevice):
"""
Returns an (addr, name, COD) device tuple from a IOBluetoothDevice object.
"""
addr = _macutil.formatdevaddr(iobtdevice.getAddressString())
name = iobtdevice.getName()
cod = iobtdevice.getClassOfDevice()
return (addr, name, cod)
def _getservicetuple(servicerecord):
"""
Returns a (device-addr, service-channel, service-name) tuple from the given
IOBluetoothSDPServiceRecord.
"""
addr = _macutil.formatdevaddr(servicerecord.getDevice().getAddressString())
name = servicerecord.getServiceName()
try:
result, channel = servicerecord.getRFCOMMChannelID_(None) # pyobjc 2.0
except TypeError:
result, channel = servicerecord.getRFCOMMChannelID_()
if result != _macutil.kIOReturnSuccess:
try:
result, channel = servicerecord.getL2CAPPSM_(None) # pyobjc 2.0
except:
result, channel = servicerecord.getL2CAPPSM_()
if result != _macutil.kIOReturnSuccess:
channel = None
return (addr, channel, name)
| # Copyright (c) 2009 <NAME>. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Mac OS X main module implementation.
import types
import warnings
import Foundation
import AppKit
import objc
from objc import super
from . import _IOBluetooth
from . import _LightAquaBlue
from . import _lightbluecommon
from . import _macutil
from . import _bluetoothsockets
# public attributes
__all__ = ("finddevices", "findservices", "finddevicename",
"selectdevice", "selectservice",
"gethostaddr", "gethostclass",
"socket",
"advertise", "stopadvertise")
# details of advertised services
__advertised = {}
def finddevices(getnames=True, length=10):
inquiry = _SyncDeviceInquiry()
inquiry.run(getnames, length)
devices = inquiry.getfounddevices()
return devices
def findservices(addr=None, name=None, servicetype=None):
if servicetype not in (_lightbluecommon.RFCOMM, _lightbluecommon.OBEX, None):
raise ValueError("servicetype must be RFCOMM, OBEX or None, was %s" % \
servicetype)
if addr is None:
try:
founddevices = finddevices()
except _lightbluecommon.BluetoothError as e:
msg = "findservices() failed, " +\
"error while finding devices: " + str(e)
raise _lightbluecommon.BluetoothError(msg)
#print founddevices
addresses = [dev[0] for dev in founddevices]
else:
addresses = [addr]
services = []
for devaddr in addresses:
iobtdevice = _IOBluetooth.IOBluetoothDevice.withAddressString_(devaddr)
if not iobtdevice and addr is not None:
msg = "findservices() failed, " +\
"failed to find " + devaddr
raise _lightbluecommon.BluetoothError(msg)
elif not iobtdevice:
continue
try:
lastseen = iobtdevice.getLastServicesUpdate()
if lastseen is None or lastseen.timeIntervalSinceNow() < -2:
# perform SDP query to update known services.
# wait at least a few seconds between service discovery cos
# sometimes it doesn't work if doing updates too often.
# In future should have option to not do updates.
serviceupdater = _SDPQueryRunner.alloc().init()
try:
serviceupdater.query(iobtdevice) # blocks until updated
except _lightbluecommon.BluetoothError as e:
msg = "findservices() couldn't get services for %s: %s" % \
(iobtdevice.getNameOrAddress(), str(e))
warnings.warn(msg)
# or should I use cached services instead of warning?
# but sometimes the cached ones are totally wrong.
# if searching for RFCOMM, exclude OBEX services
if servicetype == _lightbluecommon.RFCOMM:
uuidbad = _macutil.PROTO_UUIDS.get(_lightbluecommon.OBEX)
else:
uuidbad = None
filtered = _searchservices(iobtdevice, name=name,
uuid=_macutil.PROTO_UUIDS.get(servicetype),
uuidbad=uuidbad)
#print "unfiltered:", iobtdevice.getServices()
services.extend([_getservicetuple(s) for s in filtered])
finally:
# close baseband connection (not sure if this is necessary, but
# sometimes the transport connection seems to stay open?)
iobtdevice.closeConnection()
return services
def finddevicename(address, usecache=True):
if not _lightbluecommon._isbtaddr(address):
raise TypeError("%s is not a valid bluetooth address" % str(address))
if address == gethostaddr():
return _gethostname()
device = _IOBluetooth.IOBluetoothDevice.withAddressString_(address)
if usecache:
name = device.getName()
if name is not None:
return name
# do name request with timeout of 10 seconds
result = device.remoteNameRequest_withPageTimeout_(None, 10000)
if result == _macutil.kIOReturnSuccess:
return device.getName()
raise _lightbluecommon.BluetoothError(
"Could not find device name for %s" % address)
### local device ###
def gethostaddr():
addr = _LightAquaBlue.BBLocalDevice.getAddressString()
if addr is not None:
# PyObjC returns all strings as unicode, but the address doesn't need
# to be unicode cos it's just hex values
return _macutil.formatdevaddr(addr)
raise _lightbluecommon.BluetoothError("Cannot read local device address")
def gethostclass():
cod = _LightAquaBlue.BBLocalDevice.getClassOfDevice()
if cod != -1:
return int(cod)
raise _lightbluecommon.BluetoothError("Cannot read local device class")
def _gethostname():
name = _LightAquaBlue.BBLocalDevice.getName()
if name is not None:
return name
raise _lightbluecommon.BluetoothError("Cannot read local device name")
### socket ###
def socket(proto=_lightbluecommon.RFCOMM):
return _bluetoothsockets._getsocketobject(proto)
### advertising services ###
def advertise(name, sock, servicetype, uuid=None):
if not isinstance(name, str):
raise TypeError("name must be string, was %s" % type(name))
# raises exception if socket is not bound
boundchannelID = sock._getport()
# advertise the service
if servicetype == _lightbluecommon.RFCOMM or servicetype == _lightbluecommon.OBEX:
try:
result, finalchannelID, servicerecordhandle = _LightAquaBlue.BBServiceAdvertiser\
.addRFCOMMServiceDictionary_withName_UUID_channelID_serviceRecordHandle_(
_LightAquaBlue.BBServiceAdvertiser.serialPortProfileDictionary(),
name, uuid, None, None)
except:
result, finalchannelID, servicerecordhandle = _LightAquaBlue.BBServiceAdvertiser\
.addRFCOMMServiceDictionary_withName_UUID_channelID_serviceRecordHandle_(
_LightAquaBlue.BBServiceAdvertiser.serialPortProfileDictionary(),
name, uuid)
else:
raise ValueError("servicetype must be either RFCOMM or OBEX")
if result != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
result, "Error advertising service")
if boundchannelID and boundchannelID != finalchannelID:
msg = "socket bound to unavailable channel (%d), " % boundchannelID +\
"use channel value of 0 to bind to dynamically assigned channel"
raise _lightbluecommon.BluetoothError(msg)
# note service record handle, so that the service can be stopped later
__advertised[id(sock)] = servicerecordhandle
def stopadvertise(sock):
if sock is None:
raise TypeError("Given socket is None")
servicerecordhandle = __advertised.get(id(sock))
if servicerecordhandle is None:
raise _lightbluecommon.BluetoothError("no service advertised")
result = _LightAquaBlue.BBServiceAdvertiser.removeService_(servicerecordhandle)
if result != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
result, "Error stopping advertising of service")
### GUI ###
def selectdevice():
from . import _IOBluetoothUI
gui = _IOBluetoothUI.IOBluetoothDeviceSelectorController.deviceSelector()
# try to bring GUI to foreground by setting it as floating panel
# (if this is called from pyobjc app, it would automatically be in foreground)
try:
gui.window().setFloatingPanel_(True)
except:
pass
# show the window and wait for user's selection
response = gui.runModal() # problems here if transferring a lot of data??
if response == AppKit.NSRunStoppedResponse:
results = gui.getResults()
if len(results) > 0: # should always be > 0, but check anyway
devinfo = _getdevicetuple(results[0])
# sometimes the baseband connection stays open which causes
            # problems with connections ... so close it here, see if this fixes
# it
dev = _IOBluetooth.IOBluetoothDevice.withAddressString_(devinfo[0])
if dev.isConnected():
dev.closeConnection()
return devinfo
# user cancelled selection
return None
def selectservice():
from . import _IOBluetoothUI
gui = _IOBluetoothUI.IOBluetoothServiceBrowserController.serviceBrowserController_(
_macutil.kIOBluetoothServiceBrowserControllerOptionsNone)
# try to bring GUI to foreground by setting it as floating panel
# (if this is called from pyobjc app, it would automatically be in foreground)
try:
gui.window().setFloatingPanel_(True)
except:
pass
# show the window and wait for user's selection
response = gui.runModal()
if response == AppKit.NSRunStoppedResponse:
results = gui.getResults()
if len(results) > 0: # should always be > 0, but check anyway
serviceinfo = _getservicetuple(results[0])
# sometimes the baseband connection stays open which causes
# problems with connections ... so close it here, see if this fixes
# it
dev = _IOBluetooth.IOBluetoothDevice.deviceWithAddressString_(serviceinfo[0])
if dev.isConnected():
dev.closeConnection()
return serviceinfo
# user cancelled selection
return None
### classes ###
class _SDPQueryRunner(Foundation.NSObject):
"""
Convenience class for performing a synchronous or asynchronous SDP query
on an IOBluetoothDevice.
"""
@objc.python_method
def query(self, device, timeout=10.0):
# do SDP query
err = device.performSDPQuery_(self)
if err != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(err, self._errmsg(device))
# performSDPQuery_ is async, so block-wait
self._queryresult = None
if not _macutil.waituntil(lambda: self._queryresult is not None,
timeout):
raise _lightbluecommon.BluetoothError(
"Timed out getting services for %s" % \
device.getNameOrAddress())
# query is now complete
if self._queryresult != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
self._queryresult, self._errmsg(device))
def sdpQueryComplete_status_(self, device, status):
# can't raise exception during a callback, so just keep the err value
self._queryresult = status
_macutil.interruptwait()
sdpQueryComplete_status_ = objc.selector(
sdpQueryComplete_status_, signature=b"v@:@i") # accept object, int
@objc.python_method
def _errmsg(self, device):
return "Error getting services for %s" % device.getNameOrAddress()
class _SyncDeviceInquiry:
def __init__(self):
super().__init__()
self._inquiry = _AsyncDeviceInquiry.alloc().init()
self._inquiry.cb_completed = self._inquirycomplete
self._inquiring = False
def run(self, getnames, duration):
if self._inquiring:
raise _lightbluecommon.BluetoothError(
"Another inquiry in progress")
# set inquiry attributes
self._inquiry.updatenames = getnames
self._inquiry.length = duration
# start the inquiry
err = self._inquiry.start()
if err != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(
err, "Error starting device inquiry")
# if error occurs during inquiry, set _inquiryerr to the error code
self._inquiryerr = _macutil.kIOReturnSuccess
# wait until the inquiry is complete
self._inquiring = True
_macutil.waituntil(lambda: not self._inquiring)
        # if error occurred during inquiry, raise exception
if self._inquiryerr != _macutil.kIOReturnSuccess:
raise _lightbluecommon.BluetoothError(self._inquiryerr,
"Error during device inquiry")
def getfounddevices(self):
# return as list of device-info tuples
return [_getdevicetuple(device) for device in \
self._inquiry.getfounddevices()]
def _inquirycomplete(self, err, aborted):
if err != 188: # no devices found
self._inquiryerr = err
self._inquiring = False
_macutil.interruptwait()
def __del__(self):
self._inquiry.__del__()
super().__del__()
# Wrapper around IOBluetoothDeviceInquiry, with python callbacks that you can
# set to receive callbacks when the inquiry is started or stopped, or when it
# finds a device.
#
# This discovery doesn't block, so it could be used in a PyObjC application
# that is running an event loop.
#
# Properties:
# - 'length': the inquiry length (seconds)
# - 'updatenames': whether to update device names during the inquiry
# (i.e. perform remote name requests, which will take a little longer)
#
class _AsyncDeviceInquiry(Foundation.NSObject):
# NSObject init, not python __init__
def init(self):
try:
attr = _IOBluetooth.IOBluetoothDeviceInquiry
except AttributeError:
raise ImportError("Cannot find IOBluetoothDeviceInquiry class " +\
"to perform device discovery. This class was introduced in " +\
"Mac OS X 10.4, are you running an earlier version?")
self = super().init()
self._inquiry = \
_IOBluetooth.IOBluetoothDeviceInquiry.inquiryWithDelegate_(self)
# callbacks
self.cb_started = None
self.cb_completed = None
self.cb_founddevice = None
return self
# length property
@objc.python_method
def _setlength(self, length):
self._inquiry.setInquiryLength_(length)
length = property(
lambda self: self._inquiry.inquiryLength(),
_setlength)
# updatenames property
@objc.python_method
def _setupdatenames(self, update):
self._inquiry.setUpdateNewDeviceNames_(update)
updatenames = property(
lambda self: self._inquiry.updateNewDeviceNames(),
_setupdatenames)
# returns error code
def start(self):
return self._inquiry.start()
# returns error code
def stop(self):
return self._inquiry.stop()
# returns list of IOBluetoothDevice objects
def getfounddevices(self):
return self._inquiry.foundDevices()
def __del__(self):
super().dealloc()
#
# delegate methods follow (these are called by the internal
# IOBluetoothDeviceInquiry object when inquiry events occur)
#
# - (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender
# device:(IOBluetoothDevice*)device;
def deviceInquiryDeviceFound_device_(self, inquiry, device):
if self.cb_founddevice:
self.cb_founddevice(device)
deviceInquiryDeviceFound_device_ = objc.selector(
deviceInquiryDeviceFound_device_, signature=b"v@:@@")
# - (void)deviceInquiryComplete:error:aborted;
def deviceInquiryComplete_error_aborted_(self, inquiry, err, aborted):
if self.cb_completed:
self.cb_completed(err, aborted)
deviceInquiryComplete_error_aborted_ = objc.selector(
deviceInquiryComplete_error_aborted_, signature=b"v@:@iZ")
# - (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender;
def deviceInquiryStarted_(self, inquiry):
if self.cb_started:
self.cb_started()
# - (void)deviceInquiryDeviceNameUpdated:device:devicesRemaining:
def deviceInquiryDeviceNameUpdated_device_devicesRemaining_(self, sender,
device,
devicesRemaining):
pass
# - (void)deviceInquiryUpdatingDeviceNamesStarted:devicesRemaining:
def deviceInquiryUpdatingDeviceNamesStarted_devicesRemaining_(self, sender,
devicesRemaining):
pass
### utility methods ###
def _searchservices(device, name=None, uuid=None, uuidbad=None):
"""
Searches the given IOBluetoothDevice using the specified parameters.
Returns an empty list if the device has no services.
uuid should be IOBluetoothSDPUUID object.
"""
if not isinstance(device, _IOBluetooth.IOBluetoothDevice):
raise ValueError("device must be IOBluetoothDevice, was %s" % \
type(device))
services = []
allservices = device.getServices()
if uuid:
gooduuids = (uuid, )
else:
gooduuids = ()
if uuidbad:
baduuids = (uuidbad, )
else:
baduuids = ()
if allservices is not None:
for s in allservices:
if gooduuids and not s.hasServiceFromArray_(gooduuids):
continue
if baduuids and s.hasServiceFromArray_(baduuids):
continue
if name is None or s.getServiceName() == name:
services.append(s)
return services
def _getdevicetuple(iobtdevice):
"""
Returns an (addr, name, COD) device tuple from a IOBluetoothDevice object.
"""
addr = _macutil.formatdevaddr(iobtdevice.getAddressString())
name = iobtdevice.getName()
cod = iobtdevice.getClassOfDevice()
return (addr, name, cod)
def _getservicetuple(servicerecord):
"""
Returns a (device-addr, service-channel, service-name) tuple from the given
IOBluetoothSDPServiceRecord.
"""
addr = _macutil.formatdevaddr(servicerecord.getDevice().getAddressString())
name = servicerecord.getServiceName()
try:
result, channel = servicerecord.getRFCOMMChannelID_(None) # pyobjc 2.0
except TypeError:
result, channel = servicerecord.getRFCOMMChannelID_()
if result != _macutil.kIOReturnSuccess:
try:
result, channel = servicerecord.getL2CAPPSM_(None) # pyobjc 2.0
except:
result, channel = servicerecord.getL2CAPPSM_()
if result != _macutil.kIOReturnSuccess:
channel = None
return (addr, channel, name) | en | 0.848475 | # Copyright (c) 2009 <NAME>. All rights reserved. # # This file is part of LightBlue. # # LightBlue is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # LightBlue is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with LightBlue. If not, see <http://www.gnu.org/licenses/>. # Mac OS X main module implementation. # public attributes # details of advertised services #print founddevices # perform SDP query to update known services. # wait at least a few seconds between service discovery cos # sometimes it doesn't work if doing updates too often. # In future should have option to not do updates. # blocks until updated # or should I use cached services instead of warning? # but sometimes the cached ones are totally wrong. # if searching for RFCOMM, exclude OBEX services #print "unfiltered:", iobtdevice.getServices() # close baseband connection (not sure if this is necessary, but # sometimes the transport connection seems to stay open?) # do name request with timeout of 10 seconds ### local device ### # PyObjC returns all strings as unicode, but the address doesn't need # to be unicode cos it's just hex values ### socket ### ### advertising services ### # raises exception if socket is not bound # advertise the service # note service record handle, so that the service can be stopped later ### GUI ### # try to bring GUI to foreground by setting it as floating panel # (if this is called from pyobjc app, it would automatically be in foreground) # show the window and wait for user's selection # problems here if transferring a lot of data?? # should always be > 0, but check anyway # sometimes the baseband connection stays open which causes # problems with connections w so close it here, see if this fixes # it # user cancelled selection # try to bring GUI to foreground by setting it as floating panel # (if this is called from pyobjc app, it would automatically be in foreground) # show the window and wait for user's selection # should always be > 0, but check anyway # sometimes the baseband connection stays open which causes # problems with connections ... so close it here, see if this fixes # it # user cancelled selection ### classes ### Convenience class for performing a synchronous or asynchronous SDP query on an IOBluetoothDevice. # do SDP query # performSDPQuery_ is async, so block-wait # query is now complete # can't raise exception during a callback, so just keep the err value # accept object, int # set inquiry attributes # start the inquiry # if error occurs during inquiry, set _inquiryerr to the error code # wait until the inquiry is complete # if error occured during inquiry, raise exception # return as list of device-info tuples # no devices found # Wrapper around IOBluetoothDeviceInquiry, with python callbacks that you can # set to receive callbacks when the inquiry is started or stopped, or when it # finds a device. # # This discovery doesn't block, so it could be used in a PyObjC application # that is running an event loop. 
# # Properties: # - 'length': the inquiry length (seconds) # - 'updatenames': whether to update device names during the inquiry # (i.e. perform remote name requests, which will take a little longer) # # NSObject init, not python __init__ # callbacks # length property # updatenames property # returns error code # returns error code # returns list of IOBluetoothDevice objects # # delegate methods follow (these are called by the internal # IOBluetoothDeviceInquiry object when inquiry events occur) # # - (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender # device:(IOBluetoothDevice*)device; # - (void)deviceInquiryComplete:error:aborted; # - (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender; # - (void)deviceInquiryDeviceNameUpdated:device:devicesRemaining: # - (void)deviceInquiryUpdatingDeviceNamesStarted:devicesRemaining: ### utility methods ### Searches the given IOBluetoothDevice using the specified parameters. Returns an empty list if the device has no services. uuid should be IOBluetoothSDPUUID object. Returns an (addr, name, COD) device tuple from a IOBluetoothDevice object. Returns a (device-addr, service-channel, service-name) tuple from the given IOBluetoothSDPServiceRecord. # pyobjc 2.0 # pyobjc 2.0 | 1.8235 | 2 |
scripts/pyqt/qlist_clicked_sqlite.py | meramsey/python-scripts-collection | 2 | 6631580 |
import re
import sys
from datetime import date
from PySide2.QtCore import Qt
from qtpy import QtSql
from PySide2.QtSql import QSqlDatabase, QSqlQueryModel, QSqlQuery
from PySide2.QtWidgets import QTableView, QApplication
command_category = 'FirewallChecks' # self.commandcategorydropdown
def replace(string, substitutions):
substrings = sorted(substitutions, key=len, reverse=True)
regex = re.compile('|'.join(map(re.escape, substrings)))
return regex.sub(lambda match: substitutions[match.group(0)], string)
# domain = self.domaininput.text()
# url = self.urlinput.text()
# email = self.emailinput.text()
# email2 = self.emailinput2.text()
# username = self.usernameinput.text()
# clientip = self.clientipinput.text()
# date_time_input = self.dateTimeinput.text()
domain = 'wizardassistant.com'
url = 'https://wizardassistant.com'
email = '<EMAIL>'
email2 = '<EMAIL>'
username = 'cooluser666'
clientip = '192.168.10.12'
date_time_input = date.today()
# DomainInputField
# Email1InputField
# Email2InputField
# CPUsernameInputField
# ClientIPInputField
# DateTimeInputField
substitutions = {"DomainInputField": domain, "Email1InputField": email, "Email2InputField": email2,
"CPUsernameInputField": username, "ClientIPInputField": clientip,
"DateTimeInputField": date_time_input, }
def listclicked(index):
row = index.row()
print(row)
    cmd = model.record(row).field("command").value()
# cmd = projectModel.data(projectModel.index(row, 3))
print(cmd)
cmd_replaced = replace(cmd, substitutions)
# print(row)
# print(cmd)
print()
# print("id = %s" % projectModel.record(row).field(0).value().toString())
print("command = %s" % model.record(row).field(3).value())
print("adjusted command = %s" % cmd_replaced)
app = QApplication(sys.argv)
db = QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("/home/user/PycharmProjects/WizardAssistantPython/wizardassistant.db")
if db.open():
print('connect to SQL Server successfully')
else:
print('connection failed')
model = QSqlQueryModel()
model.setQuery("SELECT command_alias, command FROM commands WHERE category = '%s'" % command_category)
# model.setHeaderData(0, Qt.Horizontal, tr("Name"))
# model.setHeaderData(1, Qt.Horizontal, tr("Salary"))
view = QTableView()
view.setModel(model)
view.hideColumn(1) # hide id column
view.show()
db.close()
app.exec_()
| import re
import sys
from datetime import date
from PySide2.QtCore import Qt
from qtpy import QtSql
from PySide2.QtSql import QSqlDatabase, QSqlQueryModel, QSqlQuery
from PySide2.QtWidgets import QTableView, QApplication
command_category = 'FirewallChecks' # self.commandcategorydropdown
def replace(string, substitutions):
substrings = sorted(substitutions, key=len, reverse=True)
regex = re.compile('|'.join(map(re.escape, substrings)))
return regex.sub(lambda match: substitutions[match.group(0)], string)
# domain = self.domaininput.text()
# url = self.urlinput.text()
# email = self.emailinput.text()
# email2 = self.emailinput2.text()
# username = self.usernameinput.text()
# clientip = self.clientipinput.text()
# date_time_input = self.dateTimeinput.text()
domain = 'wizardassistant.com'
url = 'https://wizardassistant.com'
email = '<EMAIL>'
email2 = '<EMAIL>'
username = 'cooluser666'
clientip = '192.168.10.12'
date_time_input = date.today()
# DomainInputField
# Email1InputField
# Email2InputField
# CPUsernameInputField
# ClientIPInputField
# DateTimeInputField
substitutions = {"DomainInputField": domain, "Email1InputField": email, "Email2InputField": email2,
"CPUsernameInputField": username, "ClientIPInputField": clientip,
"DateTimeInputField": date_time_input, }
def listclicked(index):
row = index.row()
print(row)
    cmd = model.record(row).field("command").value()
# cmd = projectModel.data(projectModel.index(row, 3))
print(cmd)
cmd_replaced = replace(cmd, substitutions)
# print(row)
# print(cmd)
print()
# print("id = %s" % projectModel.record(row).field(0).value().toString())
print("command = %s" % model.record(row).field(3).value())
print("adjusted command = %s" % cmd_replaced)
app = QApplication(sys.argv)
db = QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("/home/user/PycharmProjects/WizardAssistantPython/wizardassistant.db")
if db.open():
print('connect to SQL Server successfully')
else:
print('connection failed')
model = QSqlQueryModel()
model.setQuery("SELECT command_alias, command FROM commands WHERE category = '%s'" % command_category)
# model.setHeaderData(0, Qt.Horizontal, tr("Name"))
# model.setHeaderData(1, Qt.Horizontal, tr("Salary"))
view = QTableView()
view.setModel(model)
view.hideColumn(1) # hide id column
view.show()
db.close()
app.exec_() | en | 0.329727 | # self.commandcategorydropdown # domain = self.domaininput.text() # url = self.urlinput.text() # email = self.emailinput.text() # email2 = self.emailinput2.text() # username = self.usernameinput.text() # clientip = self.clientipinput.text() # date_time_input = self.dateTimeinput.text() # DomainInputField # Email1InputField # Email2InputField # CPUsernameInputField # ClientIPInputField # DateTimeInputField # cmd = projectModel.data(projectModel.index(row, 3)) # print(row) # print(cmd) # print("id = %s" % projectModel.record(row).field(0).value().toString()) # model.setHeaderData(0, Qt.Horizontal, tr("Name")) # model.setHeaderData(1, Qt.Horizontal, tr("Salary")) # hide id column | 2.555063 | 3 |
web/evaluate/Parsers/RJParser.py | ChristoferHuynh/web | 0 | 6631581 |
'''
Created on May 3, 2017
@author: jesper
'''
import itertools
import yaml
class AuditModule():
@staticmethod
def read(file):
pass
@staticmethod
def evaluate(info, yaml_path):
pass
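# Illustrative usage sketch: each AuditModule subclass below pairs read(), which
# parses a collected report file into a dict, with evaluate(), which checks that
# dict against a YAML rule file via the compare() helper used throughout this
# module (not shown in this excerpt). The report and rule paths here are
# hypothetical placeholders.
def _example_audit_run():
    with open("reports/cron_at.txt") as report:  # hypothetical report path
        info = cron_at.read(report)
    return cron_at.evaluate(info, "rules/cron_at.yaml")  # hypothetical rule file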
class cron_at(AuditModule):
@staticmethod
def read(file):
info_dict = dict()
next_line = file.readline()
while next_line:
inner_values = next_line.split()
if "No such file or directory" in next_line:
                info_dict[inner_values[3][1:-2]] = ["No such file or directory"] # [1:-2] strips the surrounding quotation marks from the filename
else:
# [permissions][?][owner][group][size][month][day][hour:min][filename]
info_dict[inner_values[8]] = inner_values[0]
next_line = file.readline()
return info_dict
@staticmethod
def evaluate(info_dict, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for file_name in yaml_dict:
if info_dict.has_key(file_name):
info_value = info_dict[file_name]
for comparison in yaml_dict[file_name]:
yaml_values = yaml_dict[file_name][comparison]
message = compare(info_value, yaml_values, comparison)
if message is not None: return_string += message + "\n"
return return_string
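# Illustrative note on the rule file cron_at.evaluate() expects: a mapping from a
# file name (as parsed by cron_at.read() above) to one or more comparison entries
# handed to compare(). The comparison names and value shapes are whatever compare()
# understands; the entry below is only a hypothetical placeholder:
#
#   /etc/crontab:
#       <comparison>: <values>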
class crontab(AuditModule):
@staticmethod
def read(file):
values = dict()
notSetupString = "No crontab has been set up for the following: \n"
next_line = file.readline()[:-1]
while (next_line):
crontab = next_line.replace("no crontab for ", "")
values[crontab] = "no"
next_line = file.readline()[:-1]
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
blacklist = yaml_dict.pop("blacklist")
expected = yaml_dict.pop("expected")
for cronjob in blacklist:
if info.has_key(cronjob):
message = blacklist[cronjob]["msg"]
return_string += message + "\n"
for cronjob in expected:
if not info.has_key(cronjob):
message = expected[cronjob]["msg"]
return_string += message + "\n"
# for key in yaml_dict:
# if info.has_key(key):
# customer_value = info[key]
#
# for comparison in yaml_dict[key]:
# values = yaml_dict[key][comparison]
# print customer_value
# print values
# print comparison
# message = compare(customer_value, values, comparison)
# if message is not None: return_string += message + "\n"
return return_string
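# Illustrative note on the rule file crontab.evaluate() expects: two top-level
# mappings, "blacklist" and "expected", each mapping a user name to an entry with a
# "msg" string. A blacklist message is reported when that user shows up in the
# parsed "no crontab for ..." lines; an expected message is reported when the user
# does not show up there. The names and messages below are placeholders:
#
#   blacklist:
#       someuser: {msg: "..."}
#   expected:
#       backupuser: {msg: "..."}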
class diskvolume(AuditModule):
@staticmethod
def read(file):
values = dict()
file.readline() # Skip first line
next_line = file.readline()
column = ["filesystem", "size", "used", "avail", "use%", "mount"]
while next_line:
inner_dict = dict()
# [Filesystem][Size][Used][Avail][Use%][Mounted on]
inner_values = next_line.split()
for index in range(0, 6):
inner_dict[column[index]] = inner_values[index]
inner_dict["use%"] = inner_dict["use%"][:-1] # Removes the % sign
values[inner_values[5]] = inner_dict
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
info_copy = dict(info)
with open(yaml_path, "r") as stream:
loaded_data = yaml.load(stream)
for key in loaded_data:
if info.has_key(key):
customer_map = info[key]
for column in customer_map:
customer_value = customer_map[column]
if not loaded_data[key].has_key(column): continue
for comparison in loaded_data[key][column]:
values = loaded_data[key][column][comparison]
message = compare(customer_value, values, comparison)
if message is not None: return_string += message + "\n"
info_copy.pop(key)
for key in info_copy:
customer_map = info[key]
for column in customer_map:
customer_value = customer_map[column]
if not loaded_data["default"].has_key(column): continue
for comparison in loaded_data["default"][column]:
values = loaded_data["default"][column][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
message = message.replace("/fs/", key)
return_string += message + "\n"
return return_string
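# Illustrative note on the rule file diskvolume.evaluate() expects: a mapping from a
# mount point to per-column comparison entries (columns as stored by read():
# filesystem, size, used, avail, use%, mount), plus a "default" entry applied to
# every mount point not listed explicitly. Messages produced for "default" rules may
# contain the /fs/ placeholder, which evaluate() replaces with the mount point.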
class encrypted_disk(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while next_line:
inner_values = dict()
n_line_split = next_line.split()
for i in range(1, len(n_line_split)):
n_line_ssplit = n_line_split[i].split("=")
inner_values[n_line_ssplit[0]] = n_line_ssplit[1]
values[n_line_split[0]] = inner_values
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
uuid_dict = {}
for key in info:
for key_key in info[key]:
if ("UUID" in key_key):
if uuid_dict.has_key(info[key][key_key]):
uuid_dict[info[key][key_key]].append(key)
else:
uuid_dict[info[key][key_key]] = [key]
for uuid in uuid_dict:
duplicate_warning_msg = open("duplicate_uuid_warning_msg.txt", "r").read()
if len(uuid_dict[uuid]) > 1:
duplicate_warning_msg = duplicate_warning_msg.replace("/uuid/", uuid)
duplicate_warning_msg = duplicate_warning_msg.replace("/key_set/", str(set(uuid_dict[uuid])))
return_string += duplicate_warning_msg + "\n"
return return_string
class environment(AuditModule):
@staticmethod
def read(file):
values = dict()
while True:
nextLine = file.readline()
if (nextLine == ""):
break
innerValues = nextLine.split("=")
if (innerValues[0] == "LS_COLORS"): # Hard to parse and don't think it has anythign to do with security risks
continue
values[innerValues[0]] = innerValues[1][:-1]
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
# check if key exists in customer file
if info.has_key(key):
customer_value = info[key]
values = yaml_dict[key]
for comparison in values:
message = compare(customer_value, values[comparison], comparison)
if message is not None: return_string += message + "\n"
return return_string
class firewall(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while next_line:
inner_values = next_line.split()
if (inner_values and inner_values[0] == "Chain"):
chain = inner_values[1]
policy = inner_values[3].split(")")[0]
values[chain] = policy
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for trafic in yaml_dict:
columns = yaml_dict[trafic]
if yaml_dict[trafic].has_key("policy"):
for comparison in yaml_dict[trafic]["policy"]:
customer_value = info[trafic]
values = yaml_dict[trafic]["policy"][comparison]
message = compare(customer_value, values, comparison)
if message is not None: return_string += message + "\n"
return return_string
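# Illustrative note on the rule file firewall.evaluate() expects: a mapping from an
# iptables chain name (e.g. INPUT, FORWARD, OUTPUT) to a "policy" entry whose
# comparisons are checked against the policy parsed from the
# "Chain <name> (policy <policy>)" header lines. Hypothetical shape:
#
#   INPUT:
#       policy:
#           <comparison>: <values>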
class groups(AuditModule):
@staticmethod
def read(file):
info_dict = dict()
next_line = file.readline()[:-1]
while next_line:
inner_dict = dict()
inner_values = next_line.split(":")
inner_dict["group"] = inner_values[0]
inner_dict["password"] = inner_values[1]
inner_dict["id"] = inner_values[2]
inner_dict["users"] = inner_values[3]
info_dict[inner_dict["group"]] = inner_dict
next_line = file.readline()[:-1]
return info_dict
@staticmethod
def evaluate(info_dict, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
default_dict = yaml_dict.pop("default")
for key in yaml_dict:
if info_dict.has_key(key):
for column in yaml_dict[key]:
info_value = info_dict[key][column]
for comparison in yaml_dict[key][column]:
yaml_values = yaml_dict[key][column][comparison]
message = compare(info_value, yaml_values, comparison)
if message is not None:
message = message.replace("/users/", info_dict[key]["users"])
message = message.replace("/group/", info_dict[key]["group"])
return_string += message + "\n"
for key in info_dict:
for column in default_dict:
info_value = info_dict[key][column]
for comparison in default_dict[column]:
yaml_values = default_dict[column][comparison]
message = compare(info_value, yaml_values, comparison)
if message is not None:
message = message.replace("/users/", info_dict[key]["users"])
message = message.replace("/group/", info_dict[key]["group"])
return_string += message + "\n"
return return_string
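# Illustrative note on the rule file groups.evaluate() expects: a mapping from a
# group name to per-column comparison entries (columns as stored by read(): group,
# password, id, users), plus a "default" entry applied to every parsed group.
# Messages returned by compare() may contain the /users/ and /group/ placeholders,
# which evaluate() replaces with the group's member list and name.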
class lastlog(AuditModule):
# Unsure how to parse...
@staticmethod
def read(file):
value = dict()
last_dict = dict()
lastlog_dict = dict()
next_line = file.readline()
while next_line and not "wtmp begins " in next_line:
next_values = next_line.split()
if len(next_values) > 1:
last_dict[next_values[0]] = "yes"
next_line = file.readline()
next_line = file.readline() # Skip line
while next_line:
next_values = next_line[:-1].split(None, 1)
if len(next_values) > 1:
lastlog_dict[next_values[0]] = next_values[1]
next_line = file.readline()
value["last"] = last_dict
value["lastlog"] = lastlog_dict
return value
@staticmethod
def evaluate(info, yaml_path):
# Not sure how to evaluate...
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
last = yaml_dict.pop("last")
lastlog = yaml_dict.pop("lastlog")
info_last = info.pop("last")
info_lastlog = info.pop("lastlog")
for key in lastlog:
if info_lastlog.has_key(key):
for comparison in lastlog[key]:
customer_value = info_lastlog[key]
values = lastlog[key][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message + "\n"
for key in last:
if info_last.has_key(key):
message = last[key]["msg"]
if message is not None:
return_string += message + "\n"
return return_string
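# Illustrative note on the rule file lastlog.evaluate() expects: two top-level
# mappings. "lastlog" maps a user name to comparison entries checked against that
# user's parsed lastlog line, and "last" maps a user name to an entry with a "msg"
# string that is reported whenever the user appears in the parsed `last` output.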
class modprobe(AuditModule):
@staticmethod
def read(file):
values = dict()
modprobes = []
while True:
nextLine = file.readline()
if ("Module" in nextLine): break
modprobes.append(nextLine[:-1])
values["modprobe.d"] = modprobes
while True:
nextLine = file.readline()
if (nextLine == ""): break
innerValues = nextLine.split()
values[innerValues[0]] = innerValues
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
# Important configs
for config in yaml_dict["important_configs"]:
if config == "default":
important_configs = yaml_dict["important_configs"]["default"]["config"]
for i_config in important_configs:
if i_config not in info["modprobe.d"]:
message = yaml_dict["important_configs"]["default"]["message"]
message = message.replace("/conf/", i_config)
return_string += message + "\n"
elif config not in info["modprobe.d"]:
message = yaml_dict["important_configs"][config]["message"]
return_string += message + "\n"
# Important modules
for module in yaml_dict["important_modules"]:
if module == "default":
important_modules = yaml_dict["important_modules"]["default"]["module"]
for i_module in important_modules:
if i_module not in info.keys():
message = yaml_dict["important_modules"]["default"]["message"]
message = message.replace("/module/", i_module)
return_string += message + "\n"
elif module not in info.keys():
message = yaml_dict["important_modules"][module]["message"]
return_string += message + "\n"
# Blacklisted configs
for config in yaml_dict["blacklisted_configs"]:
if config == "default":
important_configs = yaml_dict["blacklisted_configs"]["default"]["config"]
for i_config in important_configs:
if i_config in info["modprobe.d"]:
message = yaml_dict["blacklisted_configs"]["default"]["message"]
message = message.replace("/conf/", i_config)
return_string += message + "\n"
elif config in info["modprobe.d"]:
message = yaml_dict["blacklisted_configs"][config]["message"]
return_string += message + "\n"
# Blacklisted modules
for module in yaml_dict["blacklisted_modules"]:
if module == "default":
important_modules = yaml_dict["blacklisted_modules"]["default"]["module"]
for i_module in important_modules:
if i_module in info.keys():
message = yaml_dict["blacklisted_modules"]["default"]["message"]
message = message.replace("/module/", i_module)
return_string += message + "\n"
elif module in info.keys():
message = yaml_dict["blacklisted_modules"][module]["message"]
return_string += message + "\n"
# modprobe_file = open("modprobe_folders", "r")
#
# config_list = []
# blacklist = []
# important_list = []
#
# customer_modules = []
#
#
# next_line = modprobe_file.readline() #Skip line
# next_line = modprobe_file.readline()
#
# while next_line and not next_line.startswith("#"):
# config_list.append(next_line[:-1])
# next_line = modprobe_file.readline()
#
# next_line = modprobe_file.readline() # Skip line
#
# while next_line and not next_line.startswith("#"):
# blacklist.append(next_line[:-1])
# next_line = modprobe_file.readline()
#
# next_line = modprobe_file.readline() # Skip line
#
# while next_line and not next_line.startswith("#"):
# important_list.append(next_line[:-1])
# next_line = modprobe_file.readline()
#
# customer_config_list = dict["modprobe.d"].split("%")
#
# dict.pop("modprobe.d", None)
# dict.pop("", None)
#
# for key in dict:
# customer_modules.append(key)
#
# for config in config_list:
# if config not in customer_config_list:
# return_string += "The expected file " + config + " is not in your system.\n"
#
# for module in customer_modules:
# if module in blacklist:
# return_string += "The system contains the blacklisted module " + module + "\n"
#
# for module in important_list:
# if module not in customer_modules:
# return_string += "The system does not contain the important module " + module + "\n"
return return_string
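# Illustrative note on the rule file modprobe.evaluate() expects: four top-level
# mappings - important_configs, important_modules, blacklisted_configs and
# blacklisted_modules. Each maps a config or module name to an entry with a
# "message" string, and each may also hold a "default" entry carrying a list under
# "config" or "module" plus a message in which the /conf/ or /module/ placeholder is
# replaced with the name being reported.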
class networkvolume(AuditModule):
@staticmethod
def read(file):
values = dict()
mount_dict = dict()
fstab_dict = dict()
next_line = file.readline()
while next_line and "#" not in next_line and not next_line.isspace():
innerValues = next_line.split()
mount_dict[innerValues[2]] = innerValues
next_line = file.readline()
while next_line and "#" not in next_line and not next_line.isspace():
inner_dict = dict()
if ("#" in next_line):
next_line = file.readline()
continue
inner_values = next_line.split()
inner_dict["file_system"] = inner_values[0]
inner_dict["mount_point"] = inner_values[1]
inner_dict["type"] = inner_values[2]
options = inner_values[3].split(",")
inner_dict["options"] = options
inner_dict["dump"] = inner_values[4]
inner_dict["pass"] = inner_values[5]
fstab_dict[inner_dict["mount_point"]] = inner_dict
next_line = file.readline()
values["mount"] = mount_dict
values["fstab"] = fstab_dict
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
uuid_dict = dict()
info_mount = info["mount"]
info_fstab = info["fstab"]
with open(yaml_path, "r") as stream:
warnings = yaml.load(stream)
# check duplicates
for key in info_fstab:
uuid = info_fstab[key]["file_system"].split("=")[1]
if uuid_dict.has_key(uuid):
uuid_dict[uuid].append(info_fstab[key]["mount_point"])
else:
uuid_dict[uuid] = [info_fstab[key]["mount_point"]]
for key in uuid_dict:
if len(uuid_dict[key]) > 1:
message = warnings["duplicates"]
message = message.replace("/uuid/", key).replace("/key_set/", str(uuid_dict[key]))
return_string += message + "\n"
# #
# check for username/password and backup, pass
for key in info_fstab:
# check for username/password
options = info_fstab[key]["options"]
for option in options:
if "password" in option or "username" in option:
message = warnings["username_password"]
return_string += message + "\n"
# checks for backup
backup = info_fstab[key]["dump"]
            if backup != "1":  # the dump field is parsed as a string
message = warnings["backup"]
return_string += message + "\n"
# checks for pass
pass_flag = info_fstab[key]["pass"]
if key != "/" and pass_flag == "1":
message = warnings["pass_non_root"]
return_string += message + "\n"
elif key == "/" and pass_flag != "1":
message = warnings["pass_root"]
return_string += message + "\n"
return return_string
class open_connections(AuditModule):
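    """Audit of open network connections: read() collects the connection listing into a dict;
    evaluate() is still a stub and does not apply any rules yet."""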
@staticmethod
def read(file):
values = dict()
file.readline() # Skip first line
next_line = file.readline()
while (next_line and not "COMMAND" in next_line):
innerValues = next_line.split()
values[innerValues[4]] = innerValues
next_line = file.readline()
while (next_line):
innerValues = next_line.split()
# Unsure what should be the key..
values[innerValues[0] + "#" + innerValues[3]] = innerValues
next_line = file.readline()
return values
@staticmethod
    def evaluate(info, yaml_path):
        """Not implemented yet: should build lists of listening/established ports and make
        sure none of them are considered bad according to the open_connections rules file."""
        return_string = ""
        return return_string
class passwdpolicy(AuditModule):
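    """Audit of password policy settings (key/value pairs such as ENCRYPT_METHOD and
    PASS_MIN_DAYS), checked against the YAML rules via compare()."""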
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while(next_line):
if "#" not in next_line and not next_line.isspace():
key_value = next_line.split()
values[key_value[0]] = key_value[1]
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
if info.has_key(key):
for comparison in yaml_dict[key]:
customer_value = info[key]
values = yaml_dict[key][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message + "\n"
# passwd_file = open("passwdpolicy", "r")
#
# next_line = passwd_file.readline()
#
# important_keys = []
# passwd_dict = dict()
#
# while next_line:
#
# if (next_line.isspace() or next_line.startswith("%")):
# next_line = passwd_file.readline()
# continue
#
# passwd_key = next_line.split("=")[0]
#
# passwd_values = next_line.split("=")[1][:-1]
#
# passwd_dict[passwd_key] = passwd_values
# next_line = passwd_file.readline()
#
# print passwd_dict
# print info
#
# for key in passwd_dict:
# #If key is in customer
# if info.has_key(key[1:]):
# #If key is dangerous
# if (key.startswith("^")):
# return_string += "The key " + key + " is considered dangerous.\n"
#
# else:
# customer_value = info[key[1:]]
# values = passwd_dict[key]
# print key
# print "customer: " + customer_value
# print "values: " + str(values)
# #If value is dangerous
# if "^" + customer_value in values:
# return_string += "The value " + customer_value + " is considered dangerous. Consider switching to " + str([x for x in values if not x.startswith("^")] + ". prefeably one of " + str([x for x in values if x.startswith("*")])) + "\n"
#
# #If value is not prefered
# if "<" + customer_value in values:
# return_string += "The value " + customer_value + " is not considered preferable. Consider switching to one of " + str([x for x in values if x.startswith("*")]) + "\n"
#
# #If not found in customer
# else:
# #If key is important
# if (key.startswith("#")):
# important_keys.append(key[1:])
# #Add recomended value?
#
# if len(important_keys) > 0:
# return_string += "The following important keys were not found: " + str(important_keys) + "\n"
#
"""if info["ENCRYPT_METHOD"] == "MD5":
return_string = (return_string + "Your currently password encrypting method is MD5. " +
"\nYou should consider changing the encrypting method to SHA256 or SHA516.")
if info["PASS_MIN_DAYS"] > '0':
return_string = (return_string + "Warning: You have to wait " + dict["PASS_MIN_DAYS"] +
" days to change password, this can be a security risk in case of accidental password change.")
"""
return return_string
class processes(AuditModule):
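    """Audit of running processes: read() parses `ps aux`-style output into one dict per process
    (keyed by COMMAND); evaluate() checks important/blacklisted processes and CPU/MEM thresholds."""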
@staticmethod
def read(file):
values = dict()
        file.readline()  # Skip the header line (USER PID %CPU ...)
        next_line = file.readline()
while (next_line):
inner_dict = dict()
next_line = next_line[:-1]
inner_values = next_line.split(None, 10)
inner_dict["USER"] = inner_values[0]
inner_dict["PID"] = inner_values[1]
inner_dict["%CPU"] = inner_values[2]
inner_dict["%MEM"] = inner_values[3]
inner_dict["VSZ"] = inner_values[4]
inner_dict["RSS"] = inner_values[5]
inner_dict["TTY"] = inner_values[6]
inner_dict["STAT"] = inner_values[7]
inner_dict["START"] = inner_values[8]
inner_dict["TIME"] = inner_values[9]
inner_dict["COMMAND"] = inner_values[10]
values[inner_dict["COMMAND"]] = inner_dict
next_line = file.readline()
# next_line = file.readline()
#
# while (next_line):
# splitted_line = next_line.split()
# innerValues = ["" for i in range(11)] # Init the list with empty strings
# for i in range (0, 10):
# innerValues[i] = splitted_line[i]
# for i in range (10, len(splitted_line)):
# innerValues[10] = str(innerValues[10]) + splitted_line[i] + " "
#
# innerValues[10] = innerValues[:-1]
# next_line = file.readline()
#
#
# values[innerValues[1]] = innerValues
return values
@staticmethod
def evaluate(info, yaml_path): # change to dict if using commented code?
return_string = ""
info_copy = dict(info)
with open(yaml_path, 'r') as stream:
yaml_dict = yaml.load(stream)
default = yaml_dict.pop("default")
important_processes = yaml_dict.pop("important_processes")
blacklisted_processes = yaml_dict.pop("blacklisted_processes")
# important processes
for key in important_processes:
if key == "default":
for process in important_processes["default"]["process"]:
if not info.has_key(process):
message = important_processes["default"]["message"]
message = message.replace("/process/", process)
return_string += message + "\n"
elif not info_copy.has_key(key):
return_string += important_processes[key]["message"] + "\n"
# blacklisted processes
for key in blacklisted_processes:
if key == "default":
for process in blacklisted_processes["default"]["process"]:
if info.has_key(process):
message = blacklisted_processes["default"]["message"]
message = message.replace("/process/", process)
return_string += message + "\n"
elif info_copy.has_key(key):
return_string += blacklisted_processes[key]["message"] + "\n"
# default value check (CPU & MEM usage)
# print info_copy
for key in info_copy:
for column in default:
customer_value = info_copy[key][column]
for comparison in default[column]:
values = default[column][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
message = message.replace("/process/", key)
return_string += message + "\n"
# other keys
for key in yaml_dict:
if info_copy.has_key(key):
for column in yaml_dict[key]:
for comparison in yaml_dict[key][column]:
if info_copy[key].has_key(column):
customer_value = info_copy[key][column]
values = yaml_dict[key][column][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message
# processes_file = open("processes", "r")
#
# next_line = processes_file.readline() #Skip first line
# next_line = processes_file.readline()
#
# expected_processes = []
# non_root_blacklist = []
# blacklist = []
#
#
# while next_line and "#" not in next_line and not next_line.isspace():
# expected_processes.append(next_line[:-1])
# next_line = processes_file.readline()
#
# next_line = processes_file.readline()
#
# while next_line and "#" not in next_line and not next_line.isspace():
# non_root_blacklist.append(next_line[:-1])
# next_line = processes_file.readline()
#
# next_line = processes_file.readline()
#
#
# while next_line and "#" not in next_line and not next_line.isspace():
# blacklist.append(next_line[:-1])
# next_line = processes_file.readline()
#
#
#
# for key in dict.iterkeys():
# customer_process = dict[key][10][:-1]
#
# #if process is blacklist
# if customer_process in blacklist:
# return_string += "The process " + customer_process + " currently running on your service is in our blacklist\n"
#
# #if process is non root
# elif customer_process in non_root_blacklist and dict[key][0 != "root"]:
# return_string += "The process " + customer_process + " currently running on your service as a non-root. This is considered a security risk\n"
#
# #if expected process is found, it removes it from the exepcted processes list
# if customer_process in expected_processes:
# expected_processes = [x for x in expected_processes if x != customer_process]
#
# #if expected_processes is NOT empty
# if expected_processes:
# return_string += "The following processes were expected but could not be found on your system: " + str(expected_processes) + "\n"
return return_string
class samba(AuditModule):
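    """Audit of /etc/samba/smb.conf: read() maps each setting to its value and the [section] it
    appears in; evaluate() checks "section#%value" pairs against the YAML rules."""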
@staticmethod
def read(file):
        values = dict()
        level = ""  # current [section]; initialised in case a setting appears before any section header
        next_line = file.readline()
while (next_line):
if "No such file or directory" in next_line:
values["/etc/samba/smb.conf"] = "No such file or directory"
return values
if "#" in next_line or next_line.isspace():
next_line = file.readline()
continue
if "[" in next_line:
level = next_line[1:-2]
next_line = file.readline()
continue
next_values = next_line.split(" = ")
next_dict = dict()
next_dict['value'] = next_values[1][:-1]
next_dict['level'] = level
values[next_values[0].lstrip()] = next_dict
next_line = file.readline()
return values
@staticmethod
    def evaluate(info, yaml_path):
        return_string = ""
        with open(yaml_path, 'r') as stream:
            yaml_dict = yaml.load(stream)
            for key in yaml_dict:
                if info.has_key(key):
                    # pop out value (otherwise going through comparisons might give issues)
                    customer_value = info[key]['value']
                    customer_level = info[key]['level']
                    customer_level_value = customer_level + "#%" + customer_value
                    for comparison in yaml_dict[key]:
                        yaml_values = yaml_dict[key][comparison]
                        msg = compare(customer_level_value, yaml_values, comparison)
                        if msg is not None:
                            msg = msg.replace("/key/", key)
                            msg = msg.replace("/level/", customer_level)
                            msg = msg.replace("/value/", customer_value)
                            return_string += msg + "\n"
        return return_string
# print "samba/eval"
#
# print info
# return_string = ""
#
#
# samba_file = open(yaml_path, "r")
#
# samba_dict = dict()
#
# samba_lists = [[]]
#
#
# samba_important_keys = []
#
# samba_lists[0] = ([1, 2, 3])
# samba_lists.append([17, 6, 5])
#
# next_line = samba_file.readline()
#
# while next_line:
# if next_line.startswith("%") or next_line.isspace():
# next_line = samba_file.readline()
# continue
# samba_k_v_l = next_line[:-1].split("=")
# samba_key = samba_k_v_l[0]
# samba_v_l = samba_k_v_l[1].split(",")
#
#
# next_line = samba_file.readline()
# samba_values = samba_v_l[0].split("|")
# samba_levels = samba_v_l[1].split("|")
#
# if samba_key.startswith("#"): samba_important_keys.append(samba_key[1:])
#
# samba_dict[samba_key] = [samba_values, samba_levels]
#
#
# for key in samba_dict:
# if key[1:] in info.keys():
#
# # if Dangerous key
# if key.startswith("^"):
# return_string += "The key " + key + " is considered dangerous.\n"
#
# else:
# customer_value = info[key[1:]][0]
# customer_level = info[key[1:]][1]
# samba_values = samba_dict[key][0]
# samba_levels = samba_dict[key][1]
# # if Dangerous level
# if "^" + customer_level in samba_levels:
# return_string += "The level for the key " + key[1:] + " is considered dangerous. Consider changing to one of " + str([x[1:] for x in samba_levels if not x.startswith("^")]) + " preferably one of " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n"
#
# # if not preferable level
# elif "<" + customer_level in samba_levels:
# if len([x for x in samba_levels if x.startswith("*")]) > 0:
# return_string += "The level for the environment key " + key[1:] + " is not considered preferable. Consider changing to one of " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n"
#
# # cant find level in samba txt
# elif "*" + customer_level not in samba_levels:
# return_string += "The level " + customer_value + " for the key " + key[1:] + " was not found in our list of \"predetermined\" levels. \n\tRecommended levels: " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n\tOkay levels: " + str([x[1:] for x in samba_levels if x.startswith("<")]) + "\n"
#
#
# # if Dangerous value
# if "^" + customer_value in samba_values:
# return_string += "The value for the key " + key[1:] + " is considered dangerous. Consider changing to one of " + str([x[1:] for x in samba_values if not x.startswith("^")]) + " preferably one of " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n"
#
# # if not preferable value
# elif "<" + customer_value in samba_values:
# if len([x for x in samba_levels if x.startswith("*")]) > 0:
# return_string += "The value for the environment key " + key[1:] + " is not considered preferable. Consider changing to one of " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n"
#
# # cant find value in samba txt
# elif "*" + customer_level not in samba_values:
# return_string += "The value " + customer_value + " for the key " + key[1:] + " was not found in our list of \"predetermined\" values. \n\tRecommended values: " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n\tOkay levels: " + str([x[1:] for x in samba_values if x.startswith("<")]) + "\n"
#
# samba_important_keys = [x for x in samba_important_keys if x != key[1:]]
# # cant find key in samba
#
# if len(samba_important_keys) > 0:
# return_string += "The following keys were not found in your system: " + str(samba_important_keys) + ". They are considered important."
#
# return return_string
#
class sshd(AuditModule):
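    """Audit of /etc/ssh/sshd_config: read() collects directive/value pairs; evaluate() compares
    each directive against the YAML rules."""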
@staticmethod
def read(file):
info_dict = dict()
next_line = file.readline()
while (next_line):
            if "No such file or directory" in next_line:
                info_dict["/etc/ssh/sshd_config"] = "No such file or directory"
                return info_dict  # nothing to parse when the config file is missing
if "#" in next_line or next_line.isspace():
next_line = file.readline()
continue
next_values = next_line.split()
info_dict[next_values[0]] = next_values[1]
next_line = file.readline()
return info_dict
@staticmethod
def evaluate(info_dict, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
if info_dict.has_key(key):
info_value = info_dict[key]
yaml_values = yaml_dict[key]
for comparison in yaml_values:
yaml_value = yaml_values[comparison]
message = compare(info_value, yaml_value, comparison)
if message is not None:
return_string += message + "\n"
return return_string
class startup(AuditModule):
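    """Audit of startup scripts: read() parses an `ls -l` listing of /etc/init.d; evaluate() flags
    missing expected scripts, blacklisted scripts and group/world-writable permissions."""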
@staticmethod
def read(file):
values = dict()
file.readline() # Skip first line (/etc/init.d)
file.readline() # Skip second line (total 216) //maybe use?
next_line = file.readline()
while (next_line):
next_values = next_line.split()
values[next_values[8]] = next_values
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
blacklist = []
expected = []
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
expected = yaml_dict.pop("expected")
blacklist = yaml_dict.pop("blacklisted")
permission = yaml_dict.pop("permission")
# expected scripts
for script in expected["scripts"]:
if script not in info:
message = expected["msg"]
message = message.replace("/script/", script)
return_string += message + "\n"
# blacklisted scripts
for script in blacklist["scripts"]:
if script in info:
message = blacklist["msg"]
message = message.replace("/script/", script)
return_string += message + "\n"
# check permissions
for key, value in info.iteritems():
permissions = value[0]
permissions = list(permissions)
if permissions[5] == "w" or permissions[8] == "w":
message = permission["msg"]
message = message.replace("/script/", key)
return_string += message + "\n"
return return_string
class sudoers(AuditModule):
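    """Audit of /etc/sudoers: read() parses each entry into [hosts, run_as_users, run_as_groups,
    command]; evaluate() flags overly broad privileges and a missing env_reset."""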
@staticmethod
def read(file):
values = dict()
username = ""
hosts = ""
run_as_users = ""
run_as_groups = ""
command = ""
next_line = file.readline()
while (next_line):
group = False
if "#" in next_line or next_line.isspace():
next_line = file.readline()
continue
if "Defaults" in next_line:
inner_values = next_line.split()
tmp = inner_values[1].split("=")
username = tmp[0]
values[username] = ['', '', '', command]
next_line = file.readline()
continue
inner_values = next_line.split()
username = inner_values[0]
command = inner_values[2]
inner_values = inner_values[1].split("=")
hosts = inner_values[0]
inner_values = inner_values[1].split(":")
if (len(inner_values) > 1):
run_as_users = inner_values[0][1:]
run_as_groups = inner_values[1][:-1]
else:
                run_as_users = inner_values[0][1:-1]  # e.g. "(ALL)" -> "ALL"
values[username] = [hosts, run_as_users, run_as_groups, command]
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
        if info.has_key("env_reset"):
            return_string += "env_reset is set. The terminal environment will be reset for sudo sessions, removing user variables and clearing potentially harmful environment variables.\n\n"
        else:
            return_string += "The env_reset variable has not been set. You should add it to /etc/sudoers.\n\n"
for key, value in info.iteritems():
if key == "secure_path":
if value[3] != "[\'\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin\"\']":
continue
            # flag user entries where hosts, run-as users, run-as groups and the command are all "ALL"
            if value[0] == "ALL" and value[1] == "ALL" and value[2] == "ALL" and value[3] == "ALL" and ("root" not in key) and ("%" not in key):
return_string += "User: " + "\"" + key + "\"" + " has super user rights.\n\n"
continue
if (value[0] and value[2] and value[3] == "ALL") and (value[1] == '') and ("root" not in key) and ("%admin" not in key) and ("%sudo" not in key):
return_string += "Members of group: " + "\"" + key + "\"" + " may gain root privileges.\n\n"
continue
if (value[0] and value[1] and value[2] and value[3] == "ALL") and ("root" not in key) and ("%admin" not in key) and ("%sudo" not in key):
return_string += "Members of sudo group: " + "\"" + key + "\"" + " can execute any command\n\n"
continue
return return_string
class suid_files(AuditModule):
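    """Audit of SUID files: read() collects the raw file list; no evaluation rules yet."""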
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while (next_line):
values[next_line] = next_line
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
return return_string
class system(AuditModule):
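    """Audit of general system information: read() collects the raw lines; no evaluation rules yet."""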
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while (next_line):
values[next_line] = next_line
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
return return_string
class users(AuditModule):
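    """Audit of /etc/passwd: read() parses each account into a per-user dict; evaluate() checks
    the fields against the YAML rules."""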
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while (next_line):
inner_dict = dict()
inner_values = next_line[:-1].split(":", 6)
inner_dict["username"] = inner_values[0]
inner_dict["password"] = inner_values[1]
inner_dict["user_id"] = inner_values[2]
inner_dict["group_id"] = inner_values[3]
inner_dict["user_info"] = inner_values[4]
inner_dict["home_dir"] = inner_values[5]
inner_dict["shell"] = inner_values[6]
values[inner_dict["username"]] = inner_dict
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, 'r') as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
if info.has_key(key):
for column in yaml_dict[key]:
for comparison in yaml_dict[key][column]:
values = yaml_dict[key][column][comparison]
customer_value = info[key][column]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message
# for key in dict:
#
# risks = [False, False, False]
#
# value = dict[key]
# if value[2] == "0" and not key == "root":
# return_string = return_string + "User " + "'" + key + "'" + " has super user rights\n"
# risks[0] = True
#
# if value[1] == "!":
# return_string = return_string = "User " + "'" + key + "'" + " is stored in /etc/security/passwd and is not encrypted\n"
# risks[1] = True
#
# elif value[1] == "*":
# return_string = return_string + "User " + "'" + key + "'" + " has an invalid password\n"
# risks[2] = True
#
#
# if risks[0]:
# return_string += "\nYou should change the users' priviliges"
#
# if risks[1]:
# return_string += "\nYou should encrypt the users' password"
#
# if risks[2]:
# return_string += "\nYou should change users' password to a valid one"
#
return return_string
def compare(customer_value, values, comparison):
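    """Compare a customer value against a YAML rule and return a warning message, or None.

    Supported comparison keywords: "eq" (must equal the given value), "neq" (must not be one of
    the listed values), "nlt" (must not be lower than), "ngr" (must not be greater than),
    "nbtwn" (must not fall inside the listed ranges), "in" (must be one of the listed values)
    and "permissions" (rwx string checks per user/group/other). An illustrative "nlt" rule might
    look like {"value": 8, "msg": "...", "severity": "..."} -- the real rules live in the
    per-module YAML files.
    """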
# Equal
if comparison == "eq":
value = values.keys()[0]
if customer_value != value:
message = values[value]["msg"]
severity = values[value]["severity"]
return message
# Not equal
if comparison == "neq":
values = values["values"]
if customer_value in values.keys():
message = values[customer_value]["msg"]
severity = values[customer_value]["severity"]
return message
if comparison == "nlt":
value = values["value"]
if int(customer_value) < int(value):
message = values["msg"]
severity = values["severity"]
return message
if comparison == "ngr":
value = values["value"]
if float(customer_value) > float(value):
message = values["msg"]
return message
if comparison == "nbtwn":
values = values["values"]
for message in values:
for ranges in values[message]["ranges"]:
range_max = max(ranges)
range_min = min(ranges)
if int(customer_value) < range_max and int(customer_value) > range_min:
severity = values[message]["severity"]
return message
if comparison == "in":
if customer_value not in values["values"]:
severity = values["severity"]
message = values["msg"]
return message
if comparison == "permissions":
for permission_group in values:
if permission_group == "other":
other_rwx = customer_value[7:]
for permission in values[permission_group]:
if permission in other_rwx:
                        message = values[permission_group][permission]["msg"]
return message
if permission_group == "user":
user_rwx = customer_value[1:4]
for permission in values[permission_group]:
if permission in user_rwx:
                        message = values[permission_group][permission]["msg"]
return message
if permission_group == "group":
group_rwx = customer_value[4:7]
for permission in values[permission_group]:
if permission in group_rwx:
                        message = values[permission_group][permission]["msg"]
return message
pass
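# Example of how one of these audit modules is typically driven (illustrative only; the real
# driver script lives elsewhere and the file names below are made up). Only the
# read(file) -> dict and evaluate(info, yaml_path) -> str contract comes from this module:
#
#     with open("dumps/sshd") as dump:
#         info = sshd.read(dump)
#     print sshd.evaluate(info, "rules/sshd.yaml")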
prefeably one of " + str([x for x in values if x.startswith("*")])) + "\n" # # #If value is not prefered # if "<" + customer_value in values: # return_string += "The value " + customer_value + " is not considered preferable. Consider switching to one of " + str([x for x in values if x.startswith("*")]) + "\n" # # #If not found in customer # else: # #If key is important # if (key.startswith("#")): # important_keys.append(key[1:]) # #Add recomended value? # # if len(important_keys) > 0: # return_string += "The following important keys were not found: " + str(important_keys) + "\n" # if info["ENCRYPT_METHOD"] == "MD5": return_string = (return_string + "Your currently password encrypting method is MD5. " + "\nYou should consider changing the encrypting method to SHA256 or SHA516.") if info["PASS_MIN_DAYS"] > '0': return_string = (return_string + "Warning: You have to wait " + dict["PASS_MIN_DAYS"] + " days to change password, this can be a security risk in case of accidental password change.") # Skip first line # next_line = file.readline() # # while (next_line): # splitted_line = next_line.split() # innerValues = ["" for i in range(11)] # Init the list with empty strings # for i in range (0, 10): # innerValues[i] = splitted_line[i] # for i in range (10, len(splitted_line)): # innerValues[10] = str(innerValues[10]) + splitted_line[i] + " " # # innerValues[10] = innerValues[:-1] # next_line = file.readline() # # # values[innerValues[1]] = innerValues # change to dict if using commented code? # important processes # blacklisted processes # default value check (CPU & MEM usage) # print info_copy # other keys # processes_file = open("processes", "r") # # next_line = processes_file.readline() #Skip first line # next_line = processes_file.readline() # # expected_processes = [] # non_root_blacklist = [] # blacklist = [] # # # while next_line and "#" not in next_line and not next_line.isspace(): # expected_processes.append(next_line[:-1]) # next_line = processes_file.readline() # # next_line = processes_file.readline() # # while next_line and "#" not in next_line and not next_line.isspace(): # non_root_blacklist.append(next_line[:-1]) # next_line = processes_file.readline() # # next_line = processes_file.readline() # # # while next_line and "#" not in next_line and not next_line.isspace(): # blacklist.append(next_line[:-1]) # next_line = processes_file.readline() # # # # for key in dict.iterkeys(): # customer_process = dict[key][10][:-1] # # #if process is blacklist # if customer_process in blacklist: # return_string += "The process " + customer_process + " currently running on your service is in our blacklist\n" # # #if process is non root # elif customer_process in non_root_blacklist and dict[key][0 != "root"]: # return_string += "The process " + customer_process + " currently running on your service as a non-root. 
This is considered a security risk\n" # # #if expected process is found, it removes it from the exepcted processes list # if customer_process in expected_processes: # expected_processes = [x for x in expected_processes if x != customer_process] # # #if expected_processes is NOT empty # if expected_processes: # return_string += "The following processes were expected but could not be found on your system: " + str(expected_processes) + "\n" #pop out value(otherwise going through comparisons might give issue) # print "samba/eval" # # print info # return_string = "" # # # samba_file = open(yaml_path, "r") # # samba_dict = dict() # # samba_lists = [[]] # # # samba_important_keys = [] # # samba_lists[0] = ([1, 2, 3]) # samba_lists.append([17, 6, 5]) # # next_line = samba_file.readline() # # while next_line: # if next_line.startswith("%") or next_line.isspace(): # next_line = samba_file.readline() # continue # samba_k_v_l = next_line[:-1].split("=") # samba_key = samba_k_v_l[0] # samba_v_l = samba_k_v_l[1].split(",") # # # next_line = samba_file.readline() # samba_values = samba_v_l[0].split("|") # samba_levels = samba_v_l[1].split("|") # # if samba_key.startswith("#"): samba_important_keys.append(samba_key[1:]) # # samba_dict[samba_key] = [samba_values, samba_levels] # # # for key in samba_dict: # if key[1:] in info.keys(): # # # if Dangerous key # if key.startswith("^"): # return_string += "The key " + key + " is considered dangerous.\n" # # else: # customer_value = info[key[1:]][0] # customer_level = info[key[1:]][1] # samba_values = samba_dict[key][0] # samba_levels = samba_dict[key][1] # # if Dangerous level # if "^" + customer_level in samba_levels: # return_string += "The level for the key " + key[1:] + " is considered dangerous. Consider changing to one of " + str([x[1:] for x in samba_levels if not x.startswith("^")]) + " preferably one of " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n" # # # if not preferable level # elif "<" + customer_level in samba_levels: # if len([x for x in samba_levels if x.startswith("*")]) > 0: # return_string += "The level for the environment key " + key[1:] + " is not considered preferable. Consider changing to one of " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n" # # # cant find level in samba txt # elif "*" + customer_level not in samba_levels: # return_string += "The level " + customer_value + " for the key " + key[1:] + " was not found in our list of \"predetermined\" levels. \n\tRecommended levels: " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n\tOkay levels: " + str([x[1:] for x in samba_levels if x.startswith("<")]) + "\n" # # # # if Dangerous value # if "^" + customer_value in samba_values: # return_string += "The value for the key " + key[1:] + " is considered dangerous. Consider changing to one of " + str([x[1:] for x in samba_values if not x.startswith("^")]) + " preferably one of " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n" # # # if not preferable value # elif "<" + customer_value in samba_values: # if len([x for x in samba_levels if x.startswith("*")]) > 0: # return_string += "The value for the environment key " + key[1:] + " is not considered preferable. Consider changing to one of " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n" # # # cant find value in samba txt # elif "*" + customer_level not in samba_values: # return_string += "The value " + customer_value + " for the key " + key[1:] + " was not found in our list of \"predetermined\" values. 
\n\tRecommended values: " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n\tOkay levels: " + str([x[1:] for x in samba_values if x.startswith("<")]) + "\n" # # samba_important_keys = [x for x in samba_important_keys if x != key[1:]] # # cant find key in samba # # if len(samba_important_keys) > 0: # return_string += "The following keys were not found in your system: " + str(samba_important_keys) + ". They are considered important." # # return return_string # # Skip first line (/etc/init.d) # Skip second line (total 216) //maybe use? # expected scripts # blacklisted scripts # check permissions # for key in dict: # # risks = [False, False, False] # # value = dict[key] # if value[2] == "0" and not key == "root": # return_string = return_string + "User " + "'" + key + "'" + " has super user rights\n" # risks[0] = True # # if value[1] == "!": # return_string = return_string = "User " + "'" + key + "'" + " is stored in /etc/security/passwd and is not encrypted\n" # risks[1] = True # # elif value[1] == "*": # return_string = return_string + "User " + "'" + key + "'" + " has an invalid password\n" # risks[2] = True # # # if risks[0]: # return_string += "\nYou should change the users' priviliges" # # if risks[1]: # return_string += "\nYou should encrypt the users' password" # # if risks[2]: # return_string += "\nYou should change users' password to a valid one" # # Equal # Not equal | 2.135804 | 2 |
aidants_connect_web/tests/test_functional/test_view_autorisations.py | betagouv/Aidants_Connect | 16 | 6631582 | <filename>aidants_connect_web/tests/test_functional/test_view_autorisations.py<gh_stars>10-100
from datetime import timedelta
from django.test import tag
from django.utils import timezone
from aidants_connect_web.tests.factories import (
AidantFactory,
AutorisationFactory,
MandatFactory,
UsagerFactory,
)
from aidants_connect_web.tests.test_functional.testcases import FunctionalTestCase
from aidants_connect_web.tests.test_functional.utilities import login_aidant
@tag("functional")
class ViewAutorisationsTests(FunctionalTestCase):
def setUp(self):
self.aidant = AidantFactory(email="<EMAIL>")
device = self.aidant.staticdevice_set.create(id=self.aidant.id)
device.token_set.create(token="<PASSWORD>")
self.usager_alice = UsagerFactory(given_name="Alice", family_name="Lovelace")
self.usager_josephine = UsagerFactory(
given_name="Joséphine", family_name="Dupont"
)
self.usager_corentin = UsagerFactory(
given_name="Corentin", family_name="Dupont", preferred_username="Astro"
)
self.mandat_aidant_alice_no_autorisation = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_alice,
expiration_date=timezone.now() + timedelta(days=5),
)
self.mandat_aidant_josephine_6 = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_josephine,
expiration_date=timezone.now() + timedelta(days=6),
)
AutorisationFactory(
mandat=self.mandat_aidant_josephine_6,
demarche="social",
)
self.mandat_aidant_josephine_1 = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_josephine,
expiration_date=timezone.now() + timedelta(days=1),
)
AutorisationFactory(
mandat=self.mandat_aidant_josephine_1,
demarche="papiers",
)
self.mandat_aidant_corentin_365 = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_corentin,
expiration_date=timezone.now() + timedelta(days=365),
)
AutorisationFactory(
mandat=self.mandat_aidant_corentin_365,
demarche="famille",
)
def test_grouped_autorisations(self):
self.open_live_url("/espace-aidant/")
# Login
login_aidant(self)
# Espace Aidant home
self.selenium.find_element_by_id("view_mandats").click()
results = []
for el in self.selenium.find_elements_by_tag_name("table"):
for tr in el.find_elements_by_css_selector("tbody tr"):
results.append(tr)
# autorisation List
self.assertEqual(len(results), 3)
| <filename>aidants_connect_web/tests/test_functional/test_view_autorisations.py<gh_stars>10-100
from datetime import timedelta
from django.test import tag
from django.utils import timezone
from aidants_connect_web.tests.factories import (
AidantFactory,
AutorisationFactory,
MandatFactory,
UsagerFactory,
)
from aidants_connect_web.tests.test_functional.testcases import FunctionalTestCase
from aidants_connect_web.tests.test_functional.utilities import login_aidant
@tag("functional")
class ViewAutorisationsTests(FunctionalTestCase):
def setUp(self):
self.aidant = AidantFactory(email="<EMAIL>")
device = self.aidant.staticdevice_set.create(id=self.aidant.id)
device.token_set.create(token="<PASSWORD>")
self.usager_alice = UsagerFactory(given_name="Alice", family_name="Lovelace")
self.usager_josephine = UsagerFactory(
given_name="Joséphine", family_name="Dupont"
)
self.usager_corentin = UsagerFactory(
given_name="Corentin", family_name="Dupont", preferred_username="Astro"
)
self.mandat_aidant_alice_no_autorisation = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_alice,
expiration_date=timezone.now() + timedelta(days=5),
)
self.mandat_aidant_josephine_6 = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_josephine,
expiration_date=timezone.now() + timedelta(days=6),
)
AutorisationFactory(
mandat=self.mandat_aidant_josephine_6,
demarche="social",
)
self.mandat_aidant_josephine_1 = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_josephine,
expiration_date=timezone.now() + timedelta(days=1),
)
AutorisationFactory(
mandat=self.mandat_aidant_josephine_1,
demarche="papiers",
)
self.mandat_aidant_corentin_365 = MandatFactory(
organisation=self.aidant.organisation,
usager=self.usager_corentin,
expiration_date=timezone.now() + timedelta(days=365),
)
AutorisationFactory(
mandat=self.mandat_aidant_corentin_365,
demarche="famille",
)
def test_grouped_autorisations(self):
self.open_live_url("/espace-aidant/")
# Login
login_aidant(self)
# Espace Aidant home
self.selenium.find_element_by_id("view_mandats").click()
results = []
for el in self.selenium.find_elements_by_tag_name("table"):
for tr in el.find_elements_by_css_selector("tbody tr"):
results.append(tr)
# autorisation List
self.assertEqual(len(results), 3)
| fr | 0.337974 | # Login # Espace Aidant home # autorisation List | 2.093207 | 2 |
resnet_imagenet.py | ivankreso/resnet-tensorflow | 2 | 6631583 | import time
import tensorflow as tf
import argparse
import os, re
import numpy as np
import h5py
import tensorflow.contrib.layers as layers
from tensorflow.contrib.framework import arg_scope
# model depth can be 50, 101 or 152
MODEL_DEPTH = 50
#DATA_MEAN = [103.939, 116.779, 123.68]
DATA_MEAN = np.load('imagenet_mean.npy')
MODEL_PATH ='/home/kivan/datasets/pretrained/resnet/ResNet'+str(MODEL_DEPTH)+'.npy'
DATA_PATH = '/home/kivan/datasets/imagenet/ILSVRC2015/numpy/val_data.hdf5'
def normalize_input(rgb):
return rgb - DATA_MEAN
def build(image, labels, is_training):
weight_decay = 1e-4
bn_params = {
# Decay for the moving averages.
'decay': 0.9,
'center': True,
'scale': True,
# epsilon to prevent 0s in variance.
'epsilon': 1e-5,
# None to force the updates
'updates_collections': None,
'is_training': is_training,
}
init_func = layers.variance_scaling_initializer(mode='FAN_OUT')
def shortcut(net, num_maps_out, stride):
num_maps_in = net.get_shape().as_list()[-1]
if num_maps_in != num_maps_out:
return layers.convolution2d(net, num_maps_out, kernel_size=1, stride=stride,
activation_fn=None, scope='convshortcut')
return net
def bottleneck(net, num_maps, stride):
net = tf.nn.relu(net)
bottom_net = net
with arg_scope([layers.convolution2d],
padding='SAME', activation_fn=tf.nn.relu,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_initializer=init_func,
weights_regularizer=layers.l2_regularizer(weight_decay)):
net = layers.convolution2d(net, num_maps, kernel_size=1, stride=stride, scope='conv1')
net = layers.convolution2d(net, num_maps, kernel_size=3, scope='conv2')
net = layers.convolution2d(net, num_maps * 4, kernel_size=1,
activation_fn=None, scope='conv3')
return net + shortcut(bottom_net, num_maps * 4, stride)
def layer(net, name, num_maps, num_layers, stride):
with tf.variable_scope(name):
for i in range(num_layers):
with tf.variable_scope('block{}'.format(i)):
s = stride if i == 0 else 1
net = bottleneck(net, num_maps, s)
return net
config_map = {
50: [3,4,6,3],
101: [3,4,23,3],
152: [3,8,36,3]
}
config = config_map[MODEL_DEPTH]
image = normalize_input(image)
#image = tf.pad(image, [[0,0],[3,3],[3,3],[0,0]])
#net = layers.convolution2d(image, 64, 7, stride=2, padding='VALID',
net = layers.convolution2d(image, 64, 7, stride=2, padding='SAME',
activation_fn=None, weights_initializer=init_func,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_regularizer=layers.l2_regularizer(weight_decay), scope='conv0')
net = layers.max_pool2d(net, 3, stride=2, padding='SAME', scope='pool0')
net = layer(net, 'group0', 64, config[0], 1)
net = layer(net, 'group1', 128, config[1], 2)
net = layer(net, 'group2', 256, config[2], 2)
net = layer(net, 'group3', 512, config[3], 2)
net = tf.nn.relu(net)
in_size = net.get_shape().as_list()[1:3]
net = layers.avg_pool2d(net, kernel_size=in_size, scope='global_avg_pool')
net = layers.flatten(net, scope='flatten')
logits = layers.fully_connected(net, 1000, activation_fn=None, scope='fc1000')
return logits
def name_conversion(caffe_layer_name):
NAME_MAP = {
'bn_conv1/beta': 'conv0/BatchNorm/beta:0',
'bn_conv1/gamma': 'conv0/BatchNorm/gamma:0',
'bn_conv1/mean/EMA': 'conv0/BatchNorm/moving_mean:0',
'bn_conv1/variance/EMA': 'conv0/BatchNorm/moving_variance:0',
'conv1/W': 'conv0/weights:0', 'conv1/b': 'conv0/biases:0',
'fc1000/W': 'fc1000/weights:0', 'fc1000/b': 'fc1000/biases:0'}
if caffe_layer_name in NAME_MAP:
return NAME_MAP[caffe_layer_name]
s = re.search('([a-z]+)([0-9]+)([a-z]+)_', caffe_layer_name)
if s is None:
s = re.search('([a-z]+)([0-9]+)([a-z]+)([0-9]+)_', caffe_layer_name)
layer_block_part1 = s.group(3)
layer_block_part2 = s.group(4)
assert layer_block_part1 in ['a', 'b']
layer_block = 0 if layer_block_part1 == 'a' else int(layer_block_part2)
else:
layer_block = ord(s.group(3)) - ord('a')
layer_type = s.group(1)
layer_group = s.group(2)
layer_branch = int(re.search('_branch([0-9])', caffe_layer_name).group(1))
assert layer_branch in [1, 2]
if layer_branch == 2:
layer_id = re.search('_branch[0-9]([a-z])/', caffe_layer_name).group(1)
layer_id = ord(layer_id) - ord('a') + 1
type_dict = {'res':'conv', 'bn':'BatchNorm'}
name_map = {'/W': '/weights:0', '/b': '/biases:0', '/beta': '/beta:0',
'/gamma': '/gamma:0', '/mean/EMA': '/moving_mean:0',
'/variance/EMA': '/moving_variance:0'}
tf_name = caffe_layer_name[caffe_layer_name.index('/'):]
if tf_name in name_map:
tf_name = name_map[tf_name]
if layer_type == 'res':
layer_type = type_dict[layer_type] + \
(str(layer_id) if layer_branch == 2 else 'shortcut')
elif layer_branch == 2:
layer_type = 'conv' + str(layer_id) + '/' + type_dict[layer_type]
elif layer_branch == 1:
layer_type = 'convshortcut/' + type_dict[layer_type]
tf_name = 'group{}/block{}/{}'.format(int(layer_group) - 2,
layer_block, layer_type) + tf_name
return tf_name
def create_init_op(params):
variables = tf.contrib.framework.get_variables()
init_map = {}
for var in variables:
name = var.name
if name in params:
init_map[var.name] = params[name]
del params[name]
else:
print(var.name, ' --> init not found!')
raise ValueError('Init not found')
print('Unused pretrained params:')
print(list(params.keys()))
init_op, init_feed = tf.contrib.framework.assign_from_values(init_map)
return init_op, init_feed
def evaluate():
params = np.load(MODEL_PATH, encoding='latin1').item()
resnet_param = {}
for k, v in params.items():
newname = name_conversion(k)
resnet_param[newname] = v
img_size = 224
image = tf.placeholder(tf.float32, [None, img_size, img_size, 3], 'input')
labels = tf.placeholder(tf.int32, [None], 'label')
logits = build(image, labels, is_training=False)
all_vars = tf.contrib.framework.get_variables()
for v in all_vars:
print(v.name)
init_op, init_feed = create_init_op(resnet_param)
sess = tf.Session()
sess.run(init_op, feed_dict=init_feed)
batch_size = 100
h5f = h5py.File(DATA_PATH, 'r')
data_x = h5f['data_x'][()]
print(data_x.shape)
data_y = h5f['data_y'][()]
h5f.close()
N = data_x.shape[0]
assert N % batch_size == 0
num_batches = N // batch_size
top5_error = tf.nn.in_top_k(logits, labels, 5)
top5_wrong = 0
cnt_wrong = 0
for i in range(num_batches):
offset = i * batch_size
batch_x = data_x[offset:offset+batch_size, ...]
batch_y = data_y[offset:offset+batch_size, ...]
start_time = time.time()
logits_val, top5 = sess.run([logits, top5_error], feed_dict={image:batch_x, labels:batch_y})
duration = time.time() - start_time
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
top5_wrong += (top5==0).sum()
yp = logits_val.argmax(1).astype(np.int32)
cnt_wrong += (yp != batch_y).sum()
if i % 10 == 0:
print('[%d / %d] top1error = %.2f - top5error = %.2f (%.1f examples/sec; %.3f sec/batch)' % (i, num_batches,
cnt_wrong / ((i+1)*batch_size) * 100, top5_wrong / ((i+1)*batch_size) * 100,
examples_per_sec, sec_per_batch))
print(cnt_wrong / N)
print(top5_wrong / N)
if __name__ == '__main__':
evaluate()
| import time
import tensorflow as tf
import argparse
import os, re
import numpy as np
import h5py
import tensorflow.contrib.layers as layers
from tensorflow.contrib.framework import arg_scope
# model depth can be 50, 101 or 152
MODEL_DEPTH = 50
#DATA_MEAN = [103.939, 116.779, 123.68]
DATA_MEAN = np.load('imagenet_mean.npy')
MODEL_PATH ='/home/kivan/datasets/pretrained/resnet/ResNet'+str(MODEL_DEPTH)+'.npy'
DATA_PATH = '/home/kivan/datasets/imagenet/ILSVRC2015/numpy/val_data.hdf5'
def normalize_input(rgb):
return rgb - DATA_MEAN
def build(image, labels, is_training):
weight_decay = 1e-4
bn_params = {
# Decay for the moving averages.
'decay': 0.9,
'center': True,
'scale': True,
# epsilon to prevent 0s in variance.
'epsilon': 1e-5,
# None to force the updates
'updates_collections': None,
'is_training': is_training,
}
init_func = layers.variance_scaling_initializer(mode='FAN_OUT')
def shortcut(net, num_maps_out, stride):
num_maps_in = net.get_shape().as_list()[-1]
if num_maps_in != num_maps_out:
return layers.convolution2d(net, num_maps_out, kernel_size=1, stride=stride,
activation_fn=None, scope='convshortcut')
return net
def bottleneck(net, num_maps, stride):
net = tf.nn.relu(net)
bottom_net = net
with arg_scope([layers.convolution2d],
padding='SAME', activation_fn=tf.nn.relu,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_initializer=init_func,
weights_regularizer=layers.l2_regularizer(weight_decay)):
net = layers.convolution2d(net, num_maps, kernel_size=1, stride=stride, scope='conv1')
net = layers.convolution2d(net, num_maps, kernel_size=3, scope='conv2')
net = layers.convolution2d(net, num_maps * 4, kernel_size=1,
activation_fn=None, scope='conv3')
return net + shortcut(bottom_net, num_maps * 4, stride)
def layer(net, name, num_maps, num_layers, stride):
with tf.variable_scope(name):
for i in range(num_layers):
with tf.variable_scope('block{}'.format(i)):
s = stride if i == 0 else 1
net = bottleneck(net, num_maps, s)
return net
config_map = {
50: [3,4,6,3],
101: [3,4,23,3],
152: [3,8,36,3]
}
config = config_map[MODEL_DEPTH]
image = normalize_input(image)
#image = tf.pad(image, [[0,0],[3,3],[3,3],[0,0]])
#net = layers.convolution2d(image, 64, 7, stride=2, padding='VALID',
net = layers.convolution2d(image, 64, 7, stride=2, padding='SAME',
activation_fn=None, weights_initializer=init_func,
normalizer_fn=layers.batch_norm, normalizer_params=bn_params,
weights_regularizer=layers.l2_regularizer(weight_decay), scope='conv0')
net = layers.max_pool2d(net, 3, stride=2, padding='SAME', scope='pool0')
net = layer(net, 'group0', 64, config[0], 1)
net = layer(net, 'group1', 128, config[1], 2)
net = layer(net, 'group2', 256, config[2], 2)
net = layer(net, 'group3', 512, config[3], 2)
net = tf.nn.relu(net)
in_size = net.get_shape().as_list()[1:3]
net = layers.avg_pool2d(net, kernel_size=in_size, scope='global_avg_pool')
net = layers.flatten(net, scope='flatten')
logits = layers.fully_connected(net, 1000, activation_fn=None, scope='fc1000')
return logits
def name_conversion(caffe_layer_name):
NAME_MAP = {
'bn_conv1/beta': 'conv0/BatchNorm/beta:0',
'bn_conv1/gamma': 'conv0/BatchNorm/gamma:0',
'bn_conv1/mean/EMA': 'conv0/BatchNorm/moving_mean:0',
'bn_conv1/variance/EMA': 'conv0/BatchNorm/moving_variance:0',
'conv1/W': 'conv0/weights:0', 'conv1/b': 'conv0/biases:0',
'fc1000/W': 'fc1000/weights:0', 'fc1000/b': 'fc1000/biases:0'}
if caffe_layer_name in NAME_MAP:
return NAME_MAP[caffe_layer_name]
s = re.search('([a-z]+)([0-9]+)([a-z]+)_', caffe_layer_name)
if s is None:
s = re.search('([a-z]+)([0-9]+)([a-z]+)([0-9]+)_', caffe_layer_name)
layer_block_part1 = s.group(3)
layer_block_part2 = s.group(4)
assert layer_block_part1 in ['a', 'b']
layer_block = 0 if layer_block_part1 == 'a' else int(layer_block_part2)
else:
layer_block = ord(s.group(3)) - ord('a')
layer_type = s.group(1)
layer_group = s.group(2)
layer_branch = int(re.search('_branch([0-9])', caffe_layer_name).group(1))
assert layer_branch in [1, 2]
if layer_branch == 2:
layer_id = re.search('_branch[0-9]([a-z])/', caffe_layer_name).group(1)
layer_id = ord(layer_id) - ord('a') + 1
type_dict = {'res':'conv', 'bn':'BatchNorm'}
name_map = {'/W': '/weights:0', '/b': '/biases:0', '/beta': '/beta:0',
'/gamma': '/gamma:0', '/mean/EMA': '/moving_mean:0',
'/variance/EMA': '/moving_variance:0'}
tf_name = caffe_layer_name[caffe_layer_name.index('/'):]
if tf_name in name_map:
tf_name = name_map[tf_name]
if layer_type == 'res':
layer_type = type_dict[layer_type] + \
(str(layer_id) if layer_branch == 2 else 'shortcut')
elif layer_branch == 2:
layer_type = 'conv' + str(layer_id) + '/' + type_dict[layer_type]
elif layer_branch == 1:
layer_type = 'convshortcut/' + type_dict[layer_type]
tf_name = 'group{}/block{}/{}'.format(int(layer_group) - 2,
layer_block, layer_type) + tf_name
return tf_name
def create_init_op(params):
variables = tf.contrib.framework.get_variables()
init_map = {}
for var in variables:
name = var.name
if name in params:
init_map[var.name] = params[name]
del params[name]
else:
print(var.name, ' --> init not found!')
raise ValueError('Init not found')
print('Unused pretrained params:')
print(list(params.keys()))
init_op, init_feed = tf.contrib.framework.assign_from_values(init_map)
return init_op, init_feed
def evaluate():
params = np.load(MODEL_PATH, encoding='latin1').item()
resnet_param = {}
for k, v in params.items():
newname = name_conversion(k)
resnet_param[newname] = v
img_size = 224
image = tf.placeholder(tf.float32, [None, img_size, img_size, 3], 'input')
labels = tf.placeholder(tf.int32, [None], 'label')
logits = build(image, labels, is_training=False)
all_vars = tf.contrib.framework.get_variables()
for v in all_vars:
print(v.name)
init_op, init_feed = create_init_op(resnet_param)
sess = tf.Session()
sess.run(init_op, feed_dict=init_feed)
batch_size = 100
h5f = h5py.File(DATA_PATH, 'r')
data_x = h5f['data_x'][()]
print(data_x.shape)
data_y = h5f['data_y'][()]
h5f.close()
N = data_x.shape[0]
assert N % batch_size == 0
num_batches = N // batch_size
top5_error = tf.nn.in_top_k(logits, labels, 5)
top5_wrong = 0
cnt_wrong = 0
for i in range(num_batches):
offset = i * batch_size
batch_x = data_x[offset:offset+batch_size, ...]
batch_y = data_y[offset:offset+batch_size, ...]
start_time = time.time()
logits_val, top5 = sess.run([logits, top5_error], feed_dict={image:batch_x, labels:batch_y})
duration = time.time() - start_time
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
top5_wrong += (top5==0).sum()
yp = logits_val.argmax(1).astype(np.int32)
cnt_wrong += (yp != batch_y).sum()
if i % 10 == 0:
print('[%d / %d] top1error = %.2f - top5error = %.2f (%.1f examples/sec; %.3f sec/batch)' % (i, num_batches,
cnt_wrong / ((i+1)*batch_size) * 100, top5_wrong / ((i+1)*batch_size) * 100,
examples_per_sec, sec_per_batch))
print(cnt_wrong / N)
print(top5_wrong / N)
if __name__ == '__main__':
evaluate()
| en | 0.538911 | # model depth can be 50, 101 or 152 #DATA_MEAN = [103.939, 116.779, 123.68] # Decay for the moving averages. # epsilon to prevent 0s in variance. # None to force the updates #image = tf.pad(image, [[0,0],[3,3],[3,3],[0,0]]) #net = layers.convolution2d(image, 64, 7, stride=2, padding='VALID', | 2.17268 | 2 |
transformers_sandbox/reformer.py | aikindergarten/transformers_sandbox | 1 | 6631584 | <reponame>aikindergarten/transformers_sandbox
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04a_models.reformer.ipynb (unless otherwise specified).
__all__ = ['Chunk', 'ChunkedFeedForward', 'Deterministic', 'ReversibleBlock', 'IrreversibleBlock', 'ReversibleSequence',
'RevSwap', 'RevHalfResidual', 'RevChunk', 'RevMerge', 'ReversibleSequenceV2', 'ReversibleEncoder',
'ReversibleDecoder', 'ReversibleLM', 'ReversibleTransformer', 'ReversibleEncoderV2', 'ReversibleLMV2',
'LSHEncoderBlock', 'LSHEncoder', 'LSHLM', 'ReformerEncoder', 'ReformerLM', 'reformer_lm_splits']
# Cell
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from functools import wraps
from fastai.basics import *
from .core import *
from .layers import *
from .attention.all import *
from .transformer import LMMixin, EncDecMixin
# Cell
class Chunk(Module):
"Applies fn to input chunked along dim"
def __init__(self, n_chunks:int, fn:Module, dim:int=-1):
store_attr()
def forward(self, x, **kwargs):
if self.n_chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.n_chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
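# Cell
# Usage sketch (illustrative only; not part of the exported `__all__`; shapes are
# arbitrary example values): `Chunk` splits its input along `dim`, applies `fn` to each
# piece and concatenates the results, trading a little extra compute for lower peak memory.
def _example_chunk():
    x = torch.randn(2, 128, 64)                          # [bs, sl, d]
    chunked_proj = Chunk(n_chunks=4, fn=nn.Linear(64, 64), dim=1)
    return chunked_proj(x)                               # same shape as x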
# Cell
class ChunkedFeedForward(Module):
"Applies positionwise feed-forward layer to input chunced along dim"
def __init__(self, d:int, d_ff:int=None, n_chunks:int=1, dropout:float=0., dim:int=-1):
store_attr('n_chunks,dim')
d_ff = default(d_ff, 4*d)
self.net = nn.Sequential(
nn.Linear(d, d_ff),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(d_ff, d),
nn.Dropout(dropout)
)
def forward(self, x, **kwargs):
if self.n_chunks == 1:
return self.net(x)
chunks = x.chunk(self.n_chunks, dim = self.dim)
return torch.cat([self.net(c) for c in chunks], dim = self.dim)
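# Cell
# Usage sketch (illustrative only; not exported): chunking the position-wise feed-forward
# over the sequence dimension keeps the intermediate d_ff activations small, which is the
# memory trick Reformer uses for long sequences.
def _example_chunked_ff():
    x = torch.randn(2, 128, 64)                          # [bs, sl, d]
    ff = ChunkedFeedForward(d=64, d_ff=256, n_chunks=4, dim=1)
    return ff(x)                                         # [2, 128, 64]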
# Cell
class Deterministic(Module):
"""
    Wrapper module to ensure determinism of the backward pass,
    following the example for saving and restoring RNG state here:
https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
"""
def __init__(self, net:Module):
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# Cell
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(Module):
"Applies f and g in reversible manner. Avoids storing outputs for backpropagation"
def __init__(self, f:Module, g:Module, depth=None, send_signal=False):
store_attr('depth, send_signal')
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = False
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = True
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
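# Cell
# Usage sketch (illustrative only; not exported): a ReversibleBlock pairs an attention
# sublayer `f` with a feed-forward sublayer `g` and operates on a duplicated stream of
# width 2*d, the same way `ReversibleEncoder` below wires it up. A standalone call runs
# the forward under `torch.no_grad`; the memory savings come from driving the block
# through `ReversibleSequence`/`_ReversibleFunction`.
def _example_reversible_block():
    d = 64
    f = PreNorm(d, Attention(d, n_heads=4, causal=True))
    g = PreNorm(d, ChunkedFeedForward(d, n_chunks=2, dim=1))
    block = ReversibleBlock(f, g)
    x = torch.randn(2, 128, 2 * d)                       # duplicated stream [x; x]
    return block(x)                                      # [2, 128, 2 * d]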
# Cell
class IrreversibleBlock(Module):
"Mimics ReversibleBlock computation but gradients are computed as ussual"
def __init__(self, f, g):
store_attr()
def forward(self, x, f_args={}, g_args={}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1 = x1 + self.f(x2, **f_args)
y2 = x2 + self.g(y1, **g_args)
return torch.cat([y1, y2], dim=2)
# Cell
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
# Cell
class ReversibleSequence(Module):
"""
    Stack of ReversibleBlocks constructed from `blocks`. Applies ReversibleBlocks if the
    sequence length is > rev_thres, otherwise IrreversibleBlocks.
"""
def __init__(self, blocks, rev_thres = 0, send_signal = False):
        self.rev_thres = rev_thres # uses reversible blocks if seq_len > rev_thres else irreversible blocks
self.blocks = nn.ModuleList([ReversibleBlock(f, g, depth, send_signal) for depth, (f, g) in enumerate(blocks)])
self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, arg_route = (True, True), **kwargs):
reverse = x.shape[1] > self.rev_thres
blocks = self.blocks if reverse else self.irrev_blocks
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {'f_args': f_args, 'g_args': g_args}
if not reverse:
for block in blocks:
x = block(x, **block_kwargs)
return x
return _ReversibleFunction.apply(x, blocks, block_kwargs)
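# Cell
# Usage sketch (illustrative only; not exported): `ReversibleSequence` expects an
# iterable of (f, g) pairs; activations are recomputed in `backward_pass` instead of
# being stored, so activation memory stays roughly constant in depth.
def _example_reversible_sequence():
    d = 64
    blocks = nn.ModuleList([nn.ModuleList([
        PreNorm(d, Attention(d, n_heads=4, causal=True)),
        PreNorm(d, ChunkedFeedForward(d, n_chunks=2, dim=1))]) for _ in range(2)])
    seq = ReversibleSequence(blocks)
    x = torch.randn(2, 128, 2 * d)
    return seq(x, arg_route=(False, False))              # [2, 128, 2 * d]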
# Cell
class RevSwap(Module):
def forward(self, x1, x2):
return x2, x1
def backward_pass(self, x1, x2, dx1, dx2, **kwargs):
return x2, x1, dx2, dx1
# Cell
class RevHalfResidual(Module):
"Reversible Half-Residual. Can be used to stack arbitrary number of residual blocks in reversible manner"
def __init__(self, submodule:Module, depth=None, send_signal=False):
store_attr('depth, send_signal')
self.f = Deterministic(submodule)
def forward(self, x1, x2):
return x1 + self.f(x2, record_rng=self.training), x2
def backward_pass(self, y1, y2, dy1, dy2, **kwargs):
x2 = y2
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True)
torch.autograd.backward(fx2, dy1)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
return x1, x2, dy1, dx2
# Cell
# needed for compatibility with `ReversibleEncoder`
class RevChunk(Module):
def forward(self, x):
return x.chunk(2, dim=-1)
def backward_pass(self, x1, x2, dx1, dx2):
return torch.cat([x1,x2], dim=-1), torch.cat([dx1,dx2], dim=-1)
class RevMerge(Module):
def forward(self, x1, x2):
return torch.cat([x1, x2], dim=-1)
def backward_pass(self, y, dy):
y1, y2 = y.chunk(2, dim=-1)
        dy1, dy2 = dy.chunk(2, dim=-1)
return y1, y2, dy1, dy2
# Cell
class _ReversibleFunctionV2(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
x1,x2 = blocks[0](x)
for block in blocks[1:-1]:
x1,x2 = block(x1,x2, **kwargs)
x = blocks[-1](x1,x2)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
y1, y2, dy1, dy2 = ctx.blocks[-1].backward_pass(y, dy)
        for block in ctx.blocks[-2:0:-1]:  # walk the middle blocks back, including the first one
y1,y2, dy1,dy2 = block.backward_pass(y1,y2, dy1,dy2, **kwargs)
_, dy = ctx.blocks[0].backward_pass(y1,y2, dy1,dy2)
return dy, None, None
# Cell
class ReversibleSequenceV2(Module):
"""
    Stack of reversible half-residual blocks constructed from `blocks`.
    NB: unlike `ReversibleSequence`, no irreversible fallback is implemented here.
"""
def __init__(self, blocks, rev_thres = 0, send_signal = False):
        self.rev_thres = rev_thres # kept for interface parity with ReversibleSequence; see note in forward
self.blocks = nn.ModuleList(blocks)
# self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])
    def forward(self, x, arg_route = (True, True), **kwargs):
        # NB: unlike `ReversibleSequence`, the irreversible fallback is not implemented
        # here (`self.irrev_blocks` is never built), so the reversible path is used
        # regardless of `rev_thres`
        return _ReversibleFunctionV2.apply(x, self.blocks, {})
# Cell
class ReversibleEncoder(Module):
"Stack of ReversibleBlocks"
def __init__(self,
d_model:int,
n_layers:int=6,
n_heads:int = 8,
max_seq_len:int = 512,
ff_chunks:int = 1,
causal:bool = False,
attn_dropout:float = 0.,
post_attn_dropout:float = None,
attn_bias:bool=False,
ff_dropout:float = 0.,
d_ff:int = None,
prenorm:bool=True,
final_norm:Module=None,
rev_thres:int=0):
# store_attr()
blocks = []
norm_wrapper = PreNorm if prenorm else PostNorm
for ind in range(n_layers):
attn = Attention(d_model, n_heads=n_heads, causal=causal, dropout=attn_dropout, out_dropout=post_attn_dropout, bias=attn_bias)
ff = ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1)
f = norm_wrapper(d_model, attn)
g = norm_wrapper(d_model, ff)
blocks.append(nn.ModuleList([f, g]))
self.norm = final_norm(d_model) if exists(final_norm) else None
self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (False, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
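# Cell
# Usage sketch (illustrative only; not exported): the encoder takes already-embedded
# inputs of shape [bs, sl, d_model], runs the reversible stack on a duplicated stream and
# averages the two output streams back down to d_model.
def _example_reversible_encoder():
    enc = ReversibleEncoder(d_model=64, n_layers=2, n_heads=4, causal=True,
                            final_norm=nn.LayerNorm)
    x = torch.randn(2, 128, 64)
    return enc(x)                                        # [2, 128, 64]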
# Cell
class ReversibleDecoder(Module):
"Stack of ReversibleBlocks. Uses AdditiveAttention."
def __init__(self,
d_model,
n_layers = 6,
n_heads = 8,
max_seq_len = 512,
d_head = None,
bucket_size = 64,
n_hashes = 8,
ff_chunks = 1,
                 attn_chunks = None, # currently unused
attn_dropout = 0.,
post_attn_dropout = None,
attn_bias:bool=False,
ff_dropout = 0.,
d_ff = None,
prenorm=True,
final_norm:Module=None,
rev_thres = 0,
):
store_attr('d_model,n_layers')
get_attn = lambda: AdditiveAttention(d_model, n_heads=n_heads, causal=True, dropout=attn_dropout, out_dropout=post_attn_dropout, bias=attn_bias)
get_ff = lambda: ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1)
norm_wrapper = PreNorm if prenorm else PostNorm
blocks = []
for ind in range(n_layers):
f = norm_wrapper(d_model, get_attn())
g = norm_wrapper(d_model, get_ff())
blocks.append(nn.ModuleList([f, g]))
self.norm = final_norm(d_model) if exists(final_norm) else None
# send_signal is not implemented for now
self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (True, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
# Cell
class ReversibleLM(Module, LMMixin):
"""
Reversible Transformer for language modelling
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
* attn_bias: bool - if True projection layers attention modules will have bias
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
* rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
ff_chunks:int=1,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='absolute',
max_seq_len:int=512,
axial_shape=None,
axial_emb_dims=None,
pad_idx:int=None,
prenorm:bool=True,
attn_bias:bool=False,
rev_thres:int=0):
store_attr()
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
self.encoder = ReversibleEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, ff_chunks=ff_chunks,
final_norm=nn.LayerNorm, rev_thres=rev_thres)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
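# Cell
# Usage sketch (illustrative only; not exported): hyper-parameters below are toy values.
def _example_reversible_lm():
    model = ReversibleLM(vocab_sz=256, d_model=64, n_layers=2, n_heads=4, max_seq_len=128)
    x = torch.randint(0, 256, (2, 128))                  # [bs, sl] token ids
    return model(x)                                      # [2, 128, 256] logits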
# Cell
#TODO test weight tying
# Note on weight tying: it's done like here in fastai AWD_LSTM model
# Lucidrains does it with custom MatrixMultiply module https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reformer_pytorch.py#L106
#TODO: update docstrings
class ReversibleTransformer(Module):
"""
    Reversible Transformer Encoder-Decoder model
Parameters:
* enc_vocab_sz: int - source vocab size
* dec_vocab_sz: int - target vocab size
* d_model: int - inner dimension of the model
* n_enc_layers: int (default: 6)
* n_dec_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* max_seq_len: int (default: 512)
* prenorm: bool - whether to use PreNorm or PostNorm
* attn_bias: bool - whether to allow biases in attention projection layers
        * pad_idx: int - padding token id; if provided and no mask/context_mask are passed
            to forward, it is used to generate padding masks
        * tie_weights: bool - if True target embedding weights are used for the output projection
        * shared_emb: bool - if True encoder and decoder share the embedding layer
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
Inputs:
* src - source input ids, shape [bs, src_sl]
* tgt - target input ids, shape [bs, tgt_sl]
* src_mask - optional boolean source mask, shape [bs, src_sl]
* tgt_mask - optional boolean target mask, shape [bs, tgt_sl]
Returns:
* logits - target token logits, shape [bs, tgt_sl, tgt_vocab_sz]
"""
def __init__(self,
enc_vocab_sz,
dec_vocab_sz,
d_model,
n_layers:int=6,
n_enc_layers=None,
n_dec_layers=None,
n_heads=8,
d_ff=None,
ff_chunks:int=1,
pad_idx=None,
tie_weights=True,
shared_emb = False,
attn_dropout=0.1,
ff_dropout=0.1,
emb_dropout=0.1,
prenorm=True,
attn_bias=False,
comb_attn=False,
pos_enc='absolute',
max_seq_len=512,
axial_shape=None,
axial_emb_dims=None):
store_attr()
n_enc_layers = ifnone(n_enc_layers, n_layers)
n_dec_layers = ifnone(n_dec_layers, n_layers)
self.enc_emb = TransformerEmbedding(enc_vocab_sz, d_model, max_seq_len, dropout=emb_dropout, pos_enc=pos_enc,
axial_shape=axial_shape, axial_emb_dims=axial_emb_dims)
if shared_emb:
assert (enc_vocab_sz == dec_vocab_sz), "Encoder and decoder vocab size doesn't match"
self.dec_emb = self.enc_emb
else:
self.dec_emb = TransformerEmbedding(dec_vocab_sz, d_model, max_seq_len, dropout=emb_dropout, pos_enc=pos_enc,
axial_shape=axial_shape, axial_emb_dims=axial_emb_dims)
self.encoder = ReversibleEncoder(d_model, n_enc_layers, n_heads, d_ff=d_ff, attn_dropout=attn_dropout,
ff_dropout=ff_dropout, prenorm=prenorm, attn_bias=attn_bias,
final_norm=nn.LayerNorm, causal=False, ff_chunks=ff_chunks)
self.decoder = ReversibleDecoder(d_model, n_dec_layers, n_heads, d_ff=d_ff, attn_dropout=attn_dropout,
ff_dropout=ff_dropout, prenorm=prenorm, attn_bias=attn_bias,
final_norm=nn.LayerNorm, ff_chunks=ff_chunks)
self.proj = nn.Linear(d_model, dec_vocab_sz)
if tie_weights: self.proj.weight = self.dec_emb.emb.weight
def forward(self, src, tgt, src_mask=None, tgt_mask=None):
src_mask = default(src_mask, self.get_padding_mask(src))
tgt_mask = default(tgt_mask, self.get_padding_mask(tgt))
enc = self.encoder(self.enc_emb(src), mask=src_mask)
out = self.decoder(self.dec_emb(tgt), context=enc, mask=tgt_mask, context_mask=src_mask)
return self.proj(out)
def get_padding_mask(self, x):
if self.pad_idx is None: return None
return (x != self.pad_idx)
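# Cell
# Usage sketch (illustrative only; not exported): a toy encoder-decoder call. With the
# default pad_idx=None no padding masks are generated automatically.
def _example_reversible_transformer():
    model = ReversibleTransformer(enc_vocab_sz=256, dec_vocab_sz=256, d_model=64,
                                  n_layers=2, n_heads=4, max_seq_len=128)
    src = torch.randint(0, 256, (2, 128))
    tgt = torch.randint(0, 256, (2, 128))
    return model(src, tgt)                               # [2, 128, 256] target logits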
# Cell
class ReversibleEncoderV2(Module):
"Stack of ReversibleBlocks"
def __init__(self,
d_model:int,
n_layers:int=6,
n_heads:int = 8,
max_seq_len:int = 512,
ff_chunks:int = 1,
causal:bool = False,
attn_dropout:float = 0.,
post_attn_dropout:float = None,
attn_bias:bool=False,
ff_dropout:float = 0.,
d_ff:int = None,
prenorm:bool=True,
final_norm:Module=None,
rev_thres:int=0):
# store_attr()
blocks = [RevChunk()]
norm_wrapper = PreNorm if prenorm else PostNorm
for ind in range(n_layers):
sublayer = (Attention(d_model, n_heads=n_heads, causal=causal, dropout=attn_dropout, out_dropout=post_attn_dropout, bias=attn_bias)
if ind%2==0 else
ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1))
f = norm_wrapper(d_model, sublayer)
            blocks += [RevHalfResidual(f), RevSwap()] if ind != (n_layers-1) else [RevHalfResidual(f)]
blocks += [RevMerge()]
self.norm = final_norm(d_model) if exists(final_norm) else None
# send_signal is not implemented for now
self.layers = ReversibleSequenceV2(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (False, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
# Cell
class ReversibleLMV2(Module, LMMixin):
"""
Reversible Transformer for language modelling
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
* attn_bias: bool - if True projection layers attention modules will have bias
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
* rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='absolute',
max_seq_len:int=512,
axial_shape=None,
axial_emb_dims=None,
pad_idx:int=None,
prenorm:bool=False,
attn_bias:bool=False,
rev_thres:int=0):
store_attr()
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
self.encoder = ReversibleEncoderV2(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias,
final_norm=nn.LayerNorm, rev_thres=rev_thres)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
# Cell
class LSHEncoderBlock(Module):
"Encoder block using ReformerAttention"
def __init__(self,
d_model:int,
n_heads:int = 8,
d_ff:int = None,
attn_dropout:float = 0.1,
ff_dropout:float = 0.1,
causal:bool = False,
attn_bias:bool = False,
prenorm:bool=False,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
        store_attr('attn_dropout') # maybe add a separate attn_post_dropout argument later
if prenorm:
self.attn = Residual(PreNorm(d_model, ReformerAttention(d_model, n_heads=n_heads, causal=causal,
dropout=attn_dropout, bias=attn_bias, use_lsh=use_lsh,
n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)))
self.ff = Residual(PreNorm(d_model, FeedForward(d_model, d_ff=d_ff, dropout=ff_dropout)))
else:
self.attn = PostNorm(d_model, Residual(ReformerAttention(d_model, n_heads=n_heads, causal=causal,
dropout=attn_dropout, bias=attn_bias, use_lsh=use_lsh,
n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)))
self.ff = PostNorm(d_model, Residual(FeedForward(d_model, d_ff=d_ff, dropout=ff_dropout)))
def forward(self, x, mask=None):
out = self.attn(x, mask=mask)
return self.ff(out)
# Cell
class LSHEncoder(Module):
"""Stack of TransformerEncoderBlocks"""
def __init__(self,
d_model,
n_layers=6,
n_heads=8,
d_ff=None,
ff_dropout=0.1,
attn_dropout=0.1,
attn_bias=False,
causal=False,
prenorm=False,
use_lsh:bool=True,
final_norm=None,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
store_attr('d_model')
self.layers = nn.ModuleList([])
for _ in range(n_layers):
self.layers.append(LSHEncoderBlock(d_model, n_heads, causal=causal,
d_ff=d_ff, attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed))
self.norm = None if final_norm is None else final_norm(d_model)
def forward(self, x, mask=None):
for layer in self.layers: x = layer(x, mask=mask)
if self.norm is not None: x = self.norm(x)
return x
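# Cell
# Usage sketch (illustrative only; not exported): with LSH attention the sequence length
# should be divisible by 2*bucket_size (here 128 % (2*32) == 0).
def _example_lsh_encoder():
    enc = LSHEncoder(d_model=64, n_layers=2, n_heads=4, causal=True,
                     bucket_size=32, n_hashes=4)
    x = torch.randn(2, 128, 64)
    return enc(x)                                        # [2, 128, 64]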
# Cell
class LSHLM(Module, LMMixin):
"""
Transformer for language modelling with LSH attention
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - whether to allow biases in attention projection layers
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
        * use_lsh: bool - parameter to switch between LSH and full attention
* n_hashes: int - number of hashing rounds for LSH
* bucket_size: int - input sequence length should be divisible by 2*bucket_size
* seed: int - for LSHAttention module
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='absolute',
max_seq_len:int=512,
axial_shape:tuple=None,
axial_emb_dims:tuple=None,
pad_idx:int=None,
prenorm:bool=False,
attn_bias:bool=False,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
store_attr()
self._use_lsh = use_lsh
self._n_hashes = n_hashes
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
final_norm = nn.LayerNorm if prenorm else None
self.encoder = LSHEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
final_norm=final_norm, n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
@property
def use_lsh(self):
return self._use_lsh
@use_lsh.setter
def use_lsh(self, val):
self._use_lsh = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'use_lsh'): m.use_lsh=val
@property
def n_hashes(self):
return self._n_hashes
@n_hashes.setter
def n_hashes(self, val):
self._n_hashes = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'n_hashes'): m.n_hashes=val
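# --- Added usage sketch (not part of the original module) ---
# Builds a tiny LSHLM, scores a random batch of token ids, then flips the
# `use_lsh` property to switch every ReformerAttention layer to full attention.
# All sizes are illustrative assumptions.
def _demo_lshlm():
    model = LSHLM(vocab_sz=256, d_model=64, n_layers=2, n_heads=8,
                  max_seq_len=128, bucket_size=64)
    x = torch.randint(0, 256, (2, 128))      # [bs, sl] token ids
    logits_lsh = model(x)                    # -> [2, 128, 256]
    model.use_lsh = False                    # propagate the switch to all attention layers
    logits_full = model(x)
    return logits_lsh, logits_full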
# Cell
class ReformerEncoder(Module):
"Stack of ReversibleBlocks"
def __init__(self,
d_model:int,
n_layers:int=6,
n_heads:int = 8,
max_seq_len:int = 512,
ff_chunks:int = 1,
causal:bool = False,
attn_dropout:float = 0.,
post_attn_dropout:float = None,
attn_bias:bool=False,
ff_dropout:float = 0.,
d_ff:int = None,
prenorm:bool=True,
final_norm:Module=None,
rev_thres:int=0,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
# store_attr()
blocks = []
norm_wrapper = PreNorm if prenorm else PostNorm
for ind in range(n_layers):
attn = ReformerAttention(d_model, n_heads=n_heads, causal=causal, dropout=attn_dropout,
bias=attn_bias, use_lsh=use_lsh, n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)
ff = ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1)
f = norm_wrapper(d_model, attn)
g = norm_wrapper(d_model, ff)
blocks.append(nn.ModuleList([f, g]))
self.norm = final_norm(d_model) if exists(final_norm) else None
self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=True)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (True, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
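# --- Added usage sketch (not part of the original module) ---
# Runs a random batch through a two-layer ReformerEncoder (reversible blocks with
# ReformerAttention). Sizes are assumptions; the sequence length should be a
# multiple of 2*bucket_size when LSH is enabled.
def _demo_reformer_encoder():
    enc = ReformerEncoder(d_model=64, n_layers=2, n_heads=8, bucket_size=64,
                          final_norm=nn.LayerNorm)
    x = torch.randn(2, 128, 64)              # [bs, sl, d_model]
    return enc(x)                             # -> [2, 128, 64]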
# Cell
class ReformerLM(Module, LMMixin):
"""
Reformer for language modelling. Uses LSH or full sharedQK attention
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for computing the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - whether to allow biases in attention projection layers
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
* rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks
        * use_lsh: bool - parameter to switch between LSH and full attention
* n_hashes: int - number of hashing rounds for LSH
* bucket_size: int - input sequence length should be divisible by 2*bucket_size
* seed: int - for LSHAttention module
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
ff_chunks:int=1,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='axial',
max_seq_len:int=512,
axial_shape:tuple=None,
axial_emb_dims:tuple=None,
pad_idx:int=None,
prenorm:bool=True,
attn_bias:bool=False,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
rev_thres:int=0,
seed:int=None):
store_attr()
self._use_lsh = use_lsh
self._n_hashes = n_hashes
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
final_norm = nn.LayerNorm if prenorm else None
self.encoder = ReformerEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
final_norm=final_norm, n_hashes=n_hashes, bucket_size=bucket_size,
ff_chunks=ff_chunks, rev_thres=rev_thres, seed=seed)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
@property
def use_lsh(self):
return self._use_lsh
@use_lsh.setter
def use_lsh(self, val):
self._use_lsh = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'use_lsh'): m.use_lsh=val
@property
def n_hashes(self):
return self._n_hashes
@n_hashes.setter
def n_hashes(self, val):
self._n_hashes = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'n_hashes'): m.n_hashes=val
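# --- Added usage sketch (not part of the original module) ---
# End-to-end forward pass of a small ReformerLM. axial_shape=(8, 16) factors the
# assumed max_seq_len of 128; all other sizes are illustrative assumptions.
def _demo_reformer_lm():
    model = ReformerLM(vocab_sz=256, d_model=64, n_layers=2, n_heads=8,
                       max_seq_len=128, bucket_size=64, axial_shape=(8, 16))
    x = torch.randint(0, 256, (2, 128))       # [bs, sl] token ids
    return model(x)                            # -> [2, 128, 256] logits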
# Cell
def reformer_lm_splits(model):
"Splits ReformerLM `model` into groups for differential learning rates."
groups = L([model.emb] + [l for l in model.encoder.layers.blocks] + [model.proj])
return groups.map(params) | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04a_models.reformer.ipynb (unless otherwise specified).
__all__ = ['Chunk', 'ChunkedFeedForward', 'Deterministic', 'ReversibleBlock', 'IrreversibleBlock', 'ReversibleSequence',
'RevSwap', 'RevHalfResidual', 'RevChunk', 'RevMerge', 'ReversibleSequenceV2', 'ReversibleEncoder',
'ReversibleDecoder', 'ReversibleLM', 'ReversibleTransformer', 'ReversibleEncoderV2', 'ReversibleLMV2',
'LSHEncoderBlock', 'LSHEncoder', 'LSHLM', 'ReformerEncoder', 'ReformerLM', 'reformer_lm_splits']
# Cell
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
from functools import wraps
from fastai.basics import *
from .core import *
from .layers import *
from .attention.all import *
from .transformer import LMMixin, EncDecMixin
# Cell
class Chunk(Module):
"Applies fn to input chunked along dim"
def __init__(self, n_chunks:int, fn:Module, dim:int=-1):
store_attr()
def forward(self, x, **kwargs):
if self.n_chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.n_chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
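# --- Added usage sketch (not part of the original module) ---
# Chunk splits the input along `dim`, applies `fn` to each piece and concatenates
# the results; for a position-wise fn this matches applying it to the full tensor
# while lowering peak memory. Sizes are illustrative assumptions.
def _demo_chunk():
    ff = nn.Linear(64, 64)
    chunked = Chunk(4, ff, dim=1)             # process the sequence 32 tokens at a time
    x = torch.randn(2, 128, 64)
    return chunked(x)                          # same values as ff(x), shape [2, 128, 64]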
# Cell
class ChunkedFeedForward(Module):
"Applies positionwise feed-forward layer to input chunced along dim"
def __init__(self, d:int, d_ff:int=None, n_chunks:int=1, dropout:float=0., dim:int=-1):
store_attr('n_chunks,dim')
d_ff = default(d_ff, 4*d)
self.net = nn.Sequential(
nn.Linear(d, d_ff),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(d_ff, d),
nn.Dropout(dropout)
)
def forward(self, x, **kwargs):
if self.n_chunks == 1:
return self.net(x)
chunks = x.chunk(self.n_chunks, dim = self.dim)
return torch.cat([self.net(c) for c in chunks], dim = self.dim)
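# --- Added usage sketch (not part of the original module) ---
# Same idea as Chunk, but with the feed-forward net built in. Sizes are assumptions.
def _demo_chunked_ff():
    ff = ChunkedFeedForward(64, n_chunks=4, dim=1)   # d_ff defaults to 4*64
    x = torch.randn(2, 128, 64)
    return ff(x)                                      # -> [2, 128, 64]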
# Cell
class Deterministic(Module):
"""
Wrapper module to ensure determinism for backward pass
following example for saving and setting rng here
https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
"""
def __init__(self, net:Module):
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
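# --- Added usage sketch (not part of the original module) ---
# Deterministic records the RNG state on the recorded pass and replays it on request,
# so stochastic layers such as dropout produce the same mask again - this is what
# makes the reversible backward pass exact. Values are illustrative.
def _demo_deterministic():
    net = Deterministic(nn.Dropout(0.5))
    x = torch.ones(2, 8)
    y_recorded = net(x, record_rng=True)        # stores the RNG state, then runs dropout
    y_replayed = net(x, set_rng=True)           # restores the stored state before running
    return torch.equal(y_recorded, y_replayed)  # True: identical dropout mask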
# Cell
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(Module):
"Applies f and g in reversible manner. Avoids storing outputs for backpropagation"
def __init__(self, f:Module, g:Module, depth=None, send_signal=False):
store_attr('depth, send_signal')
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = False
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = True
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
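# --- Added usage sketch (not part of the original module) ---
# A ReversibleBlock expects its input to carry both residual streams concatenated on
# the last dim (2*d features) and returns the updated pair; gradients are meant to be
# driven through backward_pass by _ReversibleFunction below. Sizes are assumptions.
def _demo_reversible_block():
    f, g = nn.Linear(32, 32), nn.Linear(32, 32)
    blk = ReversibleBlock(f, g, depth=0)
    x = torch.randn(2, 16, 64)                # [bs, sl, 2*d]
    return blk(x)                              # -> [2, 16, 64]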
# Cell
class IrreversibleBlock(Module):
"Mimics ReversibleBlock computation but gradients are computed as ussual"
def __init__(self, f, g):
store_attr()
def forward(self, x, f_args={}, g_args={}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1 = x1 + self.f(x2, **f_args)
y2 = x2 + self.g(y1, **g_args)
return torch.cat([y1, y2], dim=2)
# Cell
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
# Cell
class ReversibleSequence(Module):
"""
    Stack of ReversibleBlocks constructed from `blocks`. Applies ReversibleBlocks if
    sequence length is > rev_thres, otherwise IrreversibleBlocks.
"""
def __init__(self, blocks, rev_thres = 0, send_signal = False):
        self.rev_thres = rev_thres # uses rev blocks if seq_len > rev_thres else irrev_blocks
self.blocks = nn.ModuleList([ReversibleBlock(f, g, depth, send_signal) for depth, (f, g) in enumerate(blocks)])
self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, arg_route = (True, True), **kwargs):
reverse = x.shape[1] > self.rev_thres
blocks = self.blocks if reverse else self.irrev_blocks
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {'f_args': f_args, 'g_args': g_args}
if not reverse:
for block in blocks:
x = block(x, **block_kwargs)
return x
return _ReversibleFunction.apply(x, blocks, block_kwargs)
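# --- Added usage sketch (not part of the original module) ---
# ReversibleSequence takes (f, g) pairs; here each pair is a plain Linear so the
# example stays tiny. With rev_thres=0 the reversible path is always used.
# Sizes are illustrative assumptions.
def _demo_reversible_sequence():
    pairs = nn.ModuleList([nn.ModuleList([nn.Linear(32, 32), nn.Linear(32, 32)])
                           for _ in range(2)])
    seq = ReversibleSequence(pairs, rev_thres=0)
    x = torch.randn(2, 16, 64)                # both residual streams concatenated
    return seq(x)                              # -> [2, 16, 64]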
# Cell
class RevSwap(Module):
def forward(self, x1, x2):
return x2, x1
def backward_pass(self, x1, x2, dx1, dx2, **kwargs):
return x2, x1, dx2, dx1
# Cell
class RevHalfResidual(Module):
"Reversible Half-Residual. Can be used to stack arbitrary number of residual blocks in reversible manner"
def __init__(self, submodule:Module, depth=None, send_signal=False):
store_attr('depth, send_signal')
self.f = Deterministic(submodule)
def forward(self, x1, x2):
return x1 + self.f(x2, record_rng=self.training), x2
def backward_pass(self, y1, y2, dy1, dy2, **kwargs):
x2 = y2
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True)
torch.autograd.backward(fx2, dy1)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
return x1, x2, dy1, dx2
# Cell
# needed for compatibility with `ReversibleEncoder`
class RevChunk(Module):
def forward(self, x):
return x.chunk(2, dim=-1)
def backward_pass(self, x1, x2, dx1, dx2):
return torch.cat([x1,x2], dim=-1), torch.cat([dx1,dx2], dim=-1)
class RevMerge(Module):
def forward(self, x1, x2):
return torch.cat([x1, x2], dim=-1)
def backward_pass(self, y, dy):
y1, y2 = y.chunk(2, dim=-1)
        dy1, dy2 = dy.chunk(2, dim=-1)    # gradients must be split from dy, not from y
return y1, y2, dy1, dy2
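# --- Added usage sketch (not part of the original module) ---
# Manually chaining the V2 building blocks: RevChunk splits the duplicated features
# into two streams, each RevHalfResidual adds f(x2) to x1, RevSwap alternates the
# stream that gets updated, and RevMerge concatenates them back. Sizes are assumptions.
def _demo_rev_v2_blocks():
    d = 32
    x = torch.randn(2, 16, 2 * d)
    x1, x2 = RevChunk()(x)
    x1, x2 = RevHalfResidual(nn.Linear(d, d))(x1, x2)
    x1, x2 = RevSwap()(x1, x2)
    x1, x2 = RevHalfResidual(nn.Linear(d, d))(x1, x2)
    return RevMerge()(x1, x2)                  # -> [2, 16, 2*d]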
# Cell
class _ReversibleFunctionV2(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
x1,x2 = blocks[0](x)
for block in blocks[1:-1]:
x1,x2 = block(x1,x2, **kwargs)
x = blocks[-1](x1,x2)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
y1, y2, dy1, dy2 = ctx.blocks[-1].backward_pass(y, dy)
for block in ctx.blocks[-2:1:-1]:
y1,y2, dy1,dy2 = block.backward_pass(y1,y2, dy1,dy2, **kwargs)
_, dy = ctx.blocks[0].backward_pass(y1,y2, dy1,dy2)
return dy, None, None
# Cell
class ReversibleSequenceV2(Module):
"""
    Stack of ReversibleBlocks constructed from `blocks`. Applies ReversibleBlocks if
    sequence length is > rev_thres, otherwise IrreversibleBlocks.
"""
def __init__(self, blocks, rev_thres = 0, send_signal = False):
        self.rev_thres = rev_thres # uses rev blocks if seq_len > rev_thres else irrev_blocks
self.blocks = nn.ModuleList(blocks)
# self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])
    def forward(self, x, arg_route = (True, True), **kwargs):
        # NOTE: the irreversible fallback (self.irrev_blocks) is not built for the V2
        # blocks (see the commented-out line above), so the reversible path is always
        # taken here regardless of rev_thres; arg routing is likewise unused by V2.
        return _ReversibleFunctionV2.apply(x, self.blocks, {})
# Cell
class ReversibleEncoder(Module):
"Stack of ReversibleBlocks"
def __init__(self,
d_model:int,
n_layers:int=6,
n_heads:int = 8,
max_seq_len:int = 512,
ff_chunks:int = 1,
causal:bool = False,
attn_dropout:float = 0.,
post_attn_dropout:float = None,
attn_bias:bool=False,
ff_dropout:float = 0.,
d_ff:int = None,
prenorm:bool=True,
final_norm:Module=None,
rev_thres:int=0):
# store_attr()
blocks = []
norm_wrapper = PreNorm if prenorm else PostNorm
for ind in range(n_layers):
attn = Attention(d_model, n_heads=n_heads, causal=causal, dropout=attn_dropout, out_dropout=post_attn_dropout, bias=attn_bias)
ff = ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1)
f = norm_wrapper(d_model, attn)
g = norm_wrapper(d_model, ff)
blocks.append(nn.ModuleList([f, g]))
self.norm = final_norm(d_model) if exists(final_norm) else None
self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (False, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
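# --- Added usage sketch (not part of the original module) ---
# A two-layer reversible encoder with full attention; masks are not routed to the
# sublayers here (arg_route is (False, False) in forward). Sizes are assumptions.
def _demo_reversible_encoder():
    enc = ReversibleEncoder(d_model=64, n_layers=2, n_heads=8, final_norm=nn.LayerNorm)
    x = torch.randn(2, 64, 64)                 # [bs, sl, d_model]
    return enc(x)                               # -> [2, 64, 64]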
# Cell
class ReversibleDecoder(Module):
"Stack of ReversibleBlocks. Uses AdditiveAttention."
def __init__(self,
d_model,
n_layers = 6,
n_heads = 8,
max_seq_len = 512,
d_head = None,
bucket_size = 64,
n_hashes = 8,
ff_chunks = 1,
attn_chunks = None, # ??
attn_dropout = 0.,
post_attn_dropout = None,
attn_bias:bool=False,
ff_dropout = 0.,
d_ff = None,
prenorm=True,
final_norm:Module=None,
rev_thres = 0,
):
store_attr('d_model,n_layers')
get_attn = lambda: AdditiveAttention(d_model, n_heads=n_heads, causal=True, dropout=attn_dropout, out_dropout=post_attn_dropout, bias=attn_bias)
get_ff = lambda: ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1)
norm_wrapper = PreNorm if prenorm else PostNorm
blocks = []
for ind in range(n_layers):
f = norm_wrapper(d_model, get_attn())
g = norm_wrapper(d_model, get_ff())
blocks.append(nn.ModuleList([f, g]))
self.norm = final_norm(d_model) if exists(final_norm) else None
# send_signal is not implemented for now
self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (True, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
# Cell
class ReversibleLM(Module, LMMixin):
"""
Reversible Transformer for language modelling
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for computing the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - if True, attention projection layers will have bias
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
* rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
ff_chunks:int=1,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='absolute',
max_seq_len:int=512,
axial_shape=None,
axial_emb_dims=None,
pad_idx:int=None,
prenorm:bool=True,
attn_bias:bool=False,
rev_thres:int=0):
store_attr()
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
self.encoder = ReversibleEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, ff_chunks=ff_chunks,
final_norm=nn.LayerNorm, rev_thres=rev_thres)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
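# --- Added usage sketch (not part of the original module) ---
# Language-model forward pass with reversible layers; weight tying means the output
# projection reuses the embedding matrix. All sizes are illustrative assumptions.
def _demo_reversible_lm():
    model = ReversibleLM(vocab_sz=256, d_model=64, n_layers=2, n_heads=8, max_seq_len=128)
    x = torch.randint(0, 256, (2, 128))         # [bs, sl] token ids
    return model(x)                              # -> [2, 128, 256] logits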
# Cell
#TODO test weight tying
# Note on weight tying: it's done like here in fastai AWD_LSTM model
# Lucidrains does it with custom MatrixMultiply module https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reformer_pytorch.py#L106
#TODO: update docstrings
class ReversibleTransformer(Module):
"""
Basic Transformer Encoder-Decoder model
Parameters:
* enc_vocab_sz: int - source vocab size
* dec_vocab_sz: int - target vocab size
* d_model: int - inner dimension of the model
* n_enc_layers: int (default: 6)
* n_dec_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* max_seq_len: int (default: 512)
* prenorm: bool - whether to use PreNorm or PostNorm
* attn_bias: bool - whether to allow biases in attention projection layers
* pad_idx: int - padding token id, if pad_idx is provided, and no mask/context_mask are
passed to forward method will be used to generate padding masks
        * tie_weights: bool - if True target embedding weights are used for computing the output projection
* shared_emb: bool - if True encoder and decoder will use shared embedding layer
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
Inputs:
* src - source input ids, shape [bs, src_sl]
* tgt - target input ids, shape [bs, tgt_sl]
* src_mask - optional boolean source mask, shape [bs, src_sl]
* tgt_mask - optional boolean target mask, shape [bs, tgt_sl]
Returns:
* logits - target token logits, shape [bs, tgt_sl, tgt_vocab_sz]
"""
def __init__(self,
enc_vocab_sz,
dec_vocab_sz,
d_model,
n_layers:int=6,
n_enc_layers=None,
n_dec_layers=None,
n_heads=8,
d_ff=None,
ff_chunks:int=1,
pad_idx=None,
tie_weights=True,
shared_emb = False,
attn_dropout=0.1,
ff_dropout=0.1,
emb_dropout=0.1,
prenorm=True,
attn_bias=False,
comb_attn=False,
pos_enc='absolute',
max_seq_len=512,
axial_shape=None,
axial_emb_dims=None):
store_attr()
n_enc_layers = ifnone(n_enc_layers, n_layers)
n_dec_layers = ifnone(n_dec_layers, n_layers)
self.enc_emb = TransformerEmbedding(enc_vocab_sz, d_model, max_seq_len, dropout=emb_dropout, pos_enc=pos_enc,
axial_shape=axial_shape, axial_emb_dims=axial_emb_dims)
if shared_emb:
            assert (enc_vocab_sz == dec_vocab_sz), "Encoder and decoder vocab sizes don't match"
self.dec_emb = self.enc_emb
else:
self.dec_emb = TransformerEmbedding(dec_vocab_sz, d_model, max_seq_len, dropout=emb_dropout, pos_enc=pos_enc,
axial_shape=axial_shape, axial_emb_dims=axial_emb_dims)
self.encoder = ReversibleEncoder(d_model, n_enc_layers, n_heads, d_ff=d_ff, attn_dropout=attn_dropout,
ff_dropout=ff_dropout, prenorm=prenorm, attn_bias=attn_bias,
final_norm=nn.LayerNorm, causal=False, ff_chunks=ff_chunks)
self.decoder = ReversibleDecoder(d_model, n_dec_layers, n_heads, d_ff=d_ff, attn_dropout=attn_dropout,
ff_dropout=ff_dropout, prenorm=prenorm, attn_bias=attn_bias,
final_norm=nn.LayerNorm, ff_chunks=ff_chunks)
self.proj = nn.Linear(d_model, dec_vocab_sz)
if tie_weights: self.proj.weight = self.dec_emb.emb.weight
def forward(self, src, tgt, src_mask=None, tgt_mask=None):
src_mask = default(src_mask, self.get_padding_mask(src))
tgt_mask = default(tgt_mask, self.get_padding_mask(tgt))
enc = self.encoder(self.enc_emb(src), mask=src_mask)
out = self.decoder(self.dec_emb(tgt), context=enc, mask=tgt_mask, context_mask=src_mask)
return self.proj(out)
def get_padding_mask(self, x):
if self.pad_idx is None: return None
return (x != self.pad_idx)
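# --- Added usage sketch (not part of the original module) ---
# Encoder-decoder forward pass following the docstring's Inputs/Returns contract;
# without pad_idx no padding masks are generated. Sizes are illustrative assumptions.
def _demo_reversible_transformer():
    model = ReversibleTransformer(enc_vocab_sz=256, dec_vocab_sz=256, d_model=64,
                                  n_layers=2, n_heads=8, max_seq_len=128)
    src = torch.randint(0, 256, (2, 128))        # [bs, src_sl]
    tgt = torch.randint(0, 256, (2, 128))        # [bs, tgt_sl]
    return model(src, tgt)                        # -> [2, 128, 256] logits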
# Cell
class ReversibleEncoderV2(Module):
"Stack of ReversibleBlocks"
def __init__(self,
d_model:int,
n_layers:int=6,
n_heads:int = 8,
max_seq_len:int = 512,
ff_chunks:int = 1,
causal:bool = False,
attn_dropout:float = 0.,
post_attn_dropout:float = None,
attn_bias:bool=False,
ff_dropout:float = 0.,
d_ff:int = None,
prenorm:bool=True,
final_norm:Module=None,
rev_thres:int=0):
# store_attr()
blocks = [RevChunk()]
norm_wrapper = PreNorm if prenorm else PostNorm
for ind in range(n_layers):
sublayer = (Attention(d_model, n_heads=n_heads, causal=causal, dropout=attn_dropout, out_dropout=post_attn_dropout, bias=attn_bias)
if ind%2==0 else
ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1))
f = norm_wrapper(d_model, sublayer)
            blocks += [RevHalfResidual(f), RevSwap()] if ind != (n_layers-1) else [RevHalfResidual(f)]
blocks += [RevMerge()]
self.norm = final_norm(d_model) if exists(final_norm) else None
# send_signal is not implemented for now
self.layers = ReversibleSequenceV2(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=False)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (False, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
# Cell
class ReversibleLMV2(Module, LMMixin):
"""
Reversible Transformer for language modelling
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for computing the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - if True, attention projection layers will have bias
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
* rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='absolute',
max_seq_len:int=512,
axial_shape=None,
axial_emb_dims=None,
pad_idx:int=None,
prenorm:bool=False,
attn_bias:bool=False,
rev_thres:int=0):
store_attr()
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
self.encoder = ReversibleEncoderV2(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias,
final_norm=nn.LayerNorm, rev_thres=rev_thres)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
# Cell
class LSHEncoderBlock(Module):
"Encoder block using ReformerAttention"
def __init__(self,
d_model:int,
n_heads:int = 8,
d_ff:int = None,
attn_dropout:float = 0.1,
ff_dropout:float = 0.1,
causal:bool = False,
attn_bias:bool = False,
prenorm:bool=False,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
        store_attr('attn_dropout') # maybe add a separate attn_post_dropout argument later
if prenorm:
self.attn = Residual(PreNorm(d_model, ReformerAttention(d_model, n_heads=n_heads, causal=causal,
dropout=attn_dropout, bias=attn_bias, use_lsh=use_lsh,
n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)))
self.ff = Residual(PreNorm(d_model, FeedForward(d_model, d_ff=d_ff, dropout=ff_dropout)))
else:
self.attn = PostNorm(d_model, Residual(ReformerAttention(d_model, n_heads=n_heads, causal=causal,
dropout=attn_dropout, bias=attn_bias, use_lsh=use_lsh,
n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)))
self.ff = PostNorm(d_model, Residual(FeedForward(d_model, d_ff=d_ff, dropout=ff_dropout)))
def forward(self, x, mask=None):
out = self.attn(x, mask=mask)
return self.ff(out)
# Cell
class LSHEncoder(Module):
"""Stack of TransformerEncoderBlocks"""
def __init__(self,
d_model,
n_layers=6,
n_heads=8,
d_ff=None,
ff_dropout=0.1,
attn_dropout=0.1,
attn_bias=False,
causal=False,
prenorm=False,
use_lsh:bool=True,
final_norm=None,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
store_attr('d_model')
self.layers = nn.ModuleList([])
for _ in range(n_layers):
self.layers.append(LSHEncoderBlock(d_model, n_heads, causal=causal,
d_ff=d_ff, attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed))
self.norm = None if final_norm is None else final_norm(d_model)
def forward(self, x, mask=None):
for layer in self.layers: x = layer(x, mask=mask)
if self.norm is not None: x = self.norm(x)
return x
# Cell
class LSHLM(Module, LMMixin):
"""
Transformer for language modelling with LSH attention
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for computing the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - whether to allow biases in attention projection layers
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
        * use_lsh: bool - parameter to switch between LSH and full attention
* n_hashes: int - number of hashing rounds for LSH
* bucket_size: int - input sequence length should be divisible by 2*bucket_size
* seed: int - for LSHAttention module
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='absolute',
max_seq_len:int=512,
axial_shape:tuple=None,
axial_emb_dims:tuple=None,
pad_idx:int=None,
prenorm:bool=False,
attn_bias:bool=False,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
store_attr()
self._use_lsh = use_lsh
self._n_hashes = n_hashes
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
final_norm = nn.LayerNorm if prenorm else None
self.encoder = LSHEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
final_norm=final_norm, n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
@property
def use_lsh(self):
return self._use_lsh
@use_lsh.setter
def use_lsh(self, val):
self._use_lsh = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'use_lsh'): m.use_lsh=val
@property
def n_hashes(self):
return self._n_hashes
@n_hashes.setter
def n_hashes(self, val):
self._n_hashes = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'n_hashes'): m.n_hashes=val
# Cell
class ReformerEncoder(Module):
"Stack of ReversibleBlocks"
def __init__(self,
d_model:int,
n_layers:int=6,
n_heads:int = 8,
max_seq_len:int = 512,
ff_chunks:int = 1,
causal:bool = False,
attn_dropout:float = 0.,
post_attn_dropout:float = None,
attn_bias:bool=False,
ff_dropout:float = 0.,
d_ff:int = None,
prenorm:bool=True,
final_norm:Module=None,
rev_thres:int=0,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
seed:int=None):
# store_attr()
blocks = []
norm_wrapper = PreNorm if prenorm else PostNorm
for ind in range(n_layers):
attn = ReformerAttention(d_model, n_heads=n_heads, causal=causal, dropout=attn_dropout,
bias=attn_bias, use_lsh=use_lsh, n_hashes=n_hashes, bucket_size=bucket_size,
seed=seed)
ff = ChunkedFeedForward(d_model, d_ff, n_chunks=ff_chunks, dropout=ff_dropout, dim=1)
f = norm_wrapper(d_model, attn)
g = norm_wrapper(d_model, ff)
blocks.append(nn.ModuleList([f, g]))
self.norm = final_norm(d_model) if exists(final_norm) else None
self.layers = ReversibleSequence(nn.ModuleList(blocks), rev_thres=rev_thres, send_signal=True)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
arg_route = (True, False)
# pdb.set_trace()
x = self.layers(x, arg_route = arg_route, **kwargs)
x = torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
if exists(self.norm): x = self.norm(x)
return x
# Cell
class ReformerLM(Module, LMMixin):
"""
Reformer for language modelling. Uses LSH or full sharedQK attention
Parameters:
* vocab_sz: int
* d_model: int - inner dimension of the model
* n_layers: int (default: 6)
* n_heads: int (default: 8)
* d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model
        * ff_chunks: int - number of chunks for FeedForward layer computation
* attn_dropout: float - attention dropout
* ff_dropout: float - feed-forward dropout
* emb_dropout: float - embedding dropout
* causal: bool (default: True) - if True does causal masking automatically
* max_seq_len: int (default: 512)
        * tie_weights: bool - if True target embedding weights are used for computing the output projection
        * prenorm: bool - whether to use PreNorm or PostNorm
        * attn_bias: bool - whether to allow biases in attention projection layers
* pad_idx: int - padding token id, required for autogeneration of padding mask
* pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use
* axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of
max_seq_len
* axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model
* rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks
        * use_lsh: bool - parameter to switch between LSH and full attention
* n_hashes: int - number of hashing rounds for LSH
* bucket_size: int - input sequence length should be divisible by 2*bucket_size
* seed: int - for LSHAttention module
Inputs:
* x - input ids, shape [bs, sl]
* mask - optional boolean mask, shape [bs, sl]
Returns:
* logits - target token logits, shape [bs, sl, vocab_sz]
"""
def __init__(self,
vocab_sz:int,
d_model:int,
n_layers:int=6,
n_heads:int=8,
d_ff:int=None,
ff_chunks:int=1,
attn_dropout:float=0.1,
ff_dropout:float=0.1,
emb_dropout:float=0.1,
tie_weights:bool=True,
causal:bool=True,
pos_enc:str='axial',
max_seq_len:int=512,
axial_shape:tuple=None,
axial_emb_dims:tuple=None,
pad_idx:int=None,
prenorm:bool=True,
attn_bias:bool=False,
use_lsh:bool=True,
n_hashes:int=8,
bucket_size:int=64,
rev_thres:int=0,
seed:int=None):
store_attr()
self._use_lsh = use_lsh
self._n_hashes = n_hashes
self.emb = TransformerEmbedding(vocab_sz, d_model, max_seq_len, dropout=emb_dropout,
pos_enc=pos_enc, axial_shape=axial_shape,
axial_emb_dims=axial_emb_dims)
final_norm = nn.LayerNorm if prenorm else None
self.encoder = ReformerEncoder(d_model, n_layers, n_heads, causal=causal, d_ff=d_ff,
attn_dropout=attn_dropout, ff_dropout=ff_dropout,
prenorm=prenorm, attn_bias=attn_bias, use_lsh=use_lsh,
final_norm=final_norm, n_hashes=n_hashes, bucket_size=bucket_size,
ff_chunks=ff_chunks, rev_thres=rev_thres, seed=seed)
self.proj = nn.Linear(d_model, vocab_sz)
if tie_weights: self.proj.weight = self.emb.emb.weight
def forward(self, x, mask=None):
x = self.emb(x)
x = self.encoder(x, mask=mask)
return self.proj(x)
@property
def use_lsh(self):
return self._use_lsh
@use_lsh.setter
def use_lsh(self, val):
self._use_lsh = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'use_lsh'): m.use_lsh=val
@property
def n_hashes(self):
return self._n_hashes
@n_hashes.setter
def n_hashes(self, val):
self._n_hashes = val
for c in self.children():
for m in c.modules():
if hasattr(m, 'n_hashes'): m.n_hashes=val
# Cell
def reformer_lm_splits(model):
"Splits ReformerLM `model` into groups for differential learning rates."
groups = L([model.emb] + [l for l in model.encoder.layers.blocks] + [model.proj])
return groups.map(params) | en | 0.526001 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04a_models.reformer.ipynb (unless otherwise specified). # Cell # Cell # Cell # Cell Wrapper module to ensure determinism for backward pass following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html # Cell # heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py # once multi-GPU is confirmed working, refactor and send PR back to source # Cell # Cell # Cell Stack of ReversibleBlocks constructed from blocks.Applies ReversibleBlocks if sequence length is > rev_thres or else IrreversibleBlocks. # uses revblocks if seq_len else irrev_blocks # Cell # Cell # Cell # needed for compatibility with `ReversibleEncoder` # Cell # Cell Stack of ReversibleBlocks constructed from blocks.Applies ReversibleBlocks if sequence length is > rev_thres or else IrreversibleBlocks. # uses revblocks if seq_len else irrev_blocks # self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks]) # Cell # store_attr() # pdb.set_trace() # Cell # ?? # send_signal is not implemented for now # pdb.set_trace() # Cell Reversible Transformer for language modelling Parameters: * vocab_sz: int * d_model: int - inner dimension of the model * n_layers: int (default: 6) * n_heads: int (default: 8) * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model * ff_chunkes: int - number of chunks for FeedForward layer computation * attn_dropout: float - attention dropout * ff_dropout: float - feed-forward dropout * emb_dropout: float - embedding dropout * causal: bool (default: True) - if True does causal masking automatically * max_seq_len: int (default: 512) * tie_weights: bool - if True target embedding weights are used for computation output projection * prenorm: bool - wether to use PreNorm or PostNorm * attn_bias: bool - if True projection layers attention modules will have bias * pad_idx: int - padding token id, required for autogeneration of padding mask * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of max_seq_len * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model * rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks Inputs: * x - input ids, shape [bs, sl] * mask - optional boolean mask, shape [bs, sl] Returns: * logits - target token logits, shape [bs, sl, vocab_sz] # Cell #TODO test weight tying # Note on weight tying: it's done like here in fastai AWD_LSTM model # Lucidrains does it with custom MatrixMultiply module https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reformer_pytorch.py#L106 #TODO: update docstrings Basic Transformer Encoder-Decoder model Parameters: * enc_vocab_sz: int - source vocab size * dec_vocab_sz: int - target vocab size * d_model: int - inner dimension of the model * n_enc_layers: int (default: 6) * n_dec_layers: int (default: 6) * n_heads: int (default: 8) * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model * ff_chunkes: int - number of chunks for FeedForward layer computation * attn_dropout: float - attention dropout * ff_dropout: float - feed-forward dropout * emb_dropout: float - embedding dropout * max_seq_len: int (default: 512) * prenorm: bool - whether to use PreNorm or PostNorm * attn_bias: bool - 
whether to allow biases in attention projection layers * pad_idx: int - padding token id, if pad_idx is provided, and no mask/context_mask are passed to forward method will be used to generate padding masks * tie_weights: bool - if True target embedding weights are used for computation output projection * shared_emb: bool - if True encoder and decoder will use shared embedding layer * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of max_seq_len * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model Inputs: * src - source input ids, shape [bs, src_sl] * tgt - target input ids, shape [bs, tgt_sl] * src_mask - optional boolean source mask, shape [bs, src_sl] * tgt_mask - optional boolean target mask, shape [bs, tgt_sl] Returns: * logits - target token logits, shape [bs, tgt_sl, tgt_vocab_sz] # Cell # store_attr() # send_signal is not implemented for now # pdb.set_trace() # Cell Reversible Transformer for language modelling Parameters: * vocab_sz: int * d_model: int - inner dimension of the model * n_layers: int (default: 6) * n_heads: int (default: 8) * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model * attn_dropout: float - attention dropout * ff_dropout: float - feed-forward dropout * emb_dropout: float - embedding dropout * causal: bool (default: True) - if True does causal masking automatically * max_seq_len: int (default: 512) * tie_weights: bool - if True target embedding weights are used for computation output projection * prenorm: bool - wether to use PreNorm or PostNorm * attn_bias: bool - if True projection layers attention modules will have bias * pad_idx: int - padding token id, required for autogeneration of padding mask * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of max_seq_len * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model * rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks Inputs: * x - input ids, shape [bs, sl] * mask - optional boolean mask, shape [bs, sl] Returns: * logits - target token logits, shape [bs, sl, vocab_sz] # Cell # mb separate argument attn_post_dropout # Cell Stack of TransformerEncoderBlocks # Cell Transformer for language modelling with LSH attention Parameters: * vocab_sz: int * d_model: int - inner dimension of the model * n_layers: int (default: 6) * n_heads: int (default: 8) * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model * attn_dropout: float - attention dropout * ff_dropout: float - feed-forward dropout * emb_dropout: float - embedding dropout * causal: bool (default: True) - if True does causal masking automatically * max_seq_len: int (default: 512) * tie_weights: bool - if True target embedding weights are used for computation output projection * prenorm: bool - wether to use PreNorm or PostNorm * attn_bias: bool - wether to allow biases in attention projection layers * pad_idx: int - padding token id, required for autogeneration of padding mask * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of max_seq_len * axial_emb_dims: tuple - [optional] axial embedding components, should sum 
to d_model * use_slh: bool - parameter to switch between LSH and full attention * n_hashes: int - number of hashing rounds for LSH * bucket_size: int - input sequence length should be divisible by 2*bucket_size * seed: int - for LSHAttention module Inputs: * x - input ids, shape [bs, sl] * mask - optional boolean mask, shape [bs, sl] Returns: * logits - target token logits, shape [bs, sl, vocab_sz] # Cell # store_attr() # pdb.set_trace() # Cell Reformer for language modelling. Uses LSH or full sharedQK attention Parameters: * vocab_sz: int * d_model: int - inner dimension of the model * n_layers: int (default: 6) * n_heads: int (default: 8) * d_ff: int - inner dimension of the pointwise FeedForward net, if None defaults to 4*d_model * ff_chunkes: int - number of chunks for FeedForward layer computation * attn_dropout: float - attention dropout * ff_dropout: float - feed-forward dropout * emb_dropout: float - embedding dropout * causal: bool (default: True) - if True does causal masking automatically * max_seq_len: int (default: 512) * tie_weights: bool - if True target embedding weights are used for computation output projection * prenorm: bool - wether to use PreNorm or PostNorm * attn_bias: bool - wether to allow biases in attention projection layers * pad_idx: int - padding token id, required for autogeneration of padding mask * pos_enc: str from {'absolute', 'fixed', 'axial'} - type of positional encoding to use * axial_shape: tuple - required if 'axial' positional encoding are used, should be factors of max_seq_len * axial_emb_dims: tuple - [optional] axial embedding components, should sum to d_model * rev_thres: int - if (seq_len < rev_thres) applies irreversible blocks * use_slh: bool - parameter to switch between LSH and full attention * n_hashes: int - number of hashing rounds for LSH * bucket_size: int - input sequence length should be divisible by 2*bucket_size * seed: int - for LSHAttention module Inputs: * x - input ids, shape [bs, sl] * mask - optional boolean mask, shape [bs, sl] Returns: * logits - target token logits, shape [bs, sl, vocab_sz] # Cell | 2.214108 | 2 |
pygame_demo4/playmp3test.py | chivitc1/pygamedemo | 0 | 6631585 | import pygame
MUSIC_PATH = "/home/chinv/Music/rap_viet/ViDoLaEm-OsadShinHyunWoo.mp3"
pygame.mixer.pre_init(44100, 16, 2, 4096)
pygame.init()
pygame.display.set_mode((200,100))
pygame.mixer.music.load(MUSIC_PATH)
pygame.mixer.music.play()
pygame.time.wait(5000)
pygame.mixer.music.load("/home/chinv/Music/rap_viet/HaiTrieuNam-DenBien.mp3")
pygame.mixer.music.play()
pygame.time.wait(2000)
clock = pygame.time.Clock()
clock.tick(10)
while pygame.mixer.music.get_busy():
# pygame.event.poll()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
clock.tick(10) | import pygame
MUSIC_PATH = "/home/chinv/Music/rap_viet/ViDoLaEm-OsadShinHyunWoo.mp3"
pygame.mixer.pre_init(44100, 16, 2, 4096)
pygame.init()
pygame.display.set_mode((200,100))
pygame.mixer.music.load(MUSIC_PATH)
pygame.mixer.music.play()
pygame.time.wait(5000)
pygame.mixer.music.load("/home/chinv/Music/rap_viet/HaiTrieuNam-DenBien.mp3")
pygame.mixer.music.play()
pygame.time.wait(2000)
clock = pygame.time.Clock()
clock.tick(10)
while pygame.mixer.music.get_busy():
# pygame.event.poll()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
clock.tick(10) | de | 0.176934 | # pygame.event.poll() | 2.764001 | 3 |
flask/app/forms.py | pnnl/Temwizard | 2 | 6631586 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, FloatField, IntegerField
from wtforms.validators import DataRequired
class FirstSublattice(FlaskForm):
scale_image = FloatField("2. Scale Image", default=1)
pixels_nanometer = FloatField("3. Pixels per nanometer", default=1) #90 #218.365
max_dist = FloatField("4. Half the distance between the most intense atoms", default=45) #.19 #.24
plane_first_sublattice = IntegerField("5. The second sublattice is on the line between which zone", default=2, description = "Indicate with the plane order (first is 0)")
plane_second_sublattice = IntegerField("6. The third sublattice is on the line between which zone", default=2, description = "Indicate with the plane order (first is 0)")
submit = SubmitField('Rerun Atomap on Current Page')
#max_dist image scale_image pixels_nanometer
class ViewImageForm(FlaskForm):
scale_image = FloatField("Scale Image", default=1, id = "scale_image")
pixels_nanometer = IntegerField("Pixels per nanometer", default=1, id = "pixels_nanometer")
max_dist = FloatField("Approximately half the distance between the most intense atoms in the structure (used to calculate atom position) in nanometers", default=30, id = "max_dist")
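# --- Added usage sketch (not part of the original module) ---
# How these forms are typically consumed from a Flask view; the route name,
# template, app object and run_atomap hook are assumptions, not part of this file.
#
# from flask import render_template
#
# @app.route('/first-sublattice', methods=['GET', 'POST'])
# def first_sublattice():
#     form = FirstSublattice()
#     if form.validate_on_submit():
#         scale = form.scale_image.data              # values parsed by WTForms
#         max_dist = form.max_dist.data
#         run_atomap(scale, max_dist)                 # hypothetical processing hook
#     return render_template('first_sublattice.html', form=form)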
| from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, FloatField, IntegerField
from wtforms.validators import DataRequired
class FirstSublattice(FlaskForm):
scale_image = FloatField("2. Scale Image", default=1)
pixels_nanometer = FloatField("3. Pixels per nanometer", default=1) #90 #218.365
max_dist = FloatField("4. Half the distance between the most intense atoms", default=45) #.19 #.24
plane_first_sublattice = IntegerField("5. The second sublattice is on the line between which zone", default=2, description = "Indicate with the plane order (first is 0)")
plane_second_sublattice = IntegerField("6. The third sublattice is on the line between which zone", default=2, description = "Indicate with the plane order (first is 0)")
submit = SubmitField('Rerun Atomap on Current Page')
#max_dist image scale_image pixels_nanometer
class ViewImageForm(FlaskForm):
scale_image = FloatField("Scale Image", default=1, id = "scale_image")
pixels_nanometer = IntegerField("Pixels per nanometer", default=1, id = "pixels_nanometer")
max_dist = FloatField("Approximately half the distance between the most intense atoms in the structure (used to calculate atom position) in nanometers", default=30, id = "max_dist")
| en | 0.29666 | #90 #218.365 #.19 #.24 #max_dist image scale_image pixels_nanometer | 2.604084 | 3 |
core/struct/mc.py | iceman121/pathforger | 0 | 6631587 | <reponame>iceman121/pathforger
import pickle
class MainCharacter:
def __init__(self):
"""
Class to hold character information
"""
# Main character id
self.name = None
self.p1 = None
self.p1_is = None
self.p2 = None
self.p3 = None
def new_game(self, name, p1, p1_is, p2, p3):
"""
Start new game
:param name: Toon name
:param p1: Subject pronoun
:param p1_is: Subject pronoun verb
:param p2: Possessive pronoun
:param p3: Object pronoun
:return:
"""
self.name = name
self.p1 = p1
self.p1_is = p1_is
self.p2 = p2
self.p3 = p3
def load_game(self, choice):
"""
Load game from file
:param choice: name of selected toon
:return:
"""
with open(f'assets/character/{choice}', 'rb') as f:
self.__dict__ = pickle.load(f)
f.close()
def save(self):
"""
Save game to file
:return:
"""
with open(f'assets/character/toon_{self.name}.pickle', 'wb') as f:
pickle.dump(self.__dict__, f)
f.close()
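# --- Added usage sketch (not part of the original module) ---
# Round-trip of a character through save() and load_game(); the toon name and
# pronouns are made-up values, and files land under assets/character/ as coded above.
def _demo_main_character():
    mc = MainCharacter()
    mc.new_game(name='Rook', p1='they', p1_is='they are', p2='their', p3='them')
    mc.save()                                     # writes assets/character/toon_Rook.pickle
    loaded = MainCharacter()
    loaded.load_game('toon_Rook.pickle')          # argument is the file name, not a full path
    return loaded.name                            # 'Rook'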
| import pickle
class MainCharacter:
def __init__(self):
"""
Class to hold character information
"""
# Main character id
self.name = None
self.p1 = None
self.p1_is = None
self.p2 = None
self.p3 = None
def new_game(self, name, p1, p1_is, p2, p3):
"""
Start new game
:param name: Toon name
:param p1: Subject pronoun
:param p1_is: Subject pronoun verb
:param p2: Possessive pronoun
:param p3: Object pronoun
:return:
"""
self.name = name
self.p1 = p1
self.p1_is = p1_is
self.p2 = p2
self.p3 = p3
def load_game(self, choice):
"""
Load game from file
:param choice: name of selected toon
:return:
"""
with open(f'assets/character/{choice}', 'rb') as f:
self.__dict__ = pickle.load(f)
f.close()
def save(self):
"""
Save game to file
:return:
"""
with open(f'assets/character/toon_{self.name}.pickle', 'wb') as f:
pickle.dump(self.__dict__, f)
f.close() | en | 0.754987 | Class to hold character information # Main character id Start new game :param name: Toon name :param p1: Subject pronoun :param p1_is: Subject pronoun verb :param p2: Possessive pronoun :param p3: Object pronoun :return: Load game from file :param choice: name of selected toon :return: Save game to file :return: | 3.381972 | 3 |
middleware/legato/templates/legato_gfx_pda_5000/Support_BSP_PIC32MZ_EF_Curiosity.py | rbryson74/gfx | 0 | 6631588 | # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
############ SSD1963 + TOUCH I2C CONFIG ######################################################
bsp_pic32mzef_cu_ActivateList_SSD = ["le_gfx_driver_ssd1963", "le_gfx_intf_parallel_ebi", "i2c2", "drv_i2c", "drv_i2c0", "core_timer", "sys_time", "ebi"]
bsp_pic32mzef_cu_AutoConnectList_SSD = [["gfx_legato", "gfx_driver", "le_gfx_driver_ssd1963", "gfx_driver_ssd1963"],
["le_gfx_driver_ssd1963", "Graphics Display", "gfx_disp_pdatm5000_800x480", "gfx_display"],
["drv_i2c_0", "drv_i2c_I2C_dependency", "i2c2", "I2C2_I2C"],
["gfx_maxtouch_controller", "i2c", "drv_i2c_0", "drv_i2c"],
["le_gfx_driver_ssd1963", "Display Interface", "le_gfx_intf_parallel_ebi", "le_gfx_intf_parallel_ebi"],
["le_gfx_intf_parallel_ebi", "EBI_CS", "ebi", "ebi_cs0"],
["sys_time", "sys_time_TMR_dependency", "core_timer", "CORE_TIMER_TMR"]]
bsp_pic32mzef_cu_PinConfig_SSD = [{"pin": 104, "name": "BSP_MAXTOUCH_CHG", "type": "GPIO", "direction": "In", "latch": "", "abcd": ""}, #RD0
{"pin": 53, "name": "GFX_DISP_INTF_PIN_RD", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}, #RK3
{"pin": 51, "name": "GFX_DISP_INTF_PIN_RSDC", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}, #RK1
{"pin": 52, "name": "GFX_DISP_INTF_PIN_CS", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}, #RK2
{"pin": 29, "name": "GFX_DISP_INTF_PIN_RESET", "type": "GPIO", "direction": "Out", "latch": "Low", "abcd": ""}, #RJ14
{"pin": 12, "name": "GFX_DISP_INTF_PIN_WR", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}] #RC3
##########################################################################################
def bsp_pic32mzef_cu_EventHandler(event):
global pinConfigureFxn
if (event == "configure"):
        #Override default pin configure function w/ PIC32M specific one
pinConfigureFxn = configurePinsPIC32M
try:
### Slow down I2C2 to 10kHz
Database.setSymbolValue("i2c2", "I2C_CLOCK_SPEED", 10000L, 1)
except:
return
bsp_pic32mzef_cu_DisplayInterfaceList = ["SSD1963"]
bsp_pic32mzef_cu_obj_SSD = bspSupportObj(bsp_pic32mzef_cu_PinConfig_SSD,
bsp_pic32mzef_cu_ActivateList_SSD,
None,
bsp_pic32mzef_cu_AutoConnectList_SSD,
bsp_pic32mzef_cu_EventHandler)
addDisplayIntfSupport("BSP_PIC32MZ_EF_Curiosity_2.0", bsp_pic32mzef_cu_DisplayInterfaceList)
addBSPSupport("BSP_PIC32MZ_EF_Curiosity_2.0", "SSD1963", bsp_pic32mzef_cu_obj_SSD)
| # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
############ SSD1963 + TOUCH I2C CONFIG ######################################################
bsp_pic32mzef_cu_ActivateList_SSD = ["le_gfx_driver_ssd1963", "le_gfx_intf_parallel_ebi", "i2c2", "drv_i2c", "drv_i2c0", "core_timer", "sys_time", "ebi"]
bsp_pic32mzef_cu_AutoConnectList_SSD = [["gfx_legato", "gfx_driver", "le_gfx_driver_ssd1963", "gfx_driver_ssd1963"],
["le_gfx_driver_ssd1963", "Graphics Display", "gfx_disp_pdatm5000_800x480", "gfx_display"],
["drv_i2c_0", "drv_i2c_I2C_dependency", "i2c2", "I2C2_I2C"],
["gfx_maxtouch_controller", "i2c", "drv_i2c_0", "drv_i2c"],
["le_gfx_driver_ssd1963", "Display Interface", "le_gfx_intf_parallel_ebi", "le_gfx_intf_parallel_ebi"],
["le_gfx_intf_parallel_ebi", "EBI_CS", "ebi", "ebi_cs0"],
["sys_time", "sys_time_TMR_dependency", "core_timer", "CORE_TIMER_TMR"]]
bsp_pic32mzef_cu_PinConfig_SSD = [{"pin": 104, "name": "BSP_MAXTOUCH_CHG", "type": "GPIO", "direction": "In", "latch": "", "abcd": ""}, #RD0
{"pin": 53, "name": "GFX_DISP_INTF_PIN_RD", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}, #RK3
{"pin": 51, "name": "GFX_DISP_INTF_PIN_RSDC", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}, #RK1
{"pin": 52, "name": "GFX_DISP_INTF_PIN_CS", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}, #RK2
{"pin": 29, "name": "GFX_DISP_INTF_PIN_RESET", "type": "GPIO", "direction": "Out", "latch": "Low", "abcd": ""}, #RJ14
{"pin": 12, "name": "GFX_DISP_INTF_PIN_WR", "type": "GPIO", "direction": "Out", "latch": "High", "abcd": ""}] #RC3
##########################################################################################
def bsp_pic32mzef_cu_EventHandler(event):
global pinConfigureFxn
if (event == "configure"):
#Override default pin configure function w/ PIC32M specific one
pinConfigureFxn = configurePinsPIC32M
try:
### Slow down I2C2 to 10kHz
Database.setSymbolValue("i2c2", "I2C_CLOCK_SPEED", 10000L, 1)
except:
return
bsp_pic32mzef_cu_DisplayInterfaceList = ["SSD1963"]
bsp_pic32mzef_cu_obj_SSD = bspSupportObj(bsp_pic32mzef_cu_PinConfig_SSD,
bsp_pic32mzef_cu_ActivateList_SSD,
None,
bsp_pic32mzef_cu_AutoConnectList_SSD,
bsp_pic32mzef_cu_EventHandler)
addDisplayIntfSupport("BSP_PIC32MZ_EF_Curiosity_2.0", bsp_pic32mzef_cu_DisplayInterfaceList)
addBSPSupport("BSP_PIC32MZ_EF_Curiosity_2.0", "SSD1963", bsp_pic32mzef_cu_obj_SSD)
| en | 0.433803 | # coding: utf-8 ############################################################################## # Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries. # # Subject to your compliance with these terms, you may use Microchip software # and any derivatives exclusively with Microchip products. It is your # responsibility to comply with third party license terms applicable to your # use of third party software (including open source software) that may # accompany Microchip software. # # THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER # EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED # WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A # PARTICULAR PURPOSE. # # IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, # INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND # WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS # BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE # FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN # ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY, # THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE. ############################################################################## ############ SSD1963 + TOUCH I2C CONFIG ###################################################### #RD0 #RK3 #RK1 #RK2 #RJ14 #RC3 ########################################################################################## #Override default pin configur function w/ PIC32M specific one ### Slow down I2C2 to 10kHz | 1.081395 | 1 |
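The entry above registers the SSD1963 support object for the Curiosity 2.0 board through bspSupportObj, addDisplayIntfSupport and addBSPSupport, which the surrounding MPLAB Harmony tooling supplies. As a rough illustration of that registration pattern only (a simplified stand-in written from the names used in the script, not the real Harmony API), the bookkeeping amounts to a nested registry keyed by board and display interface:

# simplified stand-in for the Harmony-side registry assumed by the script above
_display_intf_support = {}   # board name -> list of display interface names
_bsp_support = {}            # (board name, interface name) -> support object

class bspSupportObj:
    def __init__(self, pin_config, activate_list, deactivate_list,
                 autoconnect_list, event_handler):
        self.pin_config = pin_config
        self.activate_list = activate_list
        self.deactivate_list = deactivate_list
        self.autoconnect_list = autoconnect_list
        self.event_handler = event_handler

def addDisplayIntfSupport(board, interfaces):
    _display_intf_support.setdefault(board, []).extend(interfaces)

def addBSPSupport(board, interface, support_obj):
    _bsp_support[(board, interface)] = support_obj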
sgtpy/gammamie_pure/g2mca_chain.py | MatKie/SGTPy | 12 | 6631589 | from __future__ import division, print_function, absolute_import
import numpy as np
# Equation (62) Paper 2014
def g2mca(xhi00, khs, xs_m, da2new, suma_g2, eps_ii, a1vdw_cteii):
g2 = 3 * da2new / eps_ii
g2 -= khs * suma_g2 / xhi00
g2 /= xs_m
g2 /= - a1vdw_cteii
return g2
def dg2mca_dxhi00(xhi00, khs, dkhs, xs_m, d2a2new, dsuma_g2, eps_ii,
a1vdw_cteii):
sum1, dsum1 = dsuma_g2
g2 = 3. * np.asarray(d2a2new) / eps_ii
g2[0] -= khs * sum1 / xhi00
g2[1] += khs * sum1 / xhi00**2
g2[1] -= (sum1 * dkhs + khs * dsum1)/xhi00
g2 /= xs_m
g2 /= - a1vdw_cteii
return g2
def d2g2mca_dxhi00(xhi00, khs, dkhs, d2khs, xs_m, d3a2new, d2suma_g2,
eps_ii, a1vdw_cteii):
sum1, dsum1, d2sum1 = d2suma_g2
g2 = 3. * np.asarray(d3a2new) / eps_ii
aux1 = khs * sum1 / xhi00
aux2 = aux1 / xhi00
aux3 = (sum1 * dkhs + khs * dsum1)/xhi00
g2[0] -= aux1
g2[1] += aux2
g2[1] -= aux3
g2[2] += 2*aux3/xhi00
g2[2] -= 2*aux2/xhi00
g2[2] -= (2*dkhs*dsum1 + sum1*d2khs + khs*d2sum1)/xhi00
g2 /= xs_m
g2 /= - a1vdw_cteii
return g2
| from __future__ import division, print_function, absolute_import
import numpy as np
# Equation (62) Paper 2014
def g2mca(xhi00, khs, xs_m, da2new, suma_g2, eps_ii, a1vdw_cteii):
g2 = 3 * da2new / eps_ii
g2 -= khs * suma_g2 / xhi00
g2 /= xs_m
g2 /= - a1vdw_cteii
return g2
def dg2mca_dxhi00(xhi00, khs, dkhs, xs_m, d2a2new, dsuma_g2, eps_ii,
a1vdw_cteii):
sum1, dsum1 = dsuma_g2
g2 = 3. * np.asarray(d2a2new) / eps_ii
g2[0] -= khs * sum1 / xhi00
g2[1] += khs * sum1 / xhi00**2
g2[1] -= (sum1 * dkhs + khs * dsum1)/xhi00
g2 /= xs_m
g2 /= - a1vdw_cteii
return g2
def d2g2mca_dxhi00(xhi00, khs, dkhs, d2khs, xs_m, d3a2new, d2suma_g2,
eps_ii, a1vdw_cteii):
sum1, dsum1, d2sum1 = d2suma_g2
g2 = 3. * np.asarray(d3a2new) / eps_ii
aux1 = khs * sum1 / xhi00
aux2 = aux1 / xhi00
aux3 = (sum1 * dkhs + khs * dsum1)/xhi00
g2[0] -= aux1
g2[1] += aux2
g2[1] -= aux3
g2[2] += 2*aux3/xhi00
g2[2] -= 2*aux2/xhi00
g2[2] -= (2*dkhs*dsum1 + sum1*d2khs + khs*d2sum1)/xhi00
g2 /= xs_m
g2 /= - a1vdw_cteii
return g2
| en | 0.607808 | # Equation (62) Paper 2014 | 2.269409 | 2 |
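A minimal smoke test of the three functions above, using arbitrary (not physically meaningful) inputs; the import path assumes the package layout given in the entry header:

import numpy as np
from sgtpy.gammamie_pure.g2mca_chain import g2mca, dg2mca_dxhi00

# arbitrary illustrative values, not fitted SAFT-gamma Mie parameters
xhi00, khs, dkhs = 0.3, 0.8, -0.5
xs_m, eps_ii, a1vdw_cteii = 1.5, 300.0, -12.0

g2 = g2mca(xhi00, khs, xs_m, da2new=0.02, suma_g2=0.1,
           eps_ii=eps_ii, a1vdw_cteii=a1vdw_cteii)

# the derivative variant expects value + first derivative packed together
dg2 = dg2mca_dxhi00(xhi00, khs, dkhs, xs_m,
                    d2a2new=np.array([0.02, 0.005]),
                    dsuma_g2=np.array([0.1, 0.03]),
                    eps_ii=eps_ii, a1vdw_cteii=a1vdw_cteii)
print(g2)    # scalar g2 value
print(dg2)   # array: [g2, dg2/dxhi00]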
daemon/files.py | arijitdas123student/jina | 15,179 | 6631590 | <gh_stars>1000+
import os
import re
from itertools import chain
from pathlib import Path
from typing import Dict, List, Union
from fastapi import UploadFile
from jina.logging.logger import JinaLogger
from jina.excepts import DaemonInvalidDockerfile
from . import __rootdir__, __dockerfiles__, jinad_args
from .helper import get_workspace_path
from .models import DaemonID
from .models.enums import DaemonDockerfile, PythonVersion
def store_files_in_workspace(
workspace_id: DaemonID, files: List[UploadFile], logger: "JinaLogger"
) -> None:
"""Store the uploaded files in local disk
:param workspace_id: workspace id representing the local directory
:param files: files uploaded to the workspace endpoint
:param logger: JinaLogger to use
"""
workdir = get_workspace_path(workspace_id)
Path(workdir).mkdir(parents=True, exist_ok=True)
if not files:
logger.warning(f'couldn\'t find any files to upload!')
return
for f in files:
dest = os.path.join(workdir, f.filename)
if os.path.isfile(dest):
logger.warning(
f'file {f.filename} already exists in workspace {workspace_id}, will be replaced'
)
with open(dest, 'wb+') as fp:
content = f.file.read()
fp.write(content)
logger.debug(f'saved uploads to {dest}')
def is_requirements_txt(filename) -> bool:
"""Check if filename is of requirements.txt format
:param filename: filename
:return: True if filename is in requirements.txt format
"""
return True if re.match(r'.*requirements.*\.txt$', filename) else False
class DaemonFile:
"""Object representing .jinad file"""
extension = '.jinad'
def __init__(self, workdir: str, logger: 'JinaLogger' = None) -> None:
self._logger = (
logger
if logger
else JinaLogger(self.__class__.__name__, **vars(jinad_args))
)
self._workdir = workdir
self._logger.debug(
f'analysing {self.extension} files in workdir: {self._workdir}'
)
self._build = DaemonDockerfile.default
self._dockerfile = os.path.join(
__dockerfiles__, f'{DaemonDockerfile.default}.Dockerfile'
)
self._python = PythonVersion.default
self._jina = 'master'
self._run = ''
self._ports = []
self.process_file()
@property
def dockerfile(self) -> str:
"""Property representing dockerfile value
:return: daemon dockerfile in the daemonfile
"""
return self._dockerfile
@dockerfile.setter
def dockerfile(self, value: Union[DaemonDockerfile, str]):
"""Property setter for dockerfile
:param value: allowed values in DaemonDockerfile
"""
try:
self._dockerfile = os.path.join(
__dockerfiles__, f'{DaemonDockerfile(value).value}.Dockerfile'
)
self._build = DaemonDockerfile(value)
except ValueError:
self._logger.debug(
f'value passed for `dockerfile` not in default list of values: {DaemonDockerfile.values}.'
)
if os.path.isfile(os.path.join(self._workdir, value)):
self._dockerfile = os.path.join(self._workdir, value)
self._build = DaemonDockerfile.OTHERS
else:
self._logger.critical(
f'unable to find dockerfile passed at {value}, cannot proceed with the build'
)
raise DaemonInvalidDockerfile()
@property
def python(self):
"""Property representing python version
:return: python version in the daemonfile
"""
return self._python
@python.setter
def python(self, python: PythonVersion):
"""Property setter for python version
:param python: allowed values in PythonVersion
"""
try:
self._python = PythonVersion(python)
except ValueError:
self._logger.warning(
f'invalid value `{python}` passed for \'python\'. allowed values: {PythonVersion.values}. '
f'picking default version: {self._python}'
)
@property
def jinav(self):
"""Property representing python version
:return: python version in the daemonfile
"""
return self._jina
@jinav.setter
def jinav(self, jinav: str):
self._jina = jinav
@property
def run(self) -> str:
"""Property representing run command
:return: run command in the daemonfile
"""
return self._run
@run.setter
def run(self, run: str) -> None:
"""Property setter for run command
:param run: command passed in .jinad file
"""
# remove any leading/trailing spaces and quotes
if len(run) > 1 and run[0] == '\"' and run[-1] == '\"':
run = run.strip('\"')
self._run = run
@property
def ports(self) -> List[int]:
"""Property representing ports
:return: ports to be mapped in the daemonfile
"""
return self._ports
@ports.setter
def ports(self, ports: str):
"""Property setter for ports command
:param ports: ports passed in .jinad file
"""
try:
self._ports = list(map(int, filter(None, ports.split(','))))
except ValueError:
self._logger.warning(f'invalid value `{ports}` passed for \'ports\'')
@property
def requirements(self) -> str:
"""pip packages mentioned in requirements.txt
:return: space separated values
"""
requirements = ''
for filename in os.listdir(self._workdir):
if is_requirements_txt(filename):
with open(os.path.join(self._workdir, filename)) as f:
requirements += ' '.join(f.read().splitlines())
if not requirements:
self._logger.warning(
'please add a requirements.txt file to manage python dependencies in the workspace'
)
return ''
else:
return requirements
@property
def dockercontext(self) -> str:
"""directory for docker context during docker build
:return: docker context directory"""
return __rootdir__ if self._build == DaemonDockerfile.DEVEL else self._workdir
@property
def dockerargs(self) -> Dict:
"""dict of args to be passed during docker build
.. note::
For DEVEL, we expect an already built jina image to be available locally.
We only pass the pip requirements as arguments.
For DEFAULT (cpu), we pass the python version, jina version used to pull the
image from docker hub in addition to the requirements.
:return: dict of args to be passed during docker build
"""
return (
{'PIP_REQUIREMENTS': self.requirements}
if self._build == DaemonDockerfile.DEVEL
else {
'PIP_REQUIREMENTS': self.requirements,
'PY_VERSION': self.python.name.lower(),
'JINA_VERSION': self.jinav,
}
)
def process_file(self) -> None:
"""Process .jinad file and set args"""
# Checks if a file .jinad exists in the workspace
jinad_file_path = Path(self._workdir) / self.extension
if jinad_file_path.is_file():
self._logger.debug(f'found .jinad file in path {jinad_file_path}')
self.set_args(jinad_file_path)
else:
self._logger.warning(
f'please add a .jinad file to manage the docker image in the workspace'
)
def set_args(self, file: Path) -> None:
"""read .jinad file & set properties
:param file: .jinad filepath
"""
from configparser import ConfigParser, DEFAULTSECT
config = ConfigParser()
with open(file) as fp:
config.read_file(chain([f'[{DEFAULTSECT}]'], fp))
params = dict(config.items(DEFAULTSECT))
self.dockerfile = params.get('dockerfile', DaemonDockerfile.default)
self.python = params.get('python')
self.run = params.get('run', '').strip()
self.ports = params.get('ports', '')
def __repr__(self) -> str:
return (
f'DaemonFile(dockerfile={self.dockerfile}, python={self.python}, jina={self.jinav}, '
f'run={self.run}, context={self.dockercontext}, args={self.dockerargs}), '
f'ports={self.ports})'
)
| import os
import re
from itertools import chain
from pathlib import Path
from typing import Dict, List, Union
from fastapi import UploadFile
from jina.logging.logger import JinaLogger
from jina.excepts import DaemonInvalidDockerfile
from . import __rootdir__, __dockerfiles__, jinad_args
from .helper import get_workspace_path
from .models import DaemonID
from .models.enums import DaemonDockerfile, PythonVersion
def store_files_in_workspace(
workspace_id: DaemonID, files: List[UploadFile], logger: "JinaLogger"
) -> None:
"""Store the uploaded files in local disk
:param workspace_id: workspace id representing the local directory
:param files: files uploaded to the workspace endpoint
:param logger: JinaLogger to use
"""
workdir = get_workspace_path(workspace_id)
Path(workdir).mkdir(parents=True, exist_ok=True)
if not files:
logger.warning(f'couldn\'t find any files to upload!')
return
for f in files:
dest = os.path.join(workdir, f.filename)
if os.path.isfile(dest):
logger.warning(
f'file {f.filename} already exists in workspace {workspace_id}, will be replaced'
)
with open(dest, 'wb+') as fp:
content = f.file.read()
fp.write(content)
logger.debug(f'saved uploads to {dest}')
def is_requirements_txt(filename) -> bool:
"""Check if filename is of requirements.txt format
:param filename: filename
:return: True if filename is in requirements.txt format
"""
return True if re.match(r'.*requirements.*\.txt$', filename) else False
class DaemonFile:
"""Object representing .jinad file"""
extension = '.jinad'
def __init__(self, workdir: str, logger: 'JinaLogger' = None) -> None:
self._logger = (
logger
if logger
else JinaLogger(self.__class__.__name__, **vars(jinad_args))
)
self._workdir = workdir
self._logger.debug(
f'analysing {self.extension} files in workdir: {self._workdir}'
)
self._build = DaemonDockerfile.default
self._dockerfile = os.path.join(
__dockerfiles__, f'{DaemonDockerfile.default}.Dockerfile'
)
self._python = PythonVersion.default
self._jina = 'master'
self._run = ''
self._ports = []
self.process_file()
@property
def dockerfile(self) -> str:
"""Property representing dockerfile value
:return: daemon dockerfile in the daemonfile
"""
return self._dockerfile
@dockerfile.setter
def dockerfile(self, value: Union[DaemonDockerfile, str]):
"""Property setter for dockerfile
:param value: allowed values in DaemonDockerfile
"""
try:
self._dockerfile = os.path.join(
__dockerfiles__, f'{DaemonDockerfile(value).value}.Dockerfile'
)
self._build = DaemonDockerfile(value)
except ValueError:
self._logger.debug(
f'value passed for `dockerfile` not in default list of values: {DaemonDockerfile.values}.'
)
if os.path.isfile(os.path.join(self._workdir, value)):
self._dockerfile = os.path.join(self._workdir, value)
self._build = DaemonDockerfile.OTHERS
else:
self._logger.critical(
f'unable to find dockerfile passed at {value}, cannot proceed with the build'
)
raise DaemonInvalidDockerfile()
@property
def python(self):
"""Property representing python version
:return: python version in the daemonfile
"""
return self._python
@python.setter
def python(self, python: PythonVersion):
"""Property setter for python version
:param python: allowed values in PythonVersion
"""
try:
self._python = PythonVersion(python)
except ValueError:
self._logger.warning(
f'invalid value `{python}` passed for \'python\'. allowed values: {PythonVersion.values}. '
f'picking default version: {self._python}'
)
@property
def jinav(self):
"""Property representing python version
:return: python version in the daemonfile
"""
return self._jina
@jinav.setter
def jinav(self, jinav: str):
self._jina = jinav
@property
def run(self) -> str:
"""Property representing run command
:return: run command in the daemonfile
"""
return self._run
@run.setter
def run(self, run: str) -> None:
"""Property setter for run command
:param run: command passed in .jinad file
"""
# remove any leading/trailing spaces and quotes
if len(run) > 1 and run[0] == '\"' and run[-1] == '\"':
run = run.strip('\"')
self._run = run
@property
def ports(self) -> List[int]:
"""Property representing ports
:return: ports to be mapped in the daemonfile
"""
return self._ports
@ports.setter
def ports(self, ports: str):
"""Property setter for ports command
:param ports: ports passed in .jinad file
"""
try:
self._ports = list(map(int, filter(None, ports.split(','))))
except ValueError:
self._logger.warning(f'invalid value `{ports}` passed for \'ports\'')
@property
def requirements(self) -> str:
"""pip packages mentioned in requirements.txt
:return: space separated values
"""
requirements = ''
for filename in os.listdir(self._workdir):
if is_requirements_txt(filename):
with open(os.path.join(self._workdir, filename)) as f:
requirements += ' '.join(f.read().splitlines())
if not requirements:
self._logger.warning(
'please add a requirements.txt file to manage python dependencies in the workspace'
)
return ''
else:
return requirements
@property
def dockercontext(self) -> str:
"""directory for docker context during docker build
:return: docker context directory"""
return __rootdir__ if self._build == DaemonDockerfile.DEVEL else self._workdir
@property
def dockerargs(self) -> Dict:
"""dict of args to be passed during docker build
.. note::
For DEVEL, we expect an already built jina image to be available locally.
We only pass the pip requirements as arguments.
For DEFAULT (cpu), we pass the python version, jina version used to pull the
image from docker hub in addition to the requirements.
:return: dict of args to be passed during docker build
"""
return (
{'PIP_REQUIREMENTS': self.requirements}
if self._build == DaemonDockerfile.DEVEL
else {
'PIP_REQUIREMENTS': self.requirements,
'PY_VERSION': self.python.name.lower(),
'JINA_VERSION': self.jinav,
}
)
def process_file(self) -> None:
"""Process .jinad file and set args"""
# Checks if a file .jinad exists in the workspace
jinad_file_path = Path(self._workdir) / self.extension
if jinad_file_path.is_file():
self._logger.debug(f'found .jinad file in path {jinad_file_path}')
self.set_args(jinad_file_path)
else:
self._logger.warning(
f'please add a .jinad file to manage the docker image in the workspace'
)
def set_args(self, file: Path) -> None:
"""read .jinad file & set properties
:param file: .jinad filepath
"""
from configparser import ConfigParser, DEFAULTSECT
config = ConfigParser()
with open(file) as fp:
config.read_file(chain([f'[{DEFAULTSECT}]'], fp))
params = dict(config.items(DEFAULTSECT))
self.dockerfile = params.get('dockerfile', DaemonDockerfile.default)
self.python = params.get('python')
self.run = params.get('run', '').strip()
self.ports = params.get('ports', '')
def __repr__(self) -> str:
return (
f'DaemonFile(dockerfile={self.dockerfile}, python={self.python}, jina={self.jinav}, '
f'run={self.run}, context={self.dockercontext}, args={self.dockerargs}), '
f'ports={self.ports})'
) | en | 0.757414 | Store the uploaded files in local disk :param workspace_id: workspace id representing the local directory :param files: files uploaded to the workspace endpoint :param logger: JinaLogger to use Check if filename is of requirements.txt format :param filename: filename :return: True if filename is in requirements.txt format Object representing .jinad file Property representing dockerfile value :return: daemon dockerfile in the daemonfile Property setter for dockerfile :param value: allowed values in DaemonDockerfile Property representing python version :return: python version in the daemonfile Property setter for python version :param python: allowed values in PythonVersion Property representing python version :return: python version in the daemonfile Property representing run command :return: run command in the daemonfile Property setter for run command :param run: command passed in .jinad file # remove any leading/trailing spaces and quotes Property representing ports :return: ports to be mapped in the daemonfile Property setter for ports command :param ports: ports passed in .jinad file pip packages mentioned in requirements.txt :return: space separated values directory for docker context during docker build :return: docker context directory dict of args to be passed during docker build .. note:: For DEVEL, we expect an already built jina image to be available locally. We only pass the pip requirements as arguments. For DEFAULT (cpu), we pass the python version, jina version used to pull the image from docker hub in addition to the requirements. :return: dict of args to be passed during docker build Process .jinad file and set args # Checks if a file .jinad exists in the workspace read .jinad file & set properties :param file: .jinad filepath | 2.411075 | 2 |
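DaemonFile.set_args above reads .jinad as a section-less INI by faking a [DEFAULT] header. A hypothetical .jinad for a workspace (key names come from set_args; the values are made up) and a standalone check of that parsing trick:

from configparser import ConfigParser, DEFAULTSECT
from io import StringIO
from itertools import chain

# hypothetical .jinad contents for a workspace
JINAD_TEXT = """\
dockerfile = devel
python = 3.8
run = "jina flow --uses flow.yml"
ports = 12345,12346
"""

config = ConfigParser()
# same trick as DaemonFile.set_args: prepend a fake [DEFAULT] section header
config.read_file(chain([f'[{DEFAULTSECT}]'], StringIO(JINAD_TEXT)))
params = dict(config.items(DEFAULTSECT))
print(params['dockerfile'], params['python'], params['run'], params['ports'])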
webapp/views/mlva_query_view.py | fasemoreakinyemi/coxbase_webapp | 0 | 6631591 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound
from sqlalchemy.exc import DBAPIError
from pyramid.paster import get_appsettings
from sqlalchemy import engine_from_config, create_engine
from sqlalchemy.ext.automap import automap_base
from .. import process_request
from sqlalchemy import or_
from .. import models
import logging
import traceback
import sys
@view_config(
route_name="entry_view_mlva", renderer="../templates/mlva_query_view.jinja2"
)
def detailed_mlva_view(request):
ID = request.matchdict["ID"]
Base = automap_base()
settings = get_appsettings(
"/home/travis/build/foerstner-lab/CoxBase-Webapp/development.ini", name="main"
)
engine = engine_from_config(settings, "db2.")
Base.prepare(engine, reflect=True)
isolates = Base.classes.isolates
isolatesRef = Base.classes.isolate_refs2
try:
query = request.db2_session.query(isolates).filter(isolates.mlvaGenotype == ID)
# query = request.db2_session.query(isolates).join(isolatesRef, isolates.isolateid == isolatesRef.isolate_id).filter(isolatesRef.pmid == 25037926).filter(
# isolates.mlvaGenotype == ID)
except DBAPIError:
return Response(db_err_msg, content_type="text/plain", status=500)
return {"count": query.count(), "results": query.all()}
@view_config(
route_name="entry_view_mlva_6", renderer="../templates/mlva_tilburg_query_view.jinja2"
)
def detailed_mlva_tilburg_view(request):
ID = request.matchdict["ID"]
Base = automap_base()
settings = get_appsettings(
"/home/travis/build/foerstner-lab/CoxBase-Webapp/development.ini", name="main"
)
engine = engine_from_config(settings, "db2.")
Base.prepare(engine, reflect=True)
isolates = Base.classes.tilburg_isolates
try:
query = request.db2_session.query(isolates).filter(isolates.profile_ID == ID)
except DBAPIError:
return Response(db_err_msg, content_type="text/plain", status=500)
return {"count": query.count(), "results": query.all()}
db_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to initialize your database tables with `alembic`.
Check your README.txt for descriptions and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
"""
| #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound
from sqlalchemy.exc import DBAPIError
from pyramid.paster import get_appsettings
from sqlalchemy import engine_from_config, create_engine
from sqlalchemy.ext.automap import automap_base
from .. import process_request
from sqlalchemy import or_
from .. import models
import logging
import traceback
import sys
@view_config(
route_name="entry_view_mlva", renderer="../templates/mlva_query_view.jinja2"
)
def detailed_mlva_view(request):
ID = request.matchdict["ID"]
Base = automap_base()
settings = get_appsettings(
"/home/travis/build/foerstner-lab/CoxBase-Webapp/development.ini", name="main"
)
engine = engine_from_config(settings, "db2.")
Base.prepare(engine, reflect=True)
isolates = Base.classes.isolates
isolatesRef = Base.classes.isolate_refs2
try:
query = request.db2_session.query(isolates).filter(isolates.mlvaGenotype == ID)
# query = request.db2_session.query(isolates).join(isolatesRef, isolates.isolateid == isolatesRef.isolate_id).filter(isolatesRef.pmid == 25037926).filter(
# isolates.mlvaGenotype == ID)
except DBAPIError:
return Response(db_err_msg, content_type="text/plain", status=500)
return {"count": query.count(), "results": query.all()}
@view_config(
route_name="entry_view_mlva_6", renderer="../templates/mlva_tilburg_query_view.jinja2"
)
def detailed_mlva_tilburg_view(request):
ID = request.matchdict["ID"]
Base = automap_base()
settings = get_appsettings(
"/home/travis/build/foerstner-lab/CoxBase-Webapp/development.ini", name="main"
)
engine = engine_from_config(settings, "db2.")
Base.prepare(engine, reflect=True)
isolates = Base.classes.tilburg_isolates
try:
query = request.db2_session.query(isolates).filter(isolates.profile_ID == ID)
except DBAPIError:
return Response(db_err_msg, content_type="text/plain", status=500)
return {"count": query.count(), "results": query.all()}
db_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to initialize your database tables with `alembic`.
Check your README.txt for descriptions and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
""" | en | 0.76493 | #!/usr/bin/env python # -*- coding: iso-8859-15 -*- # query = request.db2_session.query(isolates).join(isolatesRef, isolates.isolateid == isolatesRef.isolate_id).filter(isolatesRef.pmid == 25037926).filter( # isolates.mlvaGenotype == ID) \ Pyramid is having a problem using your SQL database. The problem might be caused by one of the following things: 1. You may need to initialize your database tables with `alembic`. Check your README.txt for descriptions and try to run it. 2. Your database server may not be running. Check that the database server referred to by the "sqlalchemy.url" setting in your "development.ini" file is running. After you fix the problem, please restart the Pyramid application to try it again. | 1.896822 | 2 |
annotation/validate_genome.py | Hua-CM/HuaSmallTools | 21 | 6631592 | # -*- coding: utf-8 -*-
# @Time : 2021/8/23 13:09
# @Author : <NAME>
# @File : validate_genome.py
# @Usage :
# @Note :
# @E-mail : <EMAIL>
import argparse
import copy
from BCBio import GFF
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation
from Bio.Data.CodonTable import TranslationError
from collections import defaultdict
start_codon = {'ATG'}
stop_codon = {'TAG', 'TAA', 'TGA'}
def get_cds(gene, scaffold_seq):
cds_features = [_ for _ in gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
if gene.strand == -1:
cds_features = cds_features[::-1]
if not cds_features[0].qualifiers['phase'] == ['0']:
raise TranslationError('The phase of first CDS is not 0')
cds_features = [_.location.extract(scaffold_seq.seq) for _ in cds_features]
cds_seq_full_length = Seq('')
for cds_seq in cds_features:
cds_seq_full_length += cds_seq
protein_seq = cds_seq_full_length.translate(cds=True)
return protein_seq
def to_gene_dict(_gff: list):
gene_dict = {}
for scaffold in _gff:
for gene in scaffold.features:
gene_dict[gene.id] = gene
return gene_dict
def correct_stop_codon(gene, scaffold_seq):
_gene = copy.deepcopy(gene)
cds_features = [_ for _ in _gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
cds_seq_full_length = Seq('')
extention_len = 0
if _gene.strand == 1:
for _feature in cds_features[:-1]:
cds_seq_full_length += _feature.location.extract(scaffold_seq.seq)
# extension
while True:
extention_len += 3
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start,
cds_features[-1].location.end + 3,
strand=1)
cds_seq_test = cds_seq_full_length + cds_features[-1].location.extract(scaffold_seq.seq)
try:
cds_seq_test.translate(cds=True)
break
except TranslationError:
if extention_len > 3000:
break
else:
continue
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.start == cds_features[-1].location.start:
_.location = FeatureLocation(_.location.start, cds_features[-1].location.end, strand=1)
if _.type == 'three_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(_gene.location.start, cds_features[-1].location.end, strand=1)
_gene.sub_features[0].location = FeatureLocation(_gene.sub_features[0].location.start,
cds_features[-1].location.end,
strand=1)
else:
cds_features = cds_features[::-1]
for _feature in cds_features[:-1]:
cds_seq_full_length += _feature.location.extract(scaffold_seq.seq)
while True:
extention_len += 3
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start - 3,
cds_features[-1].location.end,
strand=-1)
cds_seq_test = cds_seq_full_length + cds_features[-1].location.extract(scaffold_seq.seq)
try:
cds_seq_test.translate(cds=True)
break
except TranslationError:
if extention_len > 3000:
break
else:
continue
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == 'three_prime_UTR':
continue
if _.type == "exon" and _.location.start == cds_features[-1].location.start:
_.location = FeatureLocation(cds_features[-1].location.start, _.location.end, strand=-1)
new_sub_features.append(_)
_gene.location = FeatureLocation(cds_features[-1].location.start, _gene.location.end, strand=-1)
_gene.sub_features[0].location = FeatureLocation(cds_features[-1].location.start,
_gene.sub_features[0].location.end,
strand=-1)
_gene.sub_features[0].sub_features = new_sub_features
return _gene
def correct_start_codon(gene, scaffold_seq):
_gene = copy.deepcopy(gene)
cds_features = [_ for _ in _gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
if _gene.strand == 1:
extention_len = 0
while True:
extention_len += 3
_start_pos = cds_features[0].location.start
cds_features[0].location = FeatureLocation(_start_pos-3, cds_features[0].location.end, strand=1)
_codon = scaffold_seq.seq[_start_pos-3: _start_pos]
if _codon in start_codon or extention_len > 3000:
break
if _codon in stop_codon:
raise TranslationError('First codon could not be found')
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.end == cds_features[0].location.end:
_.location = cds_features[0].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(cds_features[0].location.start, _gene.location.end, strand=1)
_gene.sub_features[0].location = FeatureLocation(cds_features[0].location.start,
_gene.sub_features[0].location.end,
strand=1)
else:
extention_len = 0
while True:
extention_len += 3
_start_pos = cds_features[-1].location.end
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start, _start_pos + 3, strand=-1)
_codon = scaffold_seq.seq[_start_pos: _start_pos + 3].reverse_complement()
if _codon in start_codon or extention_len > 3000:
break
if _codon in stop_codon:
raise TranslationError('First codon could not be found')
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.start == cds_features[-1].location.start:
_.location = cds_features[-1].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(_gene.location.start, cds_features[-1].location.end, strand=-1)
_gene.sub_features[0].location = FeatureLocation(_gene.sub_features[0].location.start,
cds_features[-1].location.end,
strand=-1)
_gene.sub_features[0].sub_features = new_sub_features
return _gene
def correct_phase(gene, scaffold_seq):
_gene = copy.deepcopy(gene)
cds_features = [_ for _ in _gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
if _gene.strand == 1:
if cds_features[0].qualifiers['phase'] == ['1']:
cds_features[0].location = FeatureLocation(cds_features[0].location.start - 1,
cds_features[0].location.end,
strand=1)
elif cds_features[0].qualifiers['phase'] == ['2']:
cds_features[0].location = FeatureLocation(cds_features[0].location.start - 2,
cds_features[0].location.end,
strand=1)
else:
raise TranslationError('The phase was not 0/1/2')
else:
if cds_features[-1].qualifiers['phase'] == ['1']:
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start,
cds_features[-1].location.end + 1,
strand=-1)
elif cds_features[-1].qualifiers['phase'] == ['2']:
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start,
cds_features[-1].location.end + 2,
strand=-1)
else:
raise TranslationError('The phase was not 0/1/2')
cds_features = cds_features[::-1]
cds_seqs = [_.location.extract(scaffold_seq.seq) for _ in cds_features]
cds_seq_full_length = Seq('')
for cds_seq in cds_seqs:
cds_seq_full_length += cds_seq
# modify_gene
cds_features[0].qualifiers['phase'] = ['0']
new_sub_features = []
if _gene.strand == 1:
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.end == cds_features[0].location.end:
_.location = cds_features[0].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(cds_features[0].location.start, _gene.location.end, strand=1)
_gene.sub_features[0].location = FeatureLocation(cds_features[0].location.start,
_gene.sub_features[0].location.end,
strand=1)
else:
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.start == cds_features[0].location.start:
_.location = cds_features[0].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(_gene.location.start, cds_features[0].location.end, strand=-1)
_gene.sub_features[0].location = FeatureLocation(_gene.sub_features[0].location.start,
cds_features[0].location.end,
strand=-1)
_gene.sub_features[0].sub_features = new_sub_features
try:
cds_seq_full_length.translate(cds=True)
return _gene
except TranslationError:
raise TranslationError('These genes need another round correction', _gene)
def correct(_gff, _genome):
_seqs = SeqIO.to_dict(SeqIO.parse(_genome, 'fasta'))
_gff = [_ for _ in GFF.parse(_gff, base_dict=_seqs)]
correct_list = []
error_dict = defaultdict(list)
for scaffold in _gff:
correct_scaffold = SeqRecord(seq="1234", id=scaffold.id, name=scaffold.name, description=scaffold.description)
for gene in scaffold.features:
try:
get_cds(gene, scaffold)
correct_scaffold.features.append(gene)
except TranslationError as e:
try:
if e.args[0].startswith("First codon"):
_tmp_gene = correct_start_codon(gene, scaffold)
get_cds(_tmp_gene, scaffold)
correct_scaffold.features.append(_tmp_gene)
error_dict.setdefault('corrected', []).append(gene.id)
elif e.args[0].startswith('The phase of first CDS is not 0'):
correct_scaffold.features.append(correct_phase(gene, scaffold))
error_dict.setdefault('corrected', []).append(gene.id)
elif e.args[0].endswith("is not a stop codon"):
_tmp_gene = correct_start_codon(gene, scaffold)
get_cds(_tmp_gene, scaffold)
correct_scaffold.features.append(correct_stop_codon(gene, scaffold))
error_dict.setdefault('corrected', []).append(gene.id)
# can not handle for now
elif e.args[0] == "Extra in frame stop codon found":
error_dict.setdefault('internal', []).append(gene.id)
elif e.args[0].endswith("is not a multiple of three"):
error_dict.setdefault('three', []).append(gene.id)
except TranslationError as e2:
if e2.args[0].startswith('These genes need another round correction'):
correct_scaffold.features.append(e2.args[1])
error_dict.setdefault('phase', []).append(gene.id)
# for second round
elif e2.args[0] == "Extra in frame stop codon found":
error_dict.setdefault('internal', []).append(gene.id)
elif e2.args[0].startswith('First codon'):
error_dict.setdefault('first2', []).append(gene.id)
elif e2.args[0].endswith("is not a stop codon"):
error_dict.setdefault('final', []).append(gene.id)
elif e2.args[0].endswith("is not a multiple of three"):
error_dict.setdefault('three', []).append(gene.id)
except Exception as e:
print(e)
print(gene.id)
correct_list.append(correct_scaffold)
return correct_list, error_dict
def write_result(correct_list: list, error_dict: dict, prefix):
GFF.write(correct_list, open(prefix+'.gff3', 'w'))
with open(prefix+'.log', 'w') as f_log:
f_log.write('01. These genes have an illegal start codon (not ATG):\n' +
'\n'.join(error_dict['first']) + '\n')
f_log.write('02. These genes have an illegal start codon (not ATG) and could not be corrected:\n' +
'\n'.join(error_dict['first2']) + '\n')
f_log.write('03. These genes\' first CDS phase is not 0 and they need a second round of correction:\n' +
'\n'.join(error_dict['phase']) + '\n')
f_log.write('04. These genes have an internal stop codon:\n' +
'\n'.join(error_dict['internal']) + '\n')
f_log.write('05. These genes have an illegal stop codon:\n' +
'\n'.join(error_dict['final']) + '\n')
f_log.write('06. These genes\' lengths are not a multiple of three:\n' +
'\n'.join(error_dict['three']) + '\n')
f_log.write('07. These genes were corrected:\n' +
'\n'.join(error_dict['corrected']) + '\n')
def getArgs():
parser = argparse.ArgumentParser(description="Validate whether the features in gff is legal. Will generate a log \
file and a corresponding gff file")
parser.add_argument('fasta', help="genome fasta file")
parser.add_argument('gff', help="gff file")
parser.add_argument('-p', '--prefix', type=str, default='validated', help="output file prefix. Default: validated")
args = parser.parse_args()
return args
if __name__ == '__main__':
Args = getArgs()
corrected_records, err = correct(Args.gff, Args.fasta)
write_result(corrected_records, err, Args.prefix + '_validated')
| # -*- coding: utf-8 -*-
# @Time : 2021/8/23 13:09
# @Author : <NAME>
# @File : validate_genome.py
# @Usage :
# @Note :
# @E-mail : <EMAIL>
import argparse
import copy
from BCBio import GFF
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import FeatureLocation
from Bio.Data.CodonTable import TranslationError
from collections import defaultdict
start_codon = {'ATG'}
stop_codon = {'TAG', 'TAA', 'TGA'}
def get_cds(gene, scaffold_seq):
cds_features = [_ for _ in gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
if gene.strand == -1:
cds_features = cds_features[::-1]
if not cds_features[0].qualifiers['phase'] == ['0']:
raise TranslationError('The phase of first CDS is not 0')
cds_features = [_.location.extract(scaffold_seq.seq) for _ in cds_features]
cds_seq_full_length = Seq('')
for cds_seq in cds_features:
cds_seq_full_length += cds_seq
protein_seq = cds_seq_full_length.translate(cds=True)
return protein_seq
def to_gene_dict(_gff: list):
gene_dict = {}
for scaffold in _gff:
for gene in scaffold.features:
gene_dict[gene.id] = gene
return gene_dict
def correct_stop_codon(gene, scaffold_seq):
_gene = copy.deepcopy(gene)
cds_features = [_ for _ in _gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
cds_seq_full_length = Seq('')
extention_len = 0
if _gene.strand == 1:
for _feature in cds_features[:-1]:
cds_seq_full_length += _feature.location.extract(scaffold_seq.seq)
# extension
while True:
extention_len += 3
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start,
cds_features[-1].location.end + 3,
strand=1)
cds_seq_test = cds_seq_full_length + cds_features[-1].location.extract(scaffold_seq.seq)
try:
cds_seq_test.translate(cds=True)
break
except TranslationError:
if extention_len > 3000:
break
else:
continue
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.start == cds_features[-1].location.start:
_.location = FeatureLocation(_.location.start, cds_features[-1].location.end, strand=1)
if _.type == 'three_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(_gene.location.start, cds_features[-1].location.end, strand=1)
_gene.sub_features[0].location = FeatureLocation(_gene.sub_features[0].location.start,
cds_features[-1].location.end,
strand=1)
else:
cds_features = cds_features[::-1]
for _feature in cds_features[:-1]:
cds_seq_full_length += _feature.location.extract(scaffold_seq.seq)
while True:
extention_len += 3
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start - 3,
cds_features[-1].location.end,
strand=-1)
cds_seq_test = cds_seq_full_length + cds_features[-1].location.extract(scaffold_seq.seq)
try:
cds_seq_test.translate(cds=True)
break
except TranslationError:
if extention_len > 3000:
break
else:
continue
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == 'three_prime_UTR':
continue
if _.type == "exon" and _.location.start == cds_features[-1].location.start:
_.location = FeatureLocation(cds_features[-1].location.start, _.location.end, strand=-1)
new_sub_features.append(_)
_gene.location = FeatureLocation(cds_features[-1].location.start, _gene.location.end, strand=-1)
_gene.sub_features[0].location = FeatureLocation(cds_features[-1].location.start,
_gene.sub_features[0].location.end,
strand=-1)
_gene.sub_features[0].sub_features = new_sub_features
return _gene
def correct_start_codon(gene, scaffold_seq):
_gene = copy.deepcopy(gene)
cds_features = [_ for _ in _gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
if _gene.strand == 1:
extention_len = 0
while True:
extention_len += 3
_start_pos = cds_features[0].location.start
cds_features[0].location = FeatureLocation(_start_pos-3, cds_features[0].location.end, strand=1)
_codon = scaffold_seq.seq[_start_pos-3: _start_pos]
if _codon in start_codon or extention_len > 3000:
break
if _codon in stop_codon:
raise TranslationError('First codon could not be found')
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.end == cds_features[0].location.end:
_.location = cds_features[0].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(cds_features[0].location.start, _gene.location.end, strand=1)
_gene.sub_features[0].location = FeatureLocation(cds_features[0].location.start,
_gene.sub_features[0].location.end,
strand=1)
else:
extention_len = 0
while True:
extention_len += 3
_start_pos = cds_features[-1].location.end
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start, _start_pos + 3, strand=-1)
_codon = scaffold_seq.seq[_start_pos: _start_pos + 3].reverse_complement()
if _codon in start_codon or extention_len > 3000:
break
if _codon in stop_codon:
raise TranslationError('First codon could not be found')
# modify_gene
new_sub_features = []
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.start == cds_features[-1].location.start:
_.location = cds_features[-1].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(_gene.location.start, cds_features[-1].location.end, strand=-1)
_gene.sub_features[0].location = FeatureLocation(_gene.sub_features[0].location.start,
cds_features[-1].location.end,
strand=-1)
_gene.sub_features[0].sub_features = new_sub_features
return _gene
def correct_phase(gene, scaffold_seq):
_gene = copy.deepcopy(gene)
cds_features = [_ for _ in _gene.sub_features[0].sub_features if _.type == "CDS"]
cds_features.sort(key=lambda x: x.location.start)
if _gene.strand == 1:
if cds_features[0].qualifiers['phase'] == ['1']:
cds_features[0].location = FeatureLocation(cds_features[0].location.start - 1,
cds_features[0].location.end,
strand=1)
elif cds_features[0].qualifiers['phase'] == ['2']:
cds_features[0].location = FeatureLocation(cds_features[0].location.start - 2,
cds_features[0].location.end,
strand=1)
else:
raise TranslationError('The phase was not 0/1/2')
else:
if cds_features[-1].qualifiers['phase'] == ['1']:
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start,
cds_features[-1].location.end + 1,
strand=-1)
elif cds_features[-1].qualifiers['phase'] == ['2']:
cds_features[-1].location = FeatureLocation(cds_features[-1].location.start,
cds_features[-1].location.end + 2,
strand=-1)
else:
raise TranslationError('The phase was not 0/1/2')
cds_features = cds_features[::-1]
cds_seqs = [_.location.extract(scaffold_seq.seq) for _ in cds_features]
cds_seq_full_length = Seq('')
for cds_seq in cds_seqs:
cds_seq_full_length += cds_seq
# modify_gene
cds_features[0].qualifiers['phase'] = ['0']
new_sub_features = []
if _gene.strand == 1:
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.end == cds_features[0].location.end:
_.location = cds_features[0].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(cds_features[0].location.start, _gene.location.end, strand=1)
_gene.sub_features[0].location = FeatureLocation(cds_features[0].location.start,
_gene.sub_features[0].location.end,
strand=1)
else:
for _ in _gene.sub_features[0].sub_features:
if _.type == "exon" and _.location.start == cds_features[0].location.start:
_.location = cds_features[0].location
if _.type == 'five_prime_UTR':
continue
new_sub_features.append(_)
_gene.location = FeatureLocation(_gene.location.start, cds_features[0].location.end, strand=-1)
_gene.sub_features[0].location = FeatureLocation(_gene.sub_features[0].location.start,
cds_features[0].location.end,
strand=-1)
_gene.sub_features[0].sub_features = new_sub_features
try:
cds_seq_full_length.translate(cds=True)
return _gene
except TranslationError:
raise TranslationError('These genes need another round correction', _gene)
def correct(_gff, _genome):
_seqs = SeqIO.to_dict(SeqIO.parse(_genome, 'fasta'))
_gff = [_ for _ in GFF.parse(_gff, base_dict=_seqs)]
correct_list = []
error_dict = defaultdict(list)
for scaffold in _gff:
correct_scaffold = SeqRecord(seq="1234", id=scaffold.id, name=scaffold.name, description=scaffold.description)
for gene in scaffold.features:
try:
get_cds(gene, scaffold)
correct_scaffold.features.append(gene)
except TranslationError as e:
try:
if e.args[0].startswith("First codon"):
_tmp_gene = correct_start_codon(gene, scaffold)
get_cds(_tmp_gene, scaffold)
correct_scaffold.features.append(_tmp_gene)
error_dict.setdefault('corrected', []).append(gene.id)
elif e.args[0].startswith('The phase of first CDS is not 0'):
correct_scaffold.features.append(correct_phase(gene, scaffold))
error_dict.setdefault('corrected', []).append(gene.id)
elif e.args[0].endswith("is not a stop codon"):
_tmp_gene = correct_start_codon(gene, scaffold)
get_cds(_tmp_gene, scaffold)
correct_scaffold.features.append(correct_stop_codon(gene, scaffold))
error_dict.setdefault('corrected', []).append(gene.id)
# can not handle for now
elif e.args[0] == "Extra in frame stop codon found":
error_dict.setdefault('internal', []).append(gene.id)
elif e.args[0].endswith("is not a multiple of three"):
error_dict.setdefault('three', []).append(gene.id)
except TranslationError as e2:
if e2.args[0].startswith('These genes need another round correction'):
correct_scaffold.features.append(e2.args[1])
error_dict.setdefault('phase', []).append(gene.id)
# for second round
elif e2.args[0] == "Extra in frame stop codon found":
error_dict.setdefault('internal', []).append(gene.id)
elif e2.args[0].startswith('First codon'):
error_dict.setdefault('first2', []).append(gene.id)
elif e2.args[0].endswith("is not a stop codon"):
error_dict.setdefault('final', []).append(gene.id)
elif e2.args[0].endswith("is not a multiple of three"):
error_dict.setdefault('three', []).append(gene.id)
except Exception as e:
print(e)
print(gene.id)
correct_list.append(correct_scaffold)
return correct_list, error_dict
def write_result(correct_list: list, error_dict: dict, prefix):
GFF.write(correct_list, open(prefix+'.gff3', 'w'))
with open(prefix+'.log', 'w') as f_log:
f_log.write('01. These genes have an illegal start codon (not ATG):\n' +
'\n'.join(error_dict['first']) + '\n')
f_log.write('02. These genes have an illegal start codon (not ATG) and could not be corrected:\n' +
'\n'.join(error_dict['first2']) + '\n')
f_log.write('03. These genes\' first CDS phase is not 0 and they need a second round of correction:\n' +
'\n'.join(error_dict['phase']) + '\n')
f_log.write('04. These genes have an internal stop codon:\n' +
'\n'.join(error_dict['internal']) + '\n')
f_log.write('05. These genes have an illegal stop codon:\n' +
'\n'.join(error_dict['final']) + '\n')
f_log.write('06. These genes\' lengths are not a multiple of three:\n' +
'\n'.join(error_dict['three']) + '\n')
f_log.write('07. These genes were corrected:\n' +
'\n'.join(error_dict['corrected']) + '\n')
def getArgs():
parser = argparse.ArgumentParser(description="Validate whether the features in gff is legal. Will generate a log \
file and a corresponding gff file")
parser.add_argument('fasta', help="genome fasta file")
parser.add_argument('gff', help="gff file")
parser.add_argument('-p', '--prefix', type=str, default='validated', help="output file prefix. Default: validated")
args = parser.parse_args()
return args
if __name__ == '__main__':
Args = getArgs()
corrected_records, err = correct(Args.gff, Args.fasta)
write_result(corrected_records, err, Args.prefix + '_validated')
| en | 0.329656 | # -*- coding: utf-8 -*- # @Time : 2021/8/23 13:09 # @Author : <NAME> # @File : validate_genome.py # @Usage : # @Note : # @E-mail : <EMAIL> # extension # modify_gene # modify_gene # modify_gene # modify_gene # modify_gene # can not handle for now # for second round | 2.377015 | 2 |
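The dispatch in correct() keys off the TranslationError messages that Bio.Seq.translate(cds=True) raises; a small standalone check with toy sequences (not real gene models) shows the error classes being handled:

from Bio.Seq import Seq
from Bio.Data.CodonTable import TranslationError

cases = {
    "clean CDS": Seq("ATGGCCAAATAA"),        # ATG ... TAA translates to MAK
    "bad start": Seq("GGGGCCAAATAA"),        # first codon is not a start codon
    "missing stop": Seq("ATGGCCAAAGCC"),     # final codon is not a stop codon
    "internal stop": Seq("ATGTAAGCCTAA"),    # extra in-frame stop codon
    "bad length": Seq("ATGGCCAAATAAG"),      # length is not a multiple of three
}

for name, seq in cases.items():
    try:
        print(name, "->", seq.translate(cds=True))
    except TranslationError as err:
        print(name, "->", err.args[0])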
screenplay_selenium/actions/click_on.py | byran/ScreenPlaySelenium | 0 | 6631593 | from screenplay import Action, Actor, log_message
from screenplay_selenium.abilities.browse_the_web import waiting_browser_for
from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException
from ._handle_no_such_element_base_action import handle_no_such_element_base_action
class _click_on_element(handle_no_such_element_base_action):
def __init__(self, locator):
super().__init__()
self._locator = locator
@log_message("Clicks on {self._locator}")
def perform_as(self, actor: Actor):
def click_on_element(browser):
browser.find_element(*self._locator).click()
return True
waiting_browser_for(actor, (StaleElementReferenceException, NoSuchElementException)).until(click_on_element)
class _click_on_stored_element(Action):
def __init__(self, id):
super().__init__()
self._id = id
@log_message("Clicks on {self._id}")
def perform_as(self, actor: Actor):
actor.state[self._id].value.click()
class click_on:
def element(self, locator):
return _click_on_element(locator)
def stored_element(self, id):
return _click_on_stored_element(id)
| from screenplay import Action, Actor, log_message
from screenplay_selenium.abilities.browse_the_web import waiting_browser_for
from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException
from ._handle_no_such_element_base_action import handle_no_such_element_base_action
class _click_on_element(handle_no_such_element_base_action):
def __init__(self, locator):
super().__init__()
self._locator = locator
@log_message("Clicks on {self._locator}")
def perform_as(self, actor: Actor):
def click_on_element(browser):
browser.find_element(*self._locator).click()
return True
waiting_browser_for(actor, (StaleElementReferenceException, NoSuchElementException)).until(click_on_element)
class _click_on_stored_element(Action):
def __init__(self, id):
super().__init__()
self._id = id
@log_message("Clicks on {self._id}")
def perform_as(self, actor: Actor):
actor.state[self._id].value.click()
class click_on:
def element(self, locator):
return _click_on_element(locator)
def stored_element(self, id):
return _click_on_stored_element(id)
| none | 1 | 2.515979 | 3 |
|
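A hypothetical use of the two factories above, calling perform_as directly (the actor object and the By locator are assumptions for illustration; in a real suite the screenplay Actor would invoke the action itself):

from selenium.webdriver.common.by import By

# 'actor' is assumed to be a screenplay Actor that holds the browse_the_web ability
submit = (By.ID, "submit")

click_on().element(submit).perform_as(actor)                  # locate and click, retrying on stale/missing elements
click_on().stored_element("login_button").perform_as(actor)   # click an element saved earlier in actor.state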
backend/monitoring/python_RPA_scripts/test.py | khouloudS/RPA_Telecom_Drive_tests | 0 | 6631594 | import time
import sys
import random
from subprocess import call  # call() is used below to launch the node upload script
# GUI helpers (run, wait, move_mouse_to, click, type_text, press_key,
# file_exists, kill_process) are assumed to be provided by the RPA framework
# that executes this script.
count=0
while True:
if count%60==0:
value=random.randint(0,1)
if(value==0):
run('C:\\Users\\Khouloud\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe')
wait()
#maximize_window('Welcome - Visual Studio Code')
move_mouse_to(x=1481, y=251)
click(x=1481, y=251)
move_mouse_to(x=49, y=11)
click(x=49, y=11)
move_mouse_to(x=44, y=126)
click(x=44, y=126)
move_mouse_to(x=470, y=47)
click(x=470, y=47)
type_text('C:\\Users\\khouloud\\Desktop\\testProject\\speed_evaluator')
press_key('enter')
move_mouse_to(x=791, y=506)
click(x=791, y=506) # open project
move_mouse_to(x=327, y=11)
click(x=327, y=13)
wait()
move_mouse_to(x=327, y=38)
click(x=327, y=38)
type_text('py speed_evaluator.py')
press_key('enter')
while file_exists('C:\\Users\\khouloud\\Desktop\\testProject\\speed_evaluator\\ping.png') == False:
wait()
print ('wait for result !! ')
kill_process('Code.exe')
call(["node", "C:\\Users\\khouloud\\Documents\\PI\\MERN_Stack_PI\\backend\\monitoring\\dropbox_file_upload.js"])
title_notification = sys.argv[1]
message_notification = sys.argv[2]
icon_notification = sys.argv[3]
from notification import my_notification_function
my_notification_function(title_notification,message_notification,icon_notification)
sys.exit()
else:
title_notification = "RPA_stopped_manually"
message_notification = "Your RPA process is stopped manually"
icon_notification = "C:\\Users\\khouloud\\Documents\\PI\\MERN_Stack_PI\\src\\assets\\img\\icons\\common\\process-info.ico"
from notification import my_notification_function
my_notification_function(title_notification,message_notification,icon_notification)
time.sleep(120)
count+=1
| import time
import sys
import random
from subprocess import call  # call() is used below to launch the node upload script
# GUI helpers (run, wait, move_mouse_to, click, type_text, press_key,
# file_exists, kill_process) are assumed to be provided by the RPA framework
# that executes this script.
count=0
while True:
if count%60==0:
value=random.randint(0,1)
if(value==0):
run('C:\\Users\\Khouloud\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe')
wait()
#maximize_window('Welcome - Visual Studio Code')
move_mouse_to(x=1481, y=251)
click(x=1481, y=251)
move_mouse_to(x=49, y=11)
click(x=49, y=11)
move_mouse_to(x=44, y=126)
click(x=44, y=126)
move_mouse_to(x=470, y=47)
click(x=470, y=47)
type_text('C:\\Users\\khouloud\\Desktop\\testProject\\speed_evaluator')
press_key('enter')
move_mouse_to(x=791, y=506)
click(x=791, y=506) # open project
move_mouse_to(x=327, y=11)
click(x=327, y=13)
wait()
move_mouse_to(x=327, y=38)
click(x=327, y=38)
type_text('py speed_evaluator.py')
press_key('enter')
while file_exists('C:\\Users\\khouloud\\Desktop\\testProject\\speed_evaluator\\ping.png') == False:
wait()
print ('wait for result !! ')
kill_process('Code.exe')
call(["node", "C:\\Users\\khouloud\\Documents\\PI\\MERN_Stack_PI\\backend\\monitoring\\dropbox_file_upload.js"])
title_notification = sys.argv[1]
message_notification = sys.argv[2]
icon_notification = sys.argv[3]
from notification import my_notification_function
my_notification_function(title_notification,message_notification,icon_notification)
sys.exit()
else:
title_notification = "RPA_stopped_manually"
message_notification = "Your RPA process is stopped manually"
icon_notification = "C:\\Users\\khouloud\\Documents\\PI\\MERN_Stack_PI\\src\\assets\\img\\icons\\common\\process-info.ico"
from notification import my_notification_function
my_notification_function(title_notification,message_notification,icon_notification)
time.sleep(120)
count+=1
| en | 0.265196 | #maximize_window('Welcome - Visual Studio Code') # open project | 2.601119 | 3 |
src/exabgp/configuration/process/parser.py | pierky/exabgp | 1,560 | 6631595 | # encoding: utf-8
"""
parse_process.py
Created by <NAME> on 2015-06-18.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
import os
import stat
def encoder(tokeniser):
value = tokeniser()
if value not in ('text', 'json'):
raise ValueError('"%s" is an invalid option' % value)
return value
def _make_path(prg):
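    # Build candidate locations for a program path given relative to etc/exabgp,
    # honouring the EXABGP_ETC environment variable, /etc/exabgp and the current directory.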
parts = prg.split('/')
env = os.environ.get('EXABGP_ETC', '')
if env:
options = [os.path.join(env.rstrip('/'), os.path.join(*parts[2:])), '/etc/exabgp']
else:
options = []
options.append('/etc/exabgp')
pwd = os.environ.get('PWD', '').split('/')
if pwd:
# without abspath the path is not / prefixed !
if pwd[-1] in ('etc', 'sbin'):
options.append(os.path.abspath(os.path.join(os.path.join(*pwd[:-1]), os.path.join(*parts))))
if 'etc' not in pwd:
options.append(os.path.abspath(os.path.join(os.path.join(*pwd), os.path.join(*parts))))
return options
def run(tokeniser):
prg = tokeniser()
if prg[0] != '/':
if prg.startswith('etc/exabgp'):
options = _make_path(prg)
else:
options = [
os.path.abspath(os.path.join('/etc/exabgp', prg)),
os.path.abspath(os.path.join(os.path.dirname(tokeniser.fname), prg)),
]
options.extend((os.path.abspath(os.path.join(p, prg)) for p in os.getenv('PATH').split(':')))
for option in options:
if os.path.exists(option):
prg = option
if not os.path.exists(prg):
        raise ValueError('can not locate the program "%s"' % prg)
# race conditions are possible, those are sanity checks not security ones ...
s = os.stat(prg)
if stat.S_ISDIR(s.st_mode):
raise ValueError('can not execute directories "%s"' % prg)
if s.st_mode & stat.S_ISUID:
raise ValueError('refusing to run setuid programs "%s"' % prg)
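    # The program must carry an execute bit relevant to the current user
    # (other, plus the owner/group bits when they apply), checked below.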
check = stat.S_IXOTH
if s.st_uid == os.getuid():
check |= stat.S_IXUSR
if s.st_gid == os.getgid():
check |= stat.S_IXGRP
if not check & s.st_mode:
raise ValueError('exabgp will not be able to run this program "%s"' % prg)
return [prg] + [_ for _ in tokeniser.generator]
| # encoding: utf-8
"""
parse_process.py
Created by <NAME> on 2015-06-18.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
import os
import stat
def encoder(tokeniser):
value = tokeniser()
if value not in ('text', 'json'):
raise ValueError('"%s" is an invalid option' % value)
return value
def _make_path(prg):
parts = prg.split('/')
env = os.environ.get('EXABGP_ETC', '')
if env:
options = [os.path.join(env.rstrip('/'), os.path.join(*parts[2:])), '/etc/exabgp']
else:
options = []
options.append('/etc/exabgp')
pwd = os.environ.get('PWD', '').split('/')
if pwd:
# without abspath the path is not / prefixed !
if pwd[-1] in ('etc', 'sbin'):
options.append(os.path.abspath(os.path.join(os.path.join(*pwd[:-1]), os.path.join(*parts))))
if 'etc' not in pwd:
options.append(os.path.abspath(os.path.join(os.path.join(*pwd), os.path.join(*parts))))
return options
def run(tokeniser):
prg = tokeniser()
if prg[0] != '/':
if prg.startswith('etc/exabgp'):
options = _make_path(prg)
else:
options = [
os.path.abspath(os.path.join('/etc/exabgp', prg)),
os.path.abspath(os.path.join(os.path.dirname(tokeniser.fname), prg)),
]
options.extend((os.path.abspath(os.path.join(p, prg)) for p in os.getenv('PATH').split(':')))
for option in options:
if os.path.exists(option):
prg = option
if not os.path.exists(prg):
        raise ValueError('can not locate the program "%s"' % prg)
# race conditions are possible, those are sanity checks not security ones ...
s = os.stat(prg)
if stat.S_ISDIR(s.st_mode):
raise ValueError('can not execute directories "%s"' % prg)
if s.st_mode & stat.S_ISUID:
raise ValueError('refusing to run setuid programs "%s"' % prg)
check = stat.S_IXOTH
if s.st_uid == os.getuid():
check |= stat.S_IXUSR
if s.st_gid == os.getgid():
check |= stat.S_IXGRP
if not check & s.st_mode:
raise ValueError('exabgp will not be able to run this program "%s"' % prg)
return [prg] + [_ for _ in tokeniser.generator]
| en | 0.82942 | # encoding: utf-8 parse_process.py Created by <NAME> on 2015-06-18. Copyright (c) 2009-2017 Exa Networks. All rights reserved. License: 3-clause BSD. (See the COPYRIGHT file) # without abspath the path is not / prefixed ! # race conditions are possible, those are sanity checks not security ones ... | 2.328371 | 2 |
extractor.py | alangadiel/whatsapp-android-extractor | 0 | 6631596 | <reponame>alangadiel/whatsapp-android-extractor
import sqlite3
import csv
import sys
import os
import codecs
from datetime import datetime
def fmt_phone(number, name):
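    # Normalise the number and ensure the '549' prefix (Argentine mobile numbers
    # as they appear in WhatsApp IDs), then register it under the contact's name.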
if number is None:
return
number = number.replace(' ', '').replace('-', '').replace('+', '')
if number == '':
return
if not number.startswith('549'):
number = '549' + number
contacts_dict[number] = name
contacts_file = sys.argv[1]
db_path = sys.argv[2]
dest_path = sys.argv[3]
dest_path_contacts = dest_path+'\\contacts'
os.mkdir(dest_path_contacts)
dest_path_groups = dest_path+'\\groups'
os.mkdir(dest_path_groups)
# TODO: replace unicode chars for files
# get contact list from csv
contacts_dict = {}
with open(contacts_file, newline='') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader, None) # skip first item.
for row in csv_reader:
name = row[0]
fmt_phone(row[34], name+" - 1")
fmt_phone(row[36], name+" - 2")
fmt_phone(row[38], name+" - 3")
# get groups from whatsapp db
groups_dict = {}
wa_db_path = db_path+"\\wa.db"
print("reading from "+wa_db_path)
with sqlite3.connect(wa_db_path) as con_wa:
cur_wa = con_wa.cursor()
groups = cur_wa.execute("""
SELECT jid, display_name
FROM wa_contacts
WHERE display_name IS NOT NULL""")
for row in groups:
id = row[0]
name = row[1]
groups_dict[id] = name
# get messages from whatsapp db
ms_db_path = db_path+"\\msgstore.db"
print("reading from "+ms_db_path)
with sqlite3.connect(ms_db_path) as con_ms:
cur_ms1 = con_ms.cursor()
chat_ids = cur_ms1.execute("""
SELECT DISTINCT key_remote_jid
FROM messages""")
for chat_id in chat_ids:
if chat_id[0].endswith("@s.whatsapp.net"): # is a contact
# get name
phone = chat_id[0].split('@')[0]
if phone in contacts_dict:
name = contacts_dict[phone]
else:
name = phone
# query messages
cur_ms2 = con_ms.cursor()
messages = cur_ms2.execute("""
SELECT data, timestamp, media_mime_type, key_from_me, media_caption
FROM messages
WHERE key_remote_jid = '"""+chat_id[0]+"""'
ORDER BY timestamp""")
# create file
file_name = dest_path_contacts+"\\"+name+".txt"
print("writing at "+file_name)
with codecs.open(file_name, "x", "utf-8-sig") as file:
for ms in messages:
res_data = ms[0]
res_timestamp = ms[1]
res_media_mime_type = ms[2]
res_key_from_me = ms[3]
res_media_caption = ms[4]
content = ''
if res_data is not None: # TODO: sent img
content = res_data
elif res_media_mime_type is not None:
content = '['+res_media_mime_type+']'
if res_media_caption is not None:
content = content + ' ' + res_media_caption
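                    # WhatsApp stores timestamps in milliseconds since the Unix epoch.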
dt = datetime.utcfromtimestamp(int(res_timestamp)/1000)
dt_str = dt.strftime('%Y-%m-%d %H:%M:%S')
if res_key_from_me == 1:
separator = '>>>'
else:
separator = '<<<'
line = dt_str+' '+separator+' '+content+'\n'
file.write(line)
# TODO: groups
| import sqlite3
import csv
import sys
import os
import codecs
from datetime import datetime
def fmt_phone(number, name):
if number is None:
return
number = number.replace(' ', '').replace('-', '').replace('+', '')
if number == '':
return
if not number.startswith('549'):
number = '549' + number
contacts_dict[number] = name
contacts_file = sys.argv[1]
db_path = sys.argv[2]
dest_path = sys.argv[3]
dest_path_contacts = dest_path+'\\contacts'
os.mkdir(dest_path_contacts)
dest_path_groups = dest_path+'\\groups'
os.mkdir(dest_path_groups)
# TODO: replace unicode chars for files
# get contact list from csv
contacts_dict = {}
with open(contacts_file, newline='') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader, None) # skip first item.
for row in csv_reader:
name = row[0]
fmt_phone(row[34], name+" - 1")
fmt_phone(row[36], name+" - 2")
fmt_phone(row[38], name+" - 3")
# get groups from whatsapp db
groups_dict = {}
wa_db_path = db_path+"\\wa.db"
print("reading from "+wa_db_path)
with sqlite3.connect(wa_db_path) as con_wa:
cur_wa = con_wa.cursor()
groups = cur_wa.execute("""
SELECT jid, display_name
FROM wa_contacts
WHERE display_name IS NOT NULL""")
for row in groups:
id = row[0]
name = row[1]
groups_dict[id] = name
# get messages from whatsapp db
ms_db_path = db_path+"\\msgstore.db"
print("reading from "+ms_db_path)
with sqlite3.connect(ms_db_path) as con_ms:
cur_ms1 = con_ms.cursor()
chat_ids = cur_ms1.execute("""
SELECT DISTINCT key_remote_jid
FROM messages""")
for chat_id in chat_ids:
if chat_id[0].endswith("@s.whatsapp.net"): # is a contact
# get name
phone = chat_id[0].split('@')[0]
if phone in contacts_dict:
name = contacts_dict[phone]
else:
name = phone
# query messages
cur_ms2 = con_ms.cursor()
messages = cur_ms2.execute("""
SELECT data, timestamp, media_mime_type, key_from_me, media_caption
FROM messages
WHERE key_remote_jid = '"""+chat_id[0]+"""'
ORDER BY timestamp""")
# create file
file_name = dest_path_contacts+"\\"+name+".txt"
print("writing at "+file_name)
with codecs.open(file_name, "x", "utf-8-sig") as file:
for ms in messages:
res_data = ms[0]
res_timestamp = ms[1]
res_media_mime_type = ms[2]
res_key_from_me = ms[3]
res_media_caption = ms[4]
content = ''
if res_data is not None: # TODO: sent img
content = res_data
elif res_media_mime_type is not None:
content = '['+res_media_mime_type+']'
if res_media_caption is not None:
content = content + ' ' + res_media_caption
dt = datetime.utcfromtimestamp(int(res_timestamp)/1000)
dt_str = dt.strftime('%Y-%m-%d %H:%M:%S')
if res_key_from_me == 1:
separator = '>>>'
else:
separator = '<<<'
line = dt_str+' '+separator+' '+content+'\n'
file.write(line)
# TODO: groups | en | 0.341667 | # TODO: replace unicode chars for files # get contact list from csv # skip first item. # get groups from whatsapp db SELECT jid, display_name FROM wa_contacts WHERE display_name IS NOT NULL # get messages from whatsapp db SELECT DISTINCT key_remote_jid FROM messages # is a contact # get name # query messages SELECT data, timestamp, media_mime_type, key_from_me, media_caption FROM messages WHERE key_remote_jid = ' ' ORDER BY timestamp # create file # TODO: sent img # TODO: groups | 3.218523 | 3 |
src/compas_rhino/ui/mouse.py | arpastrana/compas | 0 | 6631597 | from __future__ import print_function
import compas
if compas.IPY:
from Rhino.UI import MouseCallback
else:
class MouseCallback(object):
pass
__all__ = ['Mouse']
class Mouse(MouseCallback):
""""""
def __init__(self, parent=None):
super(Mouse, self).__init__()
self.parent = parent
self.x = None # x-coordinate of 2D point in the viewport
self.y = None # y-coordinate of 2D point in the viewport
self.p1 = None # start of the frustum line in world coordinates
self.p2 = None # end of the frustum line in world coordinates
def OnMouseMove(self, e):
line = e.View.ActiveViewport.ClientToWorld(e.ViewportPoint)
self.x = e.ViewportPoint.X
self.y = e.ViewportPoint.Y
self.p1 = line.From
self.p2 = line.To
e.View.Redraw()
def OnMouseDown(self, e):
pass
def OnMouseUp(self, e):
pass
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
from compas.geometry import distance_point_line
from System.Drawing import Color
import Rhino
from Rhino.Geometry import Point3d
import rhinoscriptsyntax as rs
class Inspector(Rhino.Display.DisplayConduit):
""""""
def __init__(self, points, tol=0.1):
super(Inspector, self).__init__()
self.mouse = Mouse()
self.points = points
self.tol = tol
self.dotcolor = Color.FromArgb(255, 0, 0)
self.textcolor = Color.FromArgb(0, 0, 0)
def DrawForeground(self, e):
p1 = self.mouse.p1
p2 = self.mouse.p2
for i, p0 in enumerate(self.points):
if distance_point_line(p0, (p1, p2)) < self.tol:
e.Display.DrawDot(Point3d(*p0), str(i), self.dotcolor, self.textcolor)
break
points = [[i, i, 0] for i in range(10)]
try:
inspector = Inspector(points)
inspector.mouse.Enabled = True
inspector.Enabled = True
# this interrupts the script until the user provides a string or escapes
rs.GetString(message='Do some hovering')
except Exception as e:
print(e)
finally:
inspector.mouse.Enabled = False
inspector.Enabled = False
del inspector.mouse
del inspector
| from __future__ import print_function
import compas
if compas.IPY:
from Rhino.UI import MouseCallback
else:
class MouseCallback(object):
pass
__all__ = ['Mouse']
class Mouse(MouseCallback):
""""""
def __init__(self, parent=None):
super(Mouse, self).__init__()
self.parent = parent
self.x = None # x-coordinate of 2D point in the viewport
self.y = None # y-coordinate of 2D point in the viewport
self.p1 = None # start of the frustum line in world coordinates
self.p2 = None # end of the frustum line in world coordinates
def OnMouseMove(self, e):
line = e.View.ActiveViewport.ClientToWorld(e.ViewportPoint)
self.x = e.ViewportPoint.X
self.y = e.ViewportPoint.Y
self.p1 = line.From
self.p2 = line.To
e.View.Redraw()
def OnMouseDown(self, e):
pass
def OnMouseUp(self, e):
pass
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
from compas.geometry import distance_point_line
from System.Drawing import Color
import Rhino
from Rhino.Geometry import Point3d
import rhinoscriptsyntax as rs
class Inspector(Rhino.Display.DisplayConduit):
""""""
def __init__(self, points, tol=0.1):
super(Inspector, self).__init__()
self.mouse = Mouse()
self.points = points
self.tol = tol
self.dotcolor = Color.FromArgb(255, 0, 0)
self.textcolor = Color.FromArgb(0, 0, 0)
def DrawForeground(self, e):
p1 = self.mouse.p1
p2 = self.mouse.p2
for i, p0 in enumerate(self.points):
if distance_point_line(p0, (p1, p2)) < self.tol:
e.Display.DrawDot(Point3d(*p0), str(i), self.dotcolor, self.textcolor)
break
points = [[i, i, 0] for i in range(10)]
try:
inspector = Inspector(points)
inspector.mouse.Enabled = True
inspector.Enabled = True
# this interrupts the script until the user provides a string or escapes
rs.GetString(message='Do some hovering')
except Exception as e:
print(e)
finally:
inspector.mouse.Enabled = False
inspector.Enabled = False
del inspector.mouse
del inspector
| en | 0.57007 | # x-coordinate of 2D point in the viewport # y-coordinate of 2D point in the viewport # start of the frustum line in world coordinates # end of the frustum line in world coordinates # ============================================================================== # Main # ============================================================================== # this interrupts the script until the user provides a string or escapes | 2.563555 | 3 |
bark_ml/tests/py_bark_behavior_model_tests.py | bark-simulator/rl | 58 | 6631598 | # Copyright (c) 2020 fortiss GmbH
#
# Authors: <NAME>
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import unittest
import numpy as np
import gym
# BARK
from bark.runtime.commons.parameters import ParameterServer
# BARK-ML
from bark_ml.library_wrappers.lib_tf_agents.agents.sac_agent import BehaviorSACAgent
from bark_ml.library_wrappers.lib_tf_agents.agents.ppo_agent import BehaviorPPOAgent
from bark_ml.environments.blueprints import ContinuousMergingBlueprint
from bark_ml.environments.single_agent_runtime import SingleAgentRuntime
from bark_ml.library_wrappers.lib_tf_agents.agents import BehaviorGraphSACAgent
from bark_ml.observers.graph_observer import GraphObserver
import bark_ml.environments.gym # pylint: disable=unused-import
class PyBarkBehaviorModelTests(unittest.TestCase):
def test_sac_agent(self):
params = ParameterServer()
env = gym.make("highway-v0")
sac_agent = BehaviorSACAgent(environment=env, params=params)
ppo_agent = BehaviorPPOAgent(environment=env, params=params)
behaviors = [ppo_agent, sac_agent]
for ml_agent in behaviors:
env.ml_behavior = ml_agent
env.reset()
eval_id = env._scenario._eval_agent_ids[0]
cloned_world = env._world.Copy()
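      # Copying the world is expected to preserve the assigned ML behavior model.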
self.assertEqual(env._world.agents[eval_id].behavior_model, ml_agent)
for _ in range(0, 5):
env._world.Step(0.2)
self.assertEqual(cloned_world.agents[eval_id].behavior_model, ml_agent)
for _ in range(0, 5):
cloned_world.Step(0.2)
for cloned_agent, agent in zip(env._world.agents.values(),
cloned_world.agents.values()):
# NOTE: should be the same as mean is taken from the agents
np.testing.assert_array_equal(cloned_agent.state, agent.state)
def test_sac_graph_agent(self):
params = ParameterServer()
bp = ContinuousMergingBlueprint(params,
num_scenarios=2500,
random_seed=0)
observer = GraphObserver(params=params)
env = SingleAgentRuntime(
blueprint=bp,
observer=observer,
render=False)
sac_agent = BehaviorGraphSACAgent(environment=env,
observer=observer,
params=params)
env.ml_behavior = sac_agent
env.reset()
eval_id = env._scenario._eval_agent_ids[0]
self.assertEqual(env._world.agents[eval_id].behavior_model, sac_agent)
for _ in range(0, 5):
env._world.Step(0.2)
if __name__ == '__main__':
unittest.main() | # Copyright (c) 2020 fortiss GmbH
#
# Authors: <NAME>
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import unittest
import numpy as np
import gym
# BARK
from bark.runtime.commons.parameters import ParameterServer
# BARK-ML
from bark_ml.library_wrappers.lib_tf_agents.agents.sac_agent import BehaviorSACAgent
from bark_ml.library_wrappers.lib_tf_agents.agents.ppo_agent import BehaviorPPOAgent
from bark_ml.environments.blueprints import ContinuousMergingBlueprint
from bark_ml.environments.single_agent_runtime import SingleAgentRuntime
from bark_ml.library_wrappers.lib_tf_agents.agents import BehaviorGraphSACAgent
from bark_ml.observers.graph_observer import GraphObserver
import bark_ml.environments.gym # pylint: disable=unused-import
class PyBarkBehaviorModelTests(unittest.TestCase):
def test_sac_agent(self):
params = ParameterServer()
env = gym.make("highway-v0")
sac_agent = BehaviorSACAgent(environment=env, params=params)
ppo_agent = BehaviorPPOAgent(environment=env, params=params)
behaviors = [ppo_agent, sac_agent]
for ml_agent in behaviors:
env.ml_behavior = ml_agent
env.reset()
eval_id = env._scenario._eval_agent_ids[0]
cloned_world = env._world.Copy()
self.assertEqual(env._world.agents[eval_id].behavior_model, ml_agent)
for _ in range(0, 5):
env._world.Step(0.2)
self.assertEqual(cloned_world.agents[eval_id].behavior_model, ml_agent)
for _ in range(0, 5):
cloned_world.Step(0.2)
for cloned_agent, agent in zip(env._world.agents.values(),
cloned_world.agents.values()):
# NOTE: should be the same as mean is taken from the agents
np.testing.assert_array_equal(cloned_agent.state, agent.state)
def test_sac_graph_agent(self):
params = ParameterServer()
bp = ContinuousMergingBlueprint(params,
num_scenarios=2500,
random_seed=0)
observer = GraphObserver(params=params)
env = SingleAgentRuntime(
blueprint=bp,
observer=observer,
render=False)
sac_agent = BehaviorGraphSACAgent(environment=env,
observer=observer,
params=params)
env.ml_behavior = sac_agent
env.reset()
eval_id = env._scenario._eval_agent_ids[0]
self.assertEqual(env._world.agents[eval_id].behavior_model, sac_agent)
for _ in range(0, 5):
env._world.Step(0.2)
if __name__ == '__main__':
unittest.main() | en | 0.855509 | # Copyright (c) 2020 fortiss GmbH # # Authors: <NAME> # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. # BARK # BARK-ML # pylint: disable=unused-import # NOTE: should be the same as mean is taken from the agents | 1.789001 | 2 |
pwndbg/symbol.py | CrackerCat/pwndbg_linux_kernel | 1 | 6631599 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Looking up addresses for function names / symbols, and
vice-versa.
Uses IDA when available if there isn't sufficient symbol
information available.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import shutil
import tempfile
import elftools.common.exceptions
import elftools.elf.constants
import elftools.elf.elffile
import elftools.elf.segments
import gdb
import six
import pwndbg.arch
import pwndbg.elf
import pwndbg.events
import pwndbg.file
import pwndbg.ida
import pwndbg.memoize
import pwndbg.memory
import pwndbg.qemu
import pwndbg.remote
import pwndbg.stack
import pwndbg.vmmap
def get_directory():
"""
Retrieve the debug file directory path.
The debug file directory path ('show debug-file-directory') is a comma-
separated list of directories which GDB will look in to find the binaries
currently loaded.
"""
result = gdb.execute('show debug-file-directory', to_string=True, from_tty=False)
expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
match = re.search(expr, result)
if match:
return match.group(1)
return ''
def set_directory(d):
gdb.execute('set debug-file-directory %s' % d, to_string=True, from_tty=False)
def add_directory(d):
current = get_directory()
if current:
set_directory('%s:%s' % (current, d))
else:
set_directory(d)
remote_files = {}
remote_files_dir = None
@pwndbg.events.exit
def reset_remote_files():
global remote_files
global remote_files_dir
remote_files = {}
if remote_files_dir is not None:
shutil.rmtree(remote_files_dir)
remote_files_dir = None
@pwndbg.events.new_objfile
def autofetch():
"""
"""
global remote_files_dir
if not pwndbg.remote.is_remote():
return
if pwndbg.qemu.is_qemu_usermode():
return
if pwndbg.android.is_android():
return
if not remote_files_dir:
remote_files_dir = tempfile.mkdtemp()
add_directory(remote_files_dir)
searchpath = get_directory()
for mapping in pwndbg.vmmap.get():
objfile = mapping.objfile
# Don't attempt to download things like '[stack]' and '[heap]'
if not objfile.startswith('/'):
continue
# Don't re-download things that we have already downloaded
if not objfile or objfile in remote_files:
continue
msg = "Downloading %r from the remote server" % objfile
print(msg, end='')
try:
data = pwndbg.file.get(objfile)
print('\r' + msg + ': OK')
except OSError:
# The file could not be downloaded :(
print('\r' + msg + ': Failed')
return
filename = os.path.basename(objfile)
local_path = os.path.join(remote_files_dir, filename)
with open(local_path, 'wb+') as f:
f.write(data)
remote_files[objfile] = local_path
base = None
for mapping in pwndbg.vmmap.get():
if mapping.objfile != objfile:
continue
if base is None or mapping.vaddr < base.vaddr:
base = mapping
if not base:
continue
base = base.vaddr
try:
elf = elftools.elf.elffile.ELFFile(open(local_path, 'rb'))
except elftools.common.exceptions.ELFError:
continue
gdb_command = ['add-symbol-file', local_path, hex(int(base))]
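        # Build an add-symbol-file command that maps every allocatable section of
        # the downloaded ELF to its remote load address (currently only printed below).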
for section in elf.iter_sections():
name = section.name #.decode('latin-1')
section = section.header
if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:
continue
gdb_command += ['-s', name, hex(int(base + section.sh_addr))]
print(' '.join(gdb_command))
# gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)
@pwndbg.memoize.reset_on_objfile
def get(address, gdb_only=False):
"""
Retrieve the textual name for a symbol
"""
# Fast path
if address < pwndbg.memory.MMAP_MIN_ADDR or address >= ((1 << 64)-1):
return ''
# Don't look up stack addresses
if pwndbg.stack.find(address):
return ''
# This sucks, but there's not a GDB API for this.
result = gdb.execute('info symbol %#x' % int(address), to_string=True, from_tty=False)
if not gdb_only and result.startswith('No symbol'):
address = int(address)
exe = pwndbg.elf.exe()
if exe:
exe_map = pwndbg.vmmap.find(exe.address)
if exe_map and address in exe_map:
res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
return res or ''
# Expected format looks like this:
# main in section .text of /bin/bash
# main + 3 in section .text of /bin/bash
# system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6
# No symbol matches system-1.
a, b, c, _ = result.split(None, 3)
if b == '+':
return "%s+%s" % (a, c)
if b == 'in':
return a
return ''
@pwndbg.memoize.reset_on_objfile
def address(symbol):
if isinstance(symbol, six.integer_types):
return symbol
try:
return int(symbol, 0)
except:
pass
try:
symbol_obj = gdb.lookup_symbol(symbol)[0]
if symbol_obj:
return int(symbol_obj.value().address)
except Exception:
pass
try:
result = gdb.execute('info address %s' % symbol, to_string=True, from_tty=False)
address = int(re.search('0x[0-9a-fA-F]+', result).group(), 0)
# The address found should lie in one of the memory maps
# There are cases when GDB shows offsets e.g.:
# pwndbg> info address tcache
# Symbol "tcache" is a thread-local variable at offset 0x40
# in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.
if not pwndbg.vmmap.find(address):
return None
return address
except gdb.error:
return None
try:
address = pwndbg.ida.LocByName(symbol)
if address:
return address
except Exception:
pass
@pwndbg.events.stop
@pwndbg.memoize.reset_on_start
def add_main_exe_to_symbols():
if not pwndbg.remote.is_remote():
return
if pwndbg.android.is_android():
return
exe = pwndbg.elf.exe()
if not exe:
return
addr = exe.address
if not addr:
return
addr = int(addr)
mmap = pwndbg.vmmap.find(addr)
if not mmap:
return
path = mmap.objfile
if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):
try:
gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)
except gdb.error:
pass
if '/usr/lib/debug' not in get_directory():
set_directory(get_directory() + ':/usr/lib/debug')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Looking up addresses for function names / symbols, and
vice-versa.
Uses IDA when available if there isn't sufficient symbol
information available.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import shutil
import tempfile
import elftools.common.exceptions
import elftools.elf.constants
import elftools.elf.elffile
import elftools.elf.segments
import gdb
import six
import pwndbg.arch
import pwndbg.elf
import pwndbg.events
import pwndbg.file
import pwndbg.ida
import pwndbg.memoize
import pwndbg.memory
import pwndbg.qemu
import pwndbg.remote
import pwndbg.stack
import pwndbg.vmmap
def get_directory():
"""
Retrieve the debug file directory path.
The debug file directory path ('show debug-file-directory') is a comma-
separated list of directories which GDB will look in to find the binaries
currently loaded.
"""
result = gdb.execute('show debug-file-directory', to_string=True, from_tty=False)
expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
match = re.search(expr, result)
if match:
return match.group(1)
return ''
def set_directory(d):
gdb.execute('set debug-file-directory %s' % d, to_string=True, from_tty=False)
def add_directory(d):
current = get_directory()
if current:
set_directory('%s:%s' % (current, d))
else:
set_directory(d)
remote_files = {}
remote_files_dir = None
@pwndbg.events.exit
def reset_remote_files():
global remote_files
global remote_files_dir
remote_files = {}
if remote_files_dir is not None:
shutil.rmtree(remote_files_dir)
remote_files_dir = None
@pwndbg.events.new_objfile
def autofetch():
"""
"""
global remote_files_dir
if not pwndbg.remote.is_remote():
return
if pwndbg.qemu.is_qemu_usermode():
return
if pwndbg.android.is_android():
return
if not remote_files_dir:
remote_files_dir = tempfile.mkdtemp()
add_directory(remote_files_dir)
searchpath = get_directory()
for mapping in pwndbg.vmmap.get():
objfile = mapping.objfile
# Don't attempt to download things like '[stack]' and '[heap]'
if not objfile.startswith('/'):
continue
# Don't re-download things that we have already downloaded
if not objfile or objfile in remote_files:
continue
msg = "Downloading %r from the remote server" % objfile
print(msg, end='')
try:
data = pwndbg.file.get(objfile)
print('\r' + msg + ': OK')
except OSError:
# The file could not be downloaded :(
print('\r' + msg + ': Failed')
return
filename = os.path.basename(objfile)
local_path = os.path.join(remote_files_dir, filename)
with open(local_path, 'wb+') as f:
f.write(data)
remote_files[objfile] = local_path
base = None
for mapping in pwndbg.vmmap.get():
if mapping.objfile != objfile:
continue
if base is None or mapping.vaddr < base.vaddr:
base = mapping
if not base:
continue
base = base.vaddr
try:
elf = elftools.elf.elffile.ELFFile(open(local_path, 'rb'))
except elftools.common.exceptions.ELFError:
continue
gdb_command = ['add-symbol-file', local_path, hex(int(base))]
for section in elf.iter_sections():
name = section.name #.decode('latin-1')
section = section.header
if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:
continue
gdb_command += ['-s', name, hex(int(base + section.sh_addr))]
print(' '.join(gdb_command))
# gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)
@pwndbg.memoize.reset_on_objfile
def get(address, gdb_only=False):
"""
Retrieve the textual name for a symbol
"""
# Fast path
if address < pwndbg.memory.MMAP_MIN_ADDR or address >= ((1 << 64)-1):
return ''
# Don't look up stack addresses
if pwndbg.stack.find(address):
return ''
# This sucks, but there's not a GDB API for this.
result = gdb.execute('info symbol %#x' % int(address), to_string=True, from_tty=False)
if not gdb_only and result.startswith('No symbol'):
address = int(address)
exe = pwndbg.elf.exe()
if exe:
exe_map = pwndbg.vmmap.find(exe.address)
if exe_map and address in exe_map:
res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
return res or ''
# Expected format looks like this:
# main in section .text of /bin/bash
# main + 3 in section .text of /bin/bash
# system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6
# No symbol matches system-1.
a, b, c, _ = result.split(None, 3)
if b == '+':
return "%s+%s" % (a, c)
if b == 'in':
return a
return ''
@pwndbg.memoize.reset_on_objfile
def address(symbol):
if isinstance(symbol, six.integer_types):
return symbol
try:
return int(symbol, 0)
except:
pass
try:
symbol_obj = gdb.lookup_symbol(symbol)[0]
if symbol_obj:
return int(symbol_obj.value().address)
except Exception:
pass
try:
result = gdb.execute('info address %s' % symbol, to_string=True, from_tty=False)
address = int(re.search('0x[0-9a-fA-F]+', result).group(), 0)
# The address found should lie in one of the memory maps
# There are cases when GDB shows offsets e.g.:
# pwndbg> info address tcache
# Symbol "tcache" is a thread-local variable at offset 0x40
# in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'.
if not pwndbg.vmmap.find(address):
return None
return address
except gdb.error:
return None
try:
address = pwndbg.ida.LocByName(symbol)
if address:
return address
except Exception:
pass
@pwndbg.events.stop
@pwndbg.memoize.reset_on_start
def add_main_exe_to_symbols():
if not pwndbg.remote.is_remote():
return
if pwndbg.android.is_android():
return
exe = pwndbg.elf.exe()
if not exe:
return
addr = exe.address
if not addr:
return
addr = int(addr)
mmap = pwndbg.vmmap.find(addr)
if not mmap:
return
path = mmap.objfile
if path and (pwndbg.arch.endian == pwndbg.arch.native_endian):
try:
gdb.execute('add-symbol-file %s %#x' % (path, addr), from_tty=False, to_string=True)
except gdb.error:
pass
if '/usr/lib/debug' not in get_directory():
set_directory(get_directory() + ':/usr/lib/debug')
| en | 0.750832 | #!/usr/bin/env python # -*- coding: utf-8 -*- Looking up addresses for function names / symbols, and vice-versa. Uses IDA when available if there isn't sufficient symbol information available. Retrieve the debug file directory path. The debug file directory path ('show debug-file-directory') is a comma- separated list of directories which GDB will look in to find the binaries currently loaded. # Don't attempt to download things like '[stack]' and '[heap]' # Don't re-download things that we have already downloaded # The file could not be downloaded :( #.decode('latin-1') # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True) Retrieve the textual name for a symbol # Fast path # Don't look up stack addresses # This sucks, but there's not a GDB API for this. #x' % int(address), to_string=True, from_tty=False) # Expected format looks like this: # main in section .text of /bin/bash # main + 3 in section .text of /bin/bash # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6 # No symbol matches system-1. # The address found should lie in one of the memory maps # There are cases when GDB shows offsets e.g.: # pwndbg> info address tcache # Symbol "tcache" is a thread-local variable at offset 0x40 # in the thread-local storage for `/lib/x86_64-linux-gnu/libc.so.6'. #x' % (path, addr), from_tty=False, to_string=True) | 2.39905 | 2 |
surf/train_data.py | githmy/vnpymy | 1 | 6631600 | <reponame>githmy/vnpymy
# coding: utf-8
from surf.script_tab import keytab
import os, json, time, re, codecs, glob
from surf.surf_tool import regex2pairs
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging.handlers
import pandas as pd
import itertools
import numpy as np
import lightgbm as lgb
class TrainFunc(object):
def __init__(self):
self.funcmap = {
"lgboost": self.lgboost,
"enforce_tab": self.enforce_tabf,
"enforce_net": self.enforce_netf,
# "tcn": None,
# "tabnet": None,
}
    def enforce_tabf(self, dataobjs, params, outhead, projectpath):
        # Placeholder: tabular reinforcement-learning training is not implemented yet.
        raise NotImplementedError("enforce_tab training is not implemented")
    def enforce_netf(self, dataobjs, params, outhead, projectpath):
        # Placeholder: network reinforcement-learning training is not implemented yet.
        raise NotImplementedError("enforce_net training is not implemented")
def lgboost(self, dataobjs, params, outhead, projectpath):
train_X = []
train_y = []
val_X = []
val_y = []
collist = dataobjs[0][0].columns
colchar = [i1 for i1 in collist if not re.search("^label_", i1, re.M)]
collabel = [i1 for i1 in collist if re.search("^label_", i1, re.M)]
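        # Columns prefixed with 'label_' are regression targets; the rest are features.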
for ttrain, tval in dataobjs:
ttrain.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)
tval.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)
train_X.append(ttrain[colchar])
train_y.append(ttrain[collabel])
val_X.append(tval[colchar])
val_y.append(tval[collabel])
train_X = pd.concat(train_X, axis=0)
train_y = pd.concat(train_y, axis=0)
val_X = pd.concat(val_X, axis=0)
val_y = pd.concat(val_y, axis=0)
loss_result = {}
for id1, i1 in enumerate(collabel):
print("training:", i1)
evals_result = {}
lgtrain = lgb.Dataset(train_X, label=train_y.iloc[:, id1])
lgval = lgb.Dataset(val_X, label=val_y.iloc[:, id1])
model = lgb.train(params, lgtrain, 1000, valid_sets=lgval, early_stopping_rounds=100,
verbose_eval=100, evals_result=evals_result)
fulpath = os.path.join(projectpath, "{}lgboost_{}.txt".format(outhead, i1))
print("saving ", i1)
print(fulpath)
model.save_model(fulpath)
# fig2 = plt.figure(figsize=(20, 20))
# ax = fig2.subplots()
# lgb.plot_tree(model, tree_index=1, ax=ax)
# plt.show()
# lgb.create_tree_digraph(model, tree_index=1)
            # print('plotting training curves...')
# # lgb.plot_metric(evals_result, metric='auc')
# lgb.plot_metric(evals_result, metric='rmse')
# plt.show()
            # print('plotting feature importance ranking...')
# ax = lgb.plot_importance(model, max_num_features=20)
            # # max_num_features caps how many of the most important features are shown; adjust as needed
# plt.show()
loss_result[i1] = evals_result['valid_0']["rmse"][-1]
return loss_result
def __call__(self, oriinfiles, commands, outhead, projectpath):
        # 1. there are only two files (a train/validation pair)
print(oriinfiles, commands, outhead, projectpath)
pdobjlist, matchstrlist = regex2pairs(oriinfiles, projectpath)
outfilelist = [[i1[0] + i1[1][0] + i1[2], i1[0] + i1[1][1] + i1[2]] for i1 in matchstrlist]
print(outfilelist)
collist = pdobjlist[0][0].columns
collabel = [i1 for i1 in collist if re.search("^label_", i1, re.M)]
outjson = {i1: [] for i1 in collabel}
outjson["model"] = []
for command in commands:
tkey = list(command.keys())[0]
toutjson = self.funcmap[tkey](pdobjlist, command[tkey], outhead, projectpath)
outjson["model"].append(tkey)
[outjson[ik2].append(iv2) for ik2, iv2 in toutjson.items()]
pdobjout = pd.DataFrame(outjson)
pdobjout.set_index("model", inplace=True)
return pdobjout
class PredictFunc(object):
def __init__(self):
self.funcmap = {
"lgboost": self.lgboost,
# "tcn": None,
# "tabnet": None,
}
def lgboost(self, dataobj, modelhead, labelname, projectpath):
outpdlist = []
for i1 in labelname:
            # load the trained model
modelpath = os.path.join(projectpath, "{}lgboost_{}.txt".format(modelhead, i1))
try:
model = lgb.Booster(model_file=modelpath)
                # if early stopping was enabled during training, predict with the best iteration
pred_pd = model.predict(dataobj, num_iteration=model.best_iteration)
outpdlist.append(pred_pd)
except Exception as e:
outpdlist.append(np.zeros(len(dataobj)))
print(e)
return outpdlist
def __call__(self, oriinfiles, modelhead, commands, outhead, labelfile, projectpath):
print(oriinfiles, commands, outhead, labelfile, projectpath)
anylabel = glob.glob(os.path.join(projectpath, labelfile))[0]
pdobj = pd.read_csv(os.path.join(projectpath, anylabel), header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
labelname = [i1 for i1 in pdobj.columns if re.search("^label_", i1, re.M)]
labellenth = len(labelname)
        infiles = [glob.glob(os.path.join(projectpath, i2)) for i2 in oriinfiles]  # expand the glob patterns
        infiles = list(set(itertools.chain(*infiles)))  # flatten and deduplicate
for infile in infiles:
            # to ease ensembling, predictions from different models for the same label are stored in one file
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
pdobj = pdobj[[i3 for i3 in pdobj.columns if not re.search("^label_", i3, re.M)]]
tpdlist = [[] for i2 in range(labellenth)]
comkeys = [list(i2.keys())[0] for i2 in commands]
for tkey in comkeys:
outpdlist = self.funcmap[tkey](pdobj, modelhead, labelname, projectpath)
[tpdlist[i3].append(outpdlist[i3]) for i3 in range(labellenth)]
for id2, lbname in enumerate(labelname):
tjson = {"{}_{}".format(lbname, tkey): tpdlist[id2][id3] for id3, tkey in enumerate(comkeys)}
tmpoutpd = pd.DataFrame(tjson, index=pdobj.index)
(filepath, tfilename) = os.path.split(infile)
fname = os.path.join(filepath, "{}{}_{}".format(outhead, lbname, tfilename))
tmpoutpd.to_csv(fname, index=True, header=True, encoding="utf-8")
return None
train_func = {
"训练拟合": TrainFunc(),
}
predict_func = {
"数据预测": PredictFunc(),
}
| # coding: utf-8
from surf.script_tab import keytab
import os, json, time, re, codecs, glob
from surf.surf_tool import regex2pairs
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging.handlers
import pandas as pd
import itertools
import numpy as np
import lightgbm as lgb
class TrainFunc(object):
def __init__(self):
self.funcmap = {
"lgboost": self.lgboost,
"enforce_tab": self.enforce_tabf,
"enforce_net": self.enforce_netf,
# "tcn": None,
# "tabnet": None,
}
    def enforce_tabf(self, dataobjs, params, outhead, projectpath):
        # Placeholder: tabular reinforcement-learning training is not implemented yet.
        raise NotImplementedError("enforce_tab training is not implemented")
    def enforce_netf(self, dataobjs, params, outhead, projectpath):
        # Placeholder: network reinforcement-learning training is not implemented yet.
        raise NotImplementedError("enforce_net training is not implemented")
def lgboost(self, dataobjs, params, outhead, projectpath):
train_X = []
train_y = []
val_X = []
val_y = []
collist = dataobjs[0][0].columns
colchar = [i1 for i1 in collist if not re.search("^label_", i1, re.M)]
collabel = [i1 for i1 in collist if re.search("^label_", i1, re.M)]
for ttrain, tval in dataobjs:
ttrain.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)
tval.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)
train_X.append(ttrain[colchar])
train_y.append(ttrain[collabel])
val_X.append(tval[colchar])
val_y.append(tval[collabel])
train_X = pd.concat(train_X, axis=0)
train_y = pd.concat(train_y, axis=0)
val_X = pd.concat(val_X, axis=0)
val_y = pd.concat(val_y, axis=0)
loss_result = {}
for id1, i1 in enumerate(collabel):
print("training:", i1)
evals_result = {}
lgtrain = lgb.Dataset(train_X, label=train_y.iloc[:, id1])
lgval = lgb.Dataset(val_X, label=val_y.iloc[:, id1])
model = lgb.train(params, lgtrain, 1000, valid_sets=lgval, early_stopping_rounds=100,
verbose_eval=100, evals_result=evals_result)
fulpath = os.path.join(projectpath, "{}lgboost_{}.txt".format(outhead, i1))
print("saving ", i1)
print(fulpath)
model.save_model(fulpath)
# fig2 = plt.figure(figsize=(20, 20))
# ax = fig2.subplots()
# lgb.plot_tree(model, tree_index=1, ax=ax)
# plt.show()
# lgb.create_tree_digraph(model, tree_index=1)
            # print('plotting training curves...')
# # lgb.plot_metric(evals_result, metric='auc')
# lgb.plot_metric(evals_result, metric='rmse')
# plt.show()
            # print('plotting feature importance ranking...')
# ax = lgb.plot_importance(model, max_num_features=20)
            # # max_num_features caps how many of the most important features are shown; adjust as needed
# plt.show()
loss_result[i1] = evals_result['valid_0']["rmse"][-1]
return loss_result
def __call__(self, oriinfiles, commands, outhead, projectpath):
        # 1. there are only two files (a train/validation pair)
print(oriinfiles, commands, outhead, projectpath)
pdobjlist, matchstrlist = regex2pairs(oriinfiles, projectpath)
outfilelist = [[i1[0] + i1[1][0] + i1[2], i1[0] + i1[1][1] + i1[2]] for i1 in matchstrlist]
print(outfilelist)
collist = pdobjlist[0][0].columns
collabel = [i1 for i1 in collist if re.search("^label_", i1, re.M)]
outjson = {i1: [] for i1 in collabel}
outjson["model"] = []
for command in commands:
tkey = list(command.keys())[0]
toutjson = self.funcmap[tkey](pdobjlist, command[tkey], outhead, projectpath)
outjson["model"].append(tkey)
[outjson[ik2].append(iv2) for ik2, iv2 in toutjson.items()]
pdobjout = pd.DataFrame(outjson)
pdobjout.set_index("model", inplace=True)
return pdobjout
class PredictFunc(object):
def __init__(self):
self.funcmap = {
"lgboost": self.lgboost,
# "tcn": None,
# "tabnet": None,
}
def lgboost(self, dataobj, modelhead, labelname, projectpath):
outpdlist = []
for i1 in labelname:
            # load the trained model
modelpath = os.path.join(projectpath, "{}lgboost_{}.txt".format(modelhead, i1))
try:
model = lgb.Booster(model_file=modelpath)
                # if early stopping was enabled during training, predict with the best iteration
pred_pd = model.predict(dataobj, num_iteration=model.best_iteration)
outpdlist.append(pred_pd)
except Exception as e:
outpdlist.append(np.zeros(len(dataobj)))
print(e)
return outpdlist
def __call__(self, oriinfiles, modelhead, commands, outhead, labelfile, projectpath):
print(oriinfiles, commands, outhead, labelfile, projectpath)
anylabel = glob.glob(os.path.join(projectpath, labelfile))[0]
pdobj = pd.read_csv(os.path.join(projectpath, anylabel), header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
labelname = [i1 for i1 in pdobj.columns if re.search("^label_", i1, re.M)]
labellenth = len(labelname)
        infiles = [glob.glob(os.path.join(projectpath, i2)) for i2 in oriinfiles]  # expand the glob patterns
        infiles = list(set(itertools.chain(*infiles)))  # flatten and deduplicate
for infile in infiles:
            # to ease ensembling, predictions from different models for the same label are stored in one file
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
pdobj = pdobj[[i3 for i3 in pdobj.columns if not re.search("^label_", i3, re.M)]]
tpdlist = [[] for i2 in range(labellenth)]
comkeys = [list(i2.keys())[0] for i2 in commands]
for tkey in comkeys:
outpdlist = self.funcmap[tkey](pdobj, modelhead, labelname, projectpath)
[tpdlist[i3].append(outpdlist[i3]) for i3 in range(labellenth)]
for id2, lbname in enumerate(labelname):
tjson = {"{}_{}".format(lbname, tkey): tpdlist[id2][id3] for id3, tkey in enumerate(comkeys)}
tmpoutpd = pd.DataFrame(tjson, index=pdobj.index)
(filepath, tfilename) = os.path.split(infile)
fname = os.path.join(filepath, "{}{}_{}".format(outhead, lbname, tfilename))
tmpoutpd.to_csv(fname, index=True, header=True, encoding="utf-8")
return None
train_func = {
"训练拟合": TrainFunc(),
}
predict_func = {
"数据预测": PredictFunc(),
} | zh | 0.310279 | # coding: utf-8 # "tcn": None, # "tabnet": None, # fig2 = plt.figure(figsize=(20, 20)) # ax = fig2.subplots() # lgb.plot_tree(model, tree_index=1, ax=ax) # plt.show() # lgb.create_tree_digraph(model, tree_index=1) # print('画出训练结果...') # # lgb.plot_metric(evals_result, metric='auc') # lgb.plot_metric(evals_result, metric='rmse') # plt.show() # print('画特征重要性排序...') # ax = lgb.plot_importance(model, max_num_features=20) # # max_features表示最多展示出前10个重要性特征,可以自行设置 # plt.show() # 1. 只有两个文件 # "tcn": None, # "tabnet": None, # 模型加载 # 如果在训练期间启用了早期停止,可以通过best_iteration方式从最佳迭代中获得预测 # 正则列出 # 展开去重 # 为了便于集成学习,不同模型的同一类型存储到一个文件 | 2.250041 | 2 |
shapenet.py | roatienza/pc2pix | 12 | 6631601 | import json
import logging
import os.path as osp
from queue import Empty, Queue
from threading import Thread, current_thread
import numpy as np
import functools
from config import SHAPENET_IM
from config import SHAPENET_PC
from loader import read_camera, read_depth, read_im, read_quat, read_vol, read_view, read_gray, read_view_angle
from in_out import load_ply
from PIL import Image
def get_split(split_js='data/splits.json'):
dir_path = osp.dirname(osp.realpath(__file__))
with open(osp.join(dir_path, split_js), 'r') as f:
js = json.load(f)
return js
def nn(x, y):
return x[0] + x[1] + x[2] - y[0] - y[1] - y[2]
class ShapeNet(object):
def __init__(self,
im_dir=SHAPENET_IM,
split_file='data/splits.json',
vox_dir=None,
pc_dir=SHAPENET_PC,
shape_ids=None,
num_renders=20,
rng_seed=0):
self.vox_dir = vox_dir
self.im_dir = im_dir
self.pc_dir = pc_dir
self.split_file = split_file
self.splits_all = get_split(split_file)
self.shape_ids = (self.splits_all.keys()
if shape_ids is None else shape_ids)
self.splits = {k: self.splits_all[k] for k in self.shape_ids}
self.shape_cls = [
self.splits[x]['name'].split(',')[0] for x in self.shape_ids
]
self.rng = rng_seed
self.num_renders = num_renders
self.load_func = {
'im': self.get_im,
'im_128': self.get_im_128,
'depth': self.get_depth,
'K': self.get_K,
'R': self.get_R,
'quat': self.get_quat,
'vol': self.get_vol,
'shape_id': self.get_sid,
'model_id': self.get_mid,
'view_idx': self.get_view_idx,
'pc' : self.get_pc,
'view' : self.get_view,
'elev' : self.get_elev,
'azim' : self.get_azim,
'gray' : self.get_gray,
'gray_128' : self.get_gray_128
}
self.all_items = self.load_func.keys()
self.logger = logging.getLogger('mview3d.' + __name__)
np.random.seed(self.rng)
self.cmpnn = functools.cmp_to_key(nn)
def get_mids(self, sid):
return self.splits[sid]
def get_smids(self, split):
smids = []
for k, v in self.splits.items():
smids.extend([(k, m) for m in v[split]])
smids = np.random.permutation(smids)
return smids
def get_sid(self, sid, mid, idx=None):
return np.array([sid])
def get_view_idx(self, sid, mid, idx):
return idx
def get_mid(self, sid, mid, idx=None):
return np.array([mid])
def get_K(self, sid, mid, idx):
rand_idx = idx
cams = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'camera_{:d}.mat'.format(ix))
cams.append(read_camera(f))
camK = np.stack([c[0] for c in cams], axis=0)
return camK
def get_R(self, sid, mid, idx):
rand_idx = idx
cams = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'camera_{:d}.mat'.format(ix))
cams.append(read_camera(f))
camR = np.stack([c[1] for c in cams], axis=0)
return camR
def get_quat(self, sid, mid, idx):
rand_idx = idx
cams = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'camera_{:d}.mat'.format(ix))
cams.append(read_quat(f))
camq = np.stack(cams, axis=0)
return camq
def get_depth(self, sid, mid, idx):
rand_idx = idx
depths = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'depth_{:d}.png'.format(ix))
depths.append(read_depth(f))
return np.stack(depths, axis=0)
def get_im(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}.png'.format(ix))
ims.append(read_im(f))
return np.stack(ims, axis=0)
def get_im_128(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}_128.png'.format(ix))
ims.append(read_im(f))
return np.stack(ims, axis=0)
def get_gray(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}_gray.png'.format(ix))
ims.append(read_gray(f))
return np.stack(ims, axis=0)
def get_gray_128(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}_gray_128.png'.format(ix))
ims.append(read_gray(f))
return np.stack(ims, axis=0)
def get_vol(self, sid, mid, idx=None, tsdf=False):
if self.vox_dir is None:
self.logger.error('Voxel dir not defined')
f = osp.join(self.vox_dir, sid, mid)
return read_vol(f, tsdf)
def get_pc(self, sid, mid, idx):
rand_idx = idx
pcs = []
for ix in rand_idx:
f = mid + '.ply'
f = osp.join(self.pc_dir, sid, f)
pc = load_ply(f)
pcs.append(pc)
return np.stack(pcs, axis=0)
def get_view(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, "view.txt")
ims.append(read_view(f, ix))
return np.stack(ims, axis=0)
def get_elev(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, "view.txt")
ims.append(read_view_angle(f, ix))
return np.stack(ims, axis=0)
def get_azim(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, "view.txt")
ims.append(read_view_angle(f, ix, elev=False))
return np.stack(ims, axis=0)
def fetch_data(self, smids, items, im_batch):
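        # Worker thread: pull a model index from queue_idx, load the requested
        # items for a random subset of rendered views, and push them onto queue_data.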
with self.coord.stop_on_exception():
while not self.coord.should_stop():
data = {}
try:
data_idx = self.queue_idx.get(timeout=0.5)
except Empty:
self.logger.debug('Index queue empty - {:s}'.format(
current_thread().name))
print('Index queue empty - {:s}'.format(
current_thread().name))
continue
view_idx = np.random.choice(
self.num_renders, size=(im_batch, ), replace=False)
sid, mid = smids[data_idx]
for i in items:
data[i] = self.load_func[i](sid, mid, view_idx)
self.queue_data.put(data)
if self.loop_data:
self.queue_idx.put(data_idx)
def init_queue(self,
smids,
im_batch,
items,
coord,
nepochs=None,
qsize=32,
nthreads=4):
self.coord = coord
self.queue_data = Queue(maxsize=qsize)
if nepochs is None:
nepochs = 1
self.loop_data = True
else:
self.loop_data = False
self.total_items = nepochs * len(smids)
self.queue_idx = Queue(maxsize=self.total_items)
for nx in range(nepochs):
for rx in range(len(smids)):
self.queue_idx.put(rx)
self.qthreads = []
self.logger.info('Starting {:d} prefetch threads'.format(nthreads))
for qx in range(nthreads):
worker = Thread(
target=self.fetch_data, args=(smids, items, im_batch))
worker.start()
self.coord.register_thread(worker)
self.qthreads.append(worker)
def close_queue(self, e=None):
self.logger.debug('Closing queue')
self.coord.request_stop(e)
try:
while True:
self.queue_idx.get(block=False)
except Empty:
self.logger.debug('Emptied idx queue')
try:
while True:
self.queue_data.get(block=False)
except Empty:
self.logger.debug("Emptied data queue")
def next_batch(self, items, batch_size, timeout=0.5):
data = []
cnt = 0
while cnt < batch_size:
try:
dt = self.queue_data.get(timeout=timeout)
self.total_items -= 1
data.append(dt)
except Empty:
self.logger.debug('Example queue empty')
if self.total_items <= 0 and not self.loop_data:
# Exhausted all data
self.close_queue()
break
else:
continue
cnt += 1
if len(data) == 0:
return
batch_data = {}
for k in items:
batch_data[k] = []
for dt in data:
batch_data[k].append(dt[k])
batched = np.stack(batch_data[k])
batch_data[k] = batched
return batch_data
def reset(self):
np.random.seed(self.rng)
| import json
import logging
import os.path as osp
from queue import Empty, Queue
from threading import Thread, current_thread
import numpy as np
import functools
from config import SHAPENET_IM
from config import SHAPENET_PC
from loader import read_camera, read_depth, read_im, read_quat, read_vol, read_view, read_gray, read_view_angle
from in_out import load_ply
from PIL import Image
def get_split(split_js='data/splits.json'):
dir_path = osp.dirname(osp.realpath(__file__))
with open(osp.join(dir_path, split_js), 'r') as f:
js = json.load(f)
return js
def nn(x, y):
return x[0] + x[1] + x[2] - y[0] - y[1] - y[2]
class ShapeNet(object):
def __init__(self,
im_dir=SHAPENET_IM,
split_file='data/splits.json',
vox_dir=None,
pc_dir=SHAPENET_PC,
shape_ids=None,
num_renders=20,
rng_seed=0):
self.vox_dir = vox_dir
self.im_dir = im_dir
self.pc_dir = pc_dir
self.split_file = split_file
self.splits_all = get_split(split_file)
self.shape_ids = (self.splits_all.keys()
if shape_ids is None else shape_ids)
self.splits = {k: self.splits_all[k] for k in self.shape_ids}
self.shape_cls = [
self.splits[x]['name'].split(',')[0] for x in self.shape_ids
]
self.rng = rng_seed
self.num_renders = num_renders
self.load_func = {
'im': self.get_im,
'im_128': self.get_im_128,
'depth': self.get_depth,
'K': self.get_K,
'R': self.get_R,
'quat': self.get_quat,
'vol': self.get_vol,
'shape_id': self.get_sid,
'model_id': self.get_mid,
'view_idx': self.get_view_idx,
'pc' : self.get_pc,
'view' : self.get_view,
'elev' : self.get_elev,
'azim' : self.get_azim,
'gray' : self.get_gray,
'gray_128' : self.get_gray_128
}
self.all_items = self.load_func.keys()
self.logger = logging.getLogger('mview3d.' + __name__)
np.random.seed(self.rng)
self.cmpnn = functools.cmp_to_key(nn)
def get_mids(self, sid):
return self.splits[sid]
def get_smids(self, split):
smids = []
for k, v in self.splits.items():
smids.extend([(k, m) for m in v[split]])
smids = np.random.permutation(smids)
return smids
def get_sid(self, sid, mid, idx=None):
return np.array([sid])
def get_view_idx(self, sid, mid, idx):
return idx
def get_mid(self, sid, mid, idx=None):
return np.array([mid])
def get_K(self, sid, mid, idx):
rand_idx = idx
cams = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'camera_{:d}.mat'.format(ix))
cams.append(read_camera(f))
camK = np.stack([c[0] for c in cams], axis=0)
return camK
def get_R(self, sid, mid, idx):
rand_idx = idx
cams = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'camera_{:d}.mat'.format(ix))
cams.append(read_camera(f))
camR = np.stack([c[1] for c in cams], axis=0)
return camR
def get_quat(self, sid, mid, idx):
rand_idx = idx
cams = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'camera_{:d}.mat'.format(ix))
cams.append(read_quat(f))
camq = np.stack(cams, axis=0)
return camq
def get_depth(self, sid, mid, idx):
rand_idx = idx
depths = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'depth_{:d}.png'.format(ix))
depths.append(read_depth(f))
return np.stack(depths, axis=0)
def get_im(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}.png'.format(ix))
ims.append(read_im(f))
return np.stack(ims, axis=0)
def get_im_128(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}_128.png'.format(ix))
ims.append(read_im(f))
return np.stack(ims, axis=0)
def get_gray(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}_gray.png'.format(ix))
ims.append(read_gray(f))
return np.stack(ims, axis=0)
def get_gray_128(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, 'render_{:d}_gray_128.png'.format(ix))
ims.append(read_gray(f))
return np.stack(ims, axis=0)
def get_vol(self, sid, mid, idx=None, tsdf=False):
if self.vox_dir is None:
self.logger.error('Voxel dir not defined')
f = osp.join(self.vox_dir, sid, mid)
return read_vol(f, tsdf)
def get_pc(self, sid, mid, idx):
rand_idx = idx
pcs = []
for ix in rand_idx:
f = mid + '.ply'
f = osp.join(self.pc_dir, sid, f)
pc = load_ply(f)
pcs.append(pc)
return np.stack(pcs, axis=0)
def get_view(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, "view.txt")
ims.append(read_view(f, ix))
return np.stack(ims, axis=0)
def get_elev(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, "view.txt")
ims.append(read_view_angle(f, ix))
return np.stack(ims, axis=0)
def get_azim(self, sid, mid, idx):
rand_idx = idx
ims = []
for ix in rand_idx:
f = osp.join(self.im_dir, sid, mid, "view.txt")
ims.append(read_view_angle(f, ix, elev=False))
return np.stack(ims, axis=0)
def fetch_data(self, smids, items, im_batch):
with self.coord.stop_on_exception():
while not self.coord.should_stop():
data = {}
try:
data_idx = self.queue_idx.get(timeout=0.5)
except Empty:
self.logger.debug('Index queue empty - {:s}'.format(
current_thread().name))
print('Index queue empty - {:s}'.format(
current_thread().name))
continue
view_idx = np.random.choice(
self.num_renders, size=(im_batch, ), replace=False)
sid, mid = smids[data_idx]
for i in items:
data[i] = self.load_func[i](sid, mid, view_idx)
self.queue_data.put(data)
if self.loop_data:
self.queue_idx.put(data_idx)
def init_queue(self,
smids,
im_batch,
items,
coord,
nepochs=None,
qsize=32,
nthreads=4):
self.coord = coord
self.queue_data = Queue(maxsize=qsize)
if nepochs is None:
# No epoch count given: fill the index queue once, then keep re-queueing
# consumed indices so the data loops indefinitely.
nepochs = 1
self.loop_data = True
else:
# Fixed number of epochs: each index is consumed exactly nepochs times.
self.loop_data = False
self.total_items = nepochs * len(smids)
self.queue_idx = Queue(maxsize=self.total_items)
for nx in range(nepochs):
for rx in range(len(smids)):
self.queue_idx.put(rx)
self.qthreads = []
self.logger.info('Starting {:d} prefetch threads'.format(nthreads))
for qx in range(nthreads):
worker = Thread(
target=self.fetch_data, args=(smids, items, im_batch))
worker.start()
self.coord.register_thread(worker)
self.qthreads.append(worker)
def close_queue(self, e=None):
self.logger.debug('Closing queue')
self.coord.request_stop(e)
try:
while True:
self.queue_idx.get(block=False)
except Empty:
self.logger.debug('Emptied idx queue')
try:
while True:
self.queue_data.get(block=False)
except Empty:
self.logger.debug("Emptied data queue")
def next_batch(self, items, batch_size, timeout=0.5):
data = []
cnt = 0
while cnt < batch_size:
try:
dt = self.queue_data.get(timeout=timeout)
self.total_items -= 1
data.append(dt)
except Empty:
self.logger.debug('Example queue empty')
if self.total_items <= 0 and not self.loop_data:
# Exhausted all data
self.close_queue()
break
else:
continue
cnt += 1
if len(data) == 0:
return
batch_data = {}
for k in items:
batch_data[k] = []
for dt in data:
batch_data[k].append(dt[k])
batched = np.stack(batch_data[k])
batch_data[k] = batched
return batch_data
def reset(self):
np.random.seed(self.rng)
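# ---------------------------------------------------------------------------
# Hypothetical usage sketch, not part of the original file: it shows how the
# queue API above (get_smids / init_queue / next_batch / close_queue) fits
# together. The `loader` argument stands for an instance of the class defined
# above (its name is not shown in this excerpt), the item keys are examples
# taken from load_func, and tf.train.Coordinator is assumed as the
# coordinator, since the methods above rely on its register_thread /
# should_stop / request_stop interface.
def _example_prefetch_loop(loader, split='train', items=('R', 'quat', 'gray'),
                           im_batch=4, batch_size=8):
    import tensorflow as tf  # local import keeps the sketch self-contained
    coord = tf.train.Coordinator()
    smids = loader.get_smids(split)  # shuffled (shape_id, model_id) pairs
    loader.init_queue(smids, im_batch, list(items), coord,
                      nepochs=1, qsize=32, nthreads=4)
    try:
        while True:
            batch = loader.next_batch(list(items), batch_size)
            if batch is None:  # index and data queues exhausted
                break
            yield batch  # dict mapping each item to a stacked numpy array
    finally:
        loader.close_queue()
        coord.join(loader.qthreads)
# ---------------------------------------------------------------------------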
| en | 0.652813 | # Exhausted all data | 2.171426 | 2 |
1 - python/implementacao2/taylor.py | Ellian-aragao/IFB-CN | 0 | 6631602 | <reponame>Ellian-aragao/IFB-CN<filename>1 - python/implementacao2/taylor.py<gh_stars>0
def FdeX(x): # function of x
return (- 0.1 * x ** 4 - 0.15 * x ** 3 - 0.5 * x ** 2 - 0.25 * x + 1.2)
def Taylor(coeficientes, x, i): # Taylor term
return (coeficientes * x ** i)
# coefficients given by the list
coeficientes = [1.2, -0.25, -0.5, -0.15, -0.1]
# given interval [0,4] that was divided into ten parts
x = [0, 0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6, 4]
# changing the values of x
for i in x:
taylor = 0
fdex = FdeX(i)
# performing the Taylor iterations to reach the value of f(x)
for j in range(len(coeficientes)):
taylor += Taylor(coeficientes[j], i, j)
print(j, i, taylor, fdex)
# stopping criterion when the elements are equal
if abs(taylor - fdex) < 1e-3:
break
print('\n\n')
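# Illustrative cross-check, not part of the original exercise: evaluating the
# same coefficient list with Horner's rule reproduces FdeX(x) exactly, since
# the truncated "series" above is just the polynomial written term by term.
# The helper name is arbitrary.
def horner(coefs, value):
    result = 0.0
    for c in reversed(coefs):  # coefs run from degree 0 up, so fold from the top down
        result = result * value + c
    return result

for xi in x:
    assert abs(horner(coeficientes, xi) - FdeX(xi)) < 1e-9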
| - python/implementacao2/taylor.py<gh_stars>0
def FdeX(x): # function of x
return (- 0.1 * x ** 4 - 0.15 * x ** 3 - 0.5 * x ** 2 - 0.25 * x + 1.2)
def Taylor(coeficientes, x, i): # Taylor term
return (coeficientes * x ** i)
# coefficients given by the list
coeficientes = [1.2, -0.25, -0.5, -0.15, -0.1]
# given interval [0,4] that was divided into ten parts
x = [0, 0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6, 4]
# changing the values of x
for i in x:
taylor = 0
fdex = FdeX(i)
# performing the Taylor iterations to reach the value of f(x)
for j in range(len(coeficientes)):
taylor += Taylor(coeficientes[j], i, j)
print(j, i, taylor, fdex)
# stopping criterion when the elements are equal
if abs(taylor - fdex) < 1e-3:
break
print('\n\n') | pt | 0.982018 | # function of x # Taylor function # coefficients given by the list # given interval [0,4] that was divided into ten parts # changing the values of x # performing the Taylor iterations to reach the value of f(x) # stopping criterion when the elements are equal | 3.716439 | 4 |
tensor2tensor/models/research/r_transformer.py | spacegoing/t2t_caps | 0 | 6631603 | <reponame>spacegoing/t2t_caps
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformers with depthwise recurrency (go/r-transformer).
A high-level explanation on the idea and the architecture:
The vanilla Transformer model has no recurrence and struggles with some tasks
that a fully recurrent model can easily solve. Instead of incorporating
recurrence in time (which has a dependency on sequence length T),
we apply recurrence in depth (which we can set to some fixed length D << T),
and apply self-attention instead of sequential processing to enable the model
to incorporate long-range dependencies.
Structure of the code is explained in r_transformer_util.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.models.research import r_transformer_util
from tensor2tensor.utils import registry
import tensorflow as tf
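# Conceptual sketch only (not used by the model code below): as the module
# docstring explains, recurrence is applied in depth rather than in time --
# one shared block is applied num_rec_steps times to the whole sequence.
# `step_fn` is a stand-in for a single attention + transition step; both
# names here are hypothetical.
def _depthwise_recurrence_sketch(x, step_fn, num_rec_steps):
  for step in range(num_rec_steps):
    x = step_fn(x, step)  # the same parameters are reused at every depth step
  return x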
@registry.register_model
class RTransformer(transformer.Transformer):
"""R-Transformer: Depth-wise recurrent transformer model."""
def encode(self, inputs, target_space, hparams, features=None):
"""Encode r-transformer inputs.
It is similar to "transformer.encode", but it uses
"r_transformer_util.r_transformer_encoder" instead of
"transformer.transformer_encoder".
Args:
inputs: Transformer inputs [batch_size, input_length, input_height,
hidden_dim] which will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for the model.
features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for
encoder-decoder attention. [batch_size, input_length]
encoder_extra_output: which is extra encoder output used in some
variants of the model (e.g. in ACT, to pass the ponder-time to body)
"""
inputs = common_layers.flatten4d3d(inputs)
encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
transformer.transformer_prepare_encoder(
inputs, target_space, hparams, features=features))
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
(encoder_output,
encoder_extra_output) = r_transformer_util.r_transformer_encoder(
encoder_input,
self_attention_bias,
hparams,
nonpadding=transformer.features_to_nonpadding(features, "inputs"),
save_weights_to=self.attention_weights)
return encoder_output, encoder_decoder_attention_bias, encoder_extra_output
def decode(self,
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
nonpadding=None):
"""Decode R-Transformer outputs from encoder representation.
It is similar to "transformer.decode", but it uses
"r_transformer_util.r_transformer_decoder" instead of
"transformer.transformer_decoder".
Args:
decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
hidden_dim]
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
hparams: hyperparameters for the model.
nonpadding: optional Tensor with shape [batch_size, decoder_length]
Returns:
Tuple of:
Final decoder representation. [batch_size, decoder_length,
hidden_dim]
encoder_extra_output: which is extra encoder output used in some
variants of the model (e.g. in ACT, to pass the ponder-time to body)
"""
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
# No caching in r-transformers!
decoder_output, dec_extra_output = r_transformer_util.r_transformer_decoder(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
nonpadding=nonpadding,
save_weights_to=self.attention_weights)
# Expand since t2t expects 4d tensors.
return tf.expand_dims(decoder_output, axis=2), dec_extra_output
def body(self, features):
"""R-Transformer main model_fn.
Args:
features: Map of features to the model. Should contain the following:
"inputs": Transformer inputs [batch_size, input_length, hidden_dim]
"targets": Target decoder outputs.
[batch_size, decoder_length, hidden_dim]
"target_space_id"
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
hparams = self._hparams
if hparams.add_position_timing_signal:
# Turning off addition of positional embedding in the encoder/decoder
# preparation as we do it in the beginning of each step.
hparams.pos = None
if self.has_input:
inputs = features["inputs"]
target_space = features["target_space_id"]
(encoder_output, encoder_decoder_attention_bias,
enc_extra_output) = self.encode(
inputs, target_space, hparams, features=features)
else:
(encoder_output, encoder_decoder_attention_bias,
enc_extra_output) = (None, None, (None, None))
targets = features["targets"]
targets = common_layers.flatten4d3d(targets)
(decoder_input,
decoder_self_attention_bias) = transformer.transformer_prepare_decoder(
targets, hparams, features=features)
decoder_output, dec_extra_output = self.decode(
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
nonpadding=transformer.features_to_nonpadding(features, "targets"))
expected_attentions = features.get("expected_attentions")
if expected_attentions is not None:
attention_loss = common_attention.encoder_decoder_attention_loss(
expected_attentions, self.attention_weights,
hparams.expected_attention_loss_type,
hparams.expected_attention_loss_multiplier)
return decoder_output, {"attention_loss": attention_loss}
if hparams.recurrence_type == "act" and hparams.act_loss_weight != 0:
if self.has_input:
enc_ponder_times, enc_remainders = enc_extra_output
enc_act_loss = (
hparams.act_loss_weight *
tf.reduce_mean(enc_ponder_times + enc_remainders))
else:
enc_act_loss = 0.0
(dec_ponder_times, dec_remainders) = dec_extra_output
dec_act_loss = (
hparams.act_loss_weight *
tf.reduce_mean(dec_ponder_times + dec_remainders))
act_loss = enc_act_loss + dec_act_loss
tf.contrib.summary.scalar("act_loss", act_loss)
return decoder_output, {"act_loss": act_loss}
return decoder_output
def _greedy_infer(self, features, decode_length):
"""Fast version of greedy decoding.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
Raises:
NotImplementedError: If there are multiple data shards.
"""
# TODO(dehghani): Support fast decoding for r-transformer (needs caching)
return self._slow_greedy_infer(features, decode_length)
def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha):
"""Beam search decoding.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
"""
# Caching is not enabled in r-transformer.
# TODO(dehghani): Support fast decoding for r-transformer (needs caching)
return self._beam_decode_slow(features, decode_length, beam_size,
top_beams, alpha)
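# Illustrative helper, not referenced by the classes in this file: the ACT
# penalty computed in `body` above is a weighted mean of ponder times plus
# remainders, evaluated separately for encoder and decoder and then summed.
# The function name is hypothetical; plain Python is used instead of TF ops
# to keep the sketch self-contained.
def _act_penalty_sketch(ponder_times, remainders, act_loss_weight):
  values = [p + r for p, r in zip(ponder_times, remainders)]
  return act_loss_weight * sum(values) / max(len(values), 1)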
@registry.register_model
class RTransformerEncoder(transformer.Transformer):
"""R-Transformer Encoder: Depth-wise recurrent transformer encoder-only."""
def encode(self, inputs, target_space, hparams, features=None):
"""Encode transformer inputs.
Args:
inputs: Transformer inputs [batch_size, input_length, input_height,
hidden_dim] which will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for the model.
features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_extra_output: which is extra encoder output used in some
variants of the model (e.g. in ACT, to pass the ponder-time to body)
"""
inputs = common_layers.flatten4d3d(inputs)
(encoder_input, self_attention_bias, _) = (
transformer.transformer_prepare_encoder(inputs, target_space, hparams))
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
(encoder_output,
encoder_extra_output) = r_transformer_util.r_transformer_encoder(
encoder_input,
self_attention_bias,
hparams,
nonpadding=transformer.features_to_nonpadding(features, "inputs"),
save_weights_to=self.attention_weights)
return encoder_output, encoder_extra_output
def body(self, features):
"""R-Transformer main model_fn.
Args:
features: Map of features to the model. Should contain the following:
"inputs": Transformer inputs [batch_size, input_length, hidden_dim]
"targets": Target decoder outputs.
[batch_size, decoder_length, hidden_dim]
"target_space_id"
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
hparams = self._hparams
assert self.has_input, ("r_transformer_encoder is applicable on problems "
"with inputs")
inputs = features["inputs"]
target_space = features["target_space_id"]
encoder_output, enc_extra_output = self.encode(
inputs, target_space, hparams, features=features)
encoder_output = tf.expand_dims(encoder_output, 2)
if hparams.recurrence_type == "act" and hparams.act_loss_weight != 0:
ponder_times, remainders = enc_extra_output
act_loss = hparams.act_loss_weight * tf.reduce_mean(ponder_times +
remainders)
tf.contrib.summary.scalar("act_loss", act_loss)
return encoder_output, {"act_loss": act_loss}
return encoder_output
def update_hparams_for_r_transformer(hparams):
"""Adds deault hparams for all of the variants of the R-transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for R-Transformers hyper-parameters
"""
# If not None, mixes vanilla transformer with r-transformer.
# Options: None, "before_rt", and "after_rt".
hparams.add_hparam("mix_with_transformer", None)
# Number of vanilla transformer layers to be mixed in with the r-transformer.
hparams.add_hparam("num_mixedin_layers", 2)
# Type of recurrency:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam("recurrence_type", "basic")
# Number of steps (which is equivalent to num layer in transformer).
hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers)
# Add the positional embedding at each step (horizontal timing).
hparams.add_hparam("add_position_timing_signal", False)
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam("position_start_index", None)
# Add a step embedding at each step (vertical timing).
hparams.add_hparam("add_step_timing_signal", False)
# Either "learned" or "sinusoid"
hparams.add_hparam("step_timing_signal_type", "learned")
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam("add_or_concat_timing_signal", "add")
# Add SRU at the beginning of each r-transformer step.
# This can be considered as a position timing signal
hparams.add_hparam("add_sru", False)
# Default ffn layer is separable convolution.
# Options: "fc" and "sepconv".
hparams.add_hparam("transformer_ffn_type", "sepconv")
# Transform bias (in models with highway or skip connection).
hparams.add_hparam("transform_bias_init", -1.0)
hparams.add_hparam("couple_carry_transform_gates", True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam("depth_embedding", True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam("dwa_elements", True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam("gate_ffn_layer", "dense")
# Config for all rnn style recurrencies (rnn, lstm, gru):
# Input of the gate functions: i:input/s:state/t:transformed state.
# or any combination: e.g. is, ts, ist, etc.
hparams.add_hparam("gates_inputs", "i")
# LSTM forget bias.
hparams.add_hparam("lstm_forget_bias", 1.0)
# How to combine state and input in each step:
# "mh_attention_ffn_add" or "add_mh_attention_ffn" or "dense_mh_attention"
# or "mh_attention_dense".
# Interpretation for e.g. "mh_attention_ffn_add":
# Apply transformer attention then transformer ffn, then add.
hparams.add_hparam("inputs_states_combination", "mh_attention_ffn_add")
# Config for gru_style recurrency:
# What to transform in gru: state/output/candidate/combination of them.
hparams.add_hparam("gru_transformation", ["state_transformation"])
# Config for lstm_style Recurrency:
# What to transform in lstm: state/modulated_input/memory.
hparams.add_hparam("lstm_transformation", ["state_transformation"])
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam("use_memory_as_final_state", False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam("act_type", "basic")
# Max number of steps (forces halting at this step).
hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers)
hparams.add_hparam("act_halting_bias_init", 1.0)
hparams.add_hparam("act_epsilon", 0.01)
hparams.add_hparam("act_loss_weight", 0.01)
return hparams
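# Illustrative only: a custom variant would follow the same pattern as the
# registered sets below -- start from a standard transformer hparams set,
# apply update_hparams_for_r_transformer, then override individual fields.
# The name "r_transformer_example_custom" is hypothetical and not part of the
# upstream registry.
@registry.register_hparams
def r_transformer_example_custom():
  hparams = transformer.transformer_base()
  hparams = update_hparams_for_r_transformer(hparams)
  hparams.recurrence_type = "highway"
  hparams.add_step_timing_signal = True
  return hparams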
@registry.register_hparams
def r_transformer_big():
hparams = transformer.transformer_big()
hparams = update_hparams_for_r_transformer(hparams)
return hparams
@registry.register_hparams
def r_transformer_base():
hparams = transformer.transformer_base()
hparams = update_hparams_for_r_transformer(hparams)
return hparams
@registry.register_hparams
def r_transformer_tiny():
hparams = transformer.transformer_tiny()
hparams = update_hparams_for_r_transformer(hparams)
hparams.num_rec_steps = 8
return hparams
@registry.register_hparams
def transformer_teeny():
hparams = transformer.transformer_base()
hparams.num_rec_steps = 2
hparams.hidden_size = 128
hparams.filter_size = 128
hparams.num_heads = 2
return hparams
@registry.register_hparams
def r_transformer_teeny():
hparams = transformer_teeny()
hparams = update_hparams_for_r_transformer(hparams)
hparams.num_rec_steps = 10
return hparams
@registry.register_hparams
def r_transformer_base_dropconnect():
hparams = r_transformer_base()
hparams.gate_ffn_layer = "dense_dropconnect"
hparams.add_hparam("dropconnect_dropout", 0.5)
return hparams
@registry.register_hparams
def r_transformer_act_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
return hparams
@registry.register_hparams
def r_transformer_act_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
return hparams
@registry.register_hparams
def r_transformer_act_big():
hparams = r_transformer_big()
hparams.recurrence_type = "act"
return hparams
@registry.register_hparams
def r_transformer_act_random_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.act_type = "random"
return hparams
@registry.register_hparams
def r_transformer_act_accumulated_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.act_type = "accumulated"
return hparams
@registry.register_hparams
def r_transformer_act_global_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.act_type = "global"
return hparams
@registry.register_hparams
def r_transformer_act_accumulated_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.act_type = "accumulated"
return hparams
@registry.register_hparams
def r_transformer_act_global_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.act_type = "global"
return hparams
@registry.register_hparams
def r_transformer_act_random_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.act_type = "random"
return hparams
@registry.register_hparams
def r_transformer_act_base_sb():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_large():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.hidden_size = 1024
hparams.batch_size = 2048
hparams.filter_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_tall():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.num_hidden_layers = 16
hparams.batch_size = 1024
hparams.act_max_steps = 24
return hparams
@registry.register_hparams
def r_transformer_act_tall_actlossw0():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.num_hidden_layers = 16
hparams.batch_size = 1024
hparams.act_max_steps = 24
return hparams
@registry.register_hparams
def r_transformer_act_tall_actlossw001():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.num_hidden_layers = 16
hparams.batch_size = 1024
hparams.act_max_steps = 24
return hparams
@registry.register_hparams
def r_transformer_act_base_d03():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.3
hparams.relu_dropout = 0.3
return hparams
@registry.register_hparams
def r_transformer_act_big_d03():
hparams = r_transformer_big()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.3
hparams.relu_dropout = 0.3
return hparams
@registry.register_hparams
def r_transformer_act_tiny_d02():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.2
hparams.attention_dropout = 0.2
hparams.relu_dropout = 0.2
return hparams
@registry.register_hparams
def r_transformer_act_tiny_d02_sb():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.2
hparams.attention_dropout = 0.2
hparams.relu_dropout = 0.2
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_tiny_sb():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_tiny_d05():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.5
hparams.attention_dropout = 0.5
hparams.relu_dropout = 0.5
return hparams
@registry.register_hparams
def r_transformer_base_sb():
hparams = r_transformer_base()
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_skip_base():
hparams = r_transformer_base()
hparams.recurrence_type = "skip"
return hparams
@registry.register_hparams
def r_transformer_skip_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "skip"
return hparams
@registry.register_hparams
def r_transformer_highway_base():
hparams = r_transformer_base()
hparams.recurrence_type = "highway"
return hparams
@registry.register_hparams
def r_transformer_highway_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "highway"
return hparams
@registry.register_hparams
def r_transformer_dwa_base():
hparams = r_transformer_base()
hparams.recurrence_type = "dwa"
return hparams
@registry.register_hparams
def r_transformer_dwa_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "dwa"
return hparams
@registry.register_hparams
def r_transformer_dwa_tiny_test():
hparams = r_transformer_tiny()
hparams.recurrence_type = "dwa"
return hparams
@registry.register_hparams
def r_transformer_rnn_base():
hparams = r_transformer_base()
hparams.recurrence_type = "rnn"
return hparams
@registry.register_hparams
def r_transformer_gru_base():
hparams = r_transformer_base()
hparams.recurrence_type = "gru"
return hparams
@registry.register_hparams
def r_transformer_lstm_base():
hparams = r_transformer_base()
hparams.recurrence_type = "lstm"
return hparams
@registry.register_hparams
def r_transformer_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.pos = None
hparams.add_position_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_position_random_timing_base():
hparams = r_transformer_base()
hparams.pos = None
hparams.add_position_timing_signal = True
hparams.position_start_index = "random"
return hparams
@registry.register_hparams
def r_transformer_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.pos = None
hparams.add_position_timing_signal = True
hparams.position_start_index = "random"
return hparams
@registry.register_hparams
def r_transformer_position_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.pos = None
hparams.add_position_timing_signal = True
hparams.position_start_index = "step"
return hparams
@registry.register_hparams
def r_transformer_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_step_sinusoid_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_step_timing_signal = True
hparams.step_timing_signal_type = "sinusoid"
return hparams
@registry.register_hparams
def r_transformer_step_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
return hparams
@registry.register_hparams
def r_transformer_act_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
return hparams
@registry.register_hparams
def r_transformer_act_position_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "step"
return hparams
@registry.register_hparams
def r_transformer_act_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_sinusoid_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
hparams.step_timing_signal_type = "sinusoid"
return hparams
@registry.register_hparams
def r_transformer_act_step_sinusoid_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
hparams.step_timing_signal_type = "sinusoid"
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_step_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_random_timing_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_step_position_timing_base():
hparams = r_transformer_base()
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_mix_after_rt_base():
hparams = r_transformer_base()
hparams.mix_with_transformer = "before_rt"
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_mix_before_rt_base():
hparams = r_transformer_base()
hparams.mix_with_transformer = "before_rt"
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_mix_transformer_act_step_position_timing_mix_after_rt_base():
hparams = r_transformer_base()
hparams.mix_with_transformer = "after_rt"
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_big():
hparams = r_transformer_big()
hparams.batch_size //= 2
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_concat_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
hparams.add_or_concat_timing_signal = "concat"
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_concat_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
hparams.add_or_concat_timing_signal = "concat"
return hparams
@registry.register_hparams
def r_transformer_act_with_sru_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_sru = True
return hparams
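# Illustrative only, not part of the original module: the registered builders
# above are ordinary functions, so an experiment can also construct a set
# directly and tweak it before handing it to the trainer. The chosen builder
# and the overridden values below are arbitrary examples.
def _example_build_hparams():
  hparams = r_transformer_act_step_position_timing_base()
  hparams.batch_size = 1024  # e.g. shrink the batch for a smaller accelerator
  hparams.act_max_steps = 16
  return hparams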
| # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformers with depthwise recurrency (go/r-transformer).
A high-level explanation on the idea and the architecture:
The vanilla Transformer model has no recurrence and struggles with some tasks
that a fully recurrent model can easily solve. Instead of incorporating
recurrence in time (which has a dependency on sequence length T),
we apply recurrence in depth (which we can set to some fixed length D << T),
and apply self-attention instead of sequential processing to enable the model
to incorporate long-range dependencies.
Structure of the code is explained in r_transformer_util.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.models.research import r_transformer_util
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_model
class RTransformer(transformer.Transformer):
"""R-Transformer: Depth-wise recurrent transformer model."""
def encode(self, inputs, target_space, hparams, features=None):
"""Encode r-transformer inputs.
It is similar to "transformer.encode", but it uses
"r_transformer_util.r_transformer_encoder" instead of
"transformer.transformer_encoder".
Args:
inputs: Transformer inputs [batch_size, input_length, input_height,
hidden_dim] which will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for the model.
features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for
encoder-decoder attention. [batch_size, input_length]
encoder_extra_output: which is extra encoder output used in some
variants of the model (e.g. in ACT, to pass the ponder-time to body)
"""
inputs = common_layers.flatten4d3d(inputs)
encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
transformer.transformer_prepare_encoder(
inputs, target_space, hparams, features=features))
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
(encoder_output,
encoder_extra_output) = r_transformer_util.r_transformer_encoder(
encoder_input,
self_attention_bias,
hparams,
nonpadding=transformer.features_to_nonpadding(features, "inputs"),
save_weights_to=self.attention_weights)
return encoder_output, encoder_decoder_attention_bias, encoder_extra_output
def decode(self,
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
nonpadding=None):
"""Decode R-Transformer outputs from encoder representation.
It is similar to "transformer.decode", but it uses
"r_transformer_util.r_transformer_decoder" instead of
"transformer.transformer_decoder".
Args:
decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
hidden_dim]
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
hparams: hyperparameters for the model.
nonpadding: optional Tensor with shape [batch_size, decoder_length]
Returns:
Tuple of:
Final decoder representation. [batch_size, decoder_length,
hidden_dim]
encoder_extra_output: which is extra encoder output used in some
variants of the model (e.g. in ACT, to pass the ponder-time to body)
"""
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
# No caching in r-transformers!
decoder_output, dec_extra_output = r_transformer_util.r_transformer_decoder(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
nonpadding=nonpadding,
save_weights_to=self.attention_weights)
# Expand since t2t expects 4d tensors.
return tf.expand_dims(decoder_output, axis=2), dec_extra_output
def body(self, features):
"""R-Transformer main model_fn.
Args:
features: Map of features to the model. Should contain the following:
"inputs": Transformer inputs [batch_size, input_length, hidden_dim]
"targets": Target decoder outputs.
[batch_size, decoder_length, hidden_dim]
"target_space_id"
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
hparams = self._hparams
if hparams.add_position_timing_signal:
# Turning off addition of positional embedding in the encoder/decoder
# preparation as we do it in the beginning of each step.
hparams.pos = None
if self.has_input:
inputs = features["inputs"]
target_space = features["target_space_id"]
(encoder_output, encoder_decoder_attention_bias,
enc_extra_output) = self.encode(
inputs, target_space, hparams, features=features)
else:
(encoder_output, encoder_decoder_attention_bias,
enc_extra_output) = (None, None, (None, None))
targets = features["targets"]
targets = common_layers.flatten4d3d(targets)
(decoder_input,
decoder_self_attention_bias) = transformer.transformer_prepare_decoder(
targets, hparams, features=features)
decoder_output, dec_extra_output = self.decode(
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
nonpadding=transformer.features_to_nonpadding(features, "targets"))
expected_attentions = features.get("expected_attentions")
if expected_attentions is not None:
attention_loss = common_attention.encoder_decoder_attention_loss(
expected_attentions, self.attention_weights,
hparams.expected_attention_loss_type,
hparams.expected_attention_loss_multiplier)
return decoder_output, {"attention_loss": attention_loss}
if hparams.recurrence_type == "act" and hparams.act_loss_weight != 0:
if self.has_input:
enc_ponder_times, enc_remainders = enc_extra_output
enc_act_loss = (
hparams.act_loss_weight *
tf.reduce_mean(enc_ponder_times + enc_remainders))
else:
enc_act_loss = 0.0
(dec_ponder_times, dec_remainders) = dec_extra_output
dec_act_loss = (
hparams.act_loss_weight *
tf.reduce_mean(dec_ponder_times + dec_remainders))
act_loss = enc_act_loss + dec_act_loss
tf.contrib.summary.scalar("act_loss", act_loss)
return decoder_output, {"act_loss": act_loss}
return decoder_output
def _greedy_infer(self, features, decode_length):
"""Fast version of greedy decoding.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
Raises:
NotImplementedError: If there are multiple data shards.
"""
# TODO(dehghani): Support fast decoding for r-transformer (needs caching)
return self._slow_greedy_infer(features, decode_length)
def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha):
"""Beam search decoding.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
"""
# Caching is not enabled in r-transformer.
# TODO(dehghani): Support fast decoding for r-transformer (needs caching)
return self._beam_decode_slow(features, decode_length, beam_size,
top_beams, alpha)
@registry.register_model
class RTransformerEncoder(transformer.Transformer):
"""R-Transformer Encoder: Depth-wise recurrent transformer encoder-only."""
def encode(self, inputs, target_space, hparams, features=None):
"""Encode transformer inputs.
Args:
inputs: Transformer inputs [batch_size, input_length, input_height,
hidden_dim] which will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for the model.
features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_extra_output: which is extra encoder output used in some
variants of the model (e.g. in ACT, to pass the ponder-time to body)
"""
inputs = common_layers.flatten4d3d(inputs)
(encoder_input, self_attention_bias, _) = (
transformer.transformer_prepare_encoder(inputs, target_space, hparams))
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
(encoder_output,
encoder_extra_output) = r_transformer_util.r_transformer_encoder(
encoder_input,
self_attention_bias,
hparams,
nonpadding=transformer.features_to_nonpadding(features, "inputs"),
save_weights_to=self.attention_weights)
return encoder_output, encoder_extra_output
def body(self, features):
"""R-Transformer main model_fn.
Args:
features: Map of features to the model. Should contain the following:
"inputs": Transformer inputs [batch_size, input_length, hidden_dim]
"targets": Target decoder outputs.
[batch_size, decoder_length, hidden_dim]
"target_space_id"
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
"""
hparams = self._hparams
assert self.has_input, ("r_transformer_encoder is applicable on problems "
"with inputs")
inputs = features["inputs"]
target_space = features["target_space_id"]
encoder_output, enc_extra_output = self.encode(
inputs, target_space, hparams, features=features)
encoder_output = tf.expand_dims(encoder_output, 2)
if hparams.recurrence_type == "act" and hparams.act_loss_weight != 0:
ponder_times, remainders = enc_extra_output
act_loss = hparams.act_loss_weight * tf.reduce_mean(ponder_times +
remainders)
tf.contrib.summary.scalar("act_loss", act_loss)
return encoder_output, {"act_loss": act_loss}
return encoder_output
def update_hparams_for_r_transformer(hparams):
"""Adds deault hparams for all of the variants of the R-transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for R-Transformers hyper-parameters
"""
# If not None, mixes vanilla transformer with r-transformer.
# Options: None, "before_rt", and "after_rt".
hparams.add_hparam("mix_with_transformer", None)
# Number of vanilla transformer layers to be mixed in with the r-transformer.
hparams.add_hparam("num_mixedin_layers", 2)
# Type of recurrency:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam("recurrence_type", "basic")
# Number of steps (which is equivalent to num layer in transformer).
hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers)
# Add the positional embedding at each step (horizontal timing).
hparams.add_hparam("add_position_timing_signal", False)
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam("position_start_index", None)
# Add a step embedding at each step (vertical timing).
hparams.add_hparam("add_step_timing_signal", False)
# Either "learned" or "sinusoid"
hparams.add_hparam("step_timing_signal_type", "learned")
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam("add_or_concat_timing_signal", "add")
# Add SRU at the beginning of each r-transformer step.
# This can be considered as a position timing signal
hparams.add_hparam("add_sru", False)
# Default ffn layer is separable convolution.
# Options: "fc" and "sepconv".
hparams.add_hparam("transformer_ffn_type", "sepconv")
# Transform bias (in models with highway or skip connection).
hparams.add_hparam("transform_bias_init", -1.0)
hparams.add_hparam("couple_carry_transform_gates", True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam("depth_embedding", True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam("dwa_elements", True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam("gate_ffn_layer", "dense")
# Config for all rnn style recurrencies (rnn, lstm, gru):
# Input of the gate functions: i:input/s:state/t:transformed state.
# or any combination: e.g. is, ts, ist, etc.
hparams.add_hparam("gates_inputs", "i")
# LSTM forget bias.
hparams.add_hparam("lstm_forget_bias", 1.0)
# How to combine state and input in each step:
# "mh_attention_ffn_add" or "add_mh_attention_ffn" or "dense_mh_attention"
# or "mh_attention_dense".
# Interpretation for e.g. "mh_attention_ffn_add":
# Apply transformer attention then transformer ffn, then add.
hparams.add_hparam("inputs_states_combination", "mh_attention_ffn_add")
# Config for gru_style recurrency:
# What to transform in gru: state/output/candidate/combination of them.
hparams.add_hparam("gru_transformation", ["state_transformation"])
# Config for lstm_style Recurrency:
# What to transform in lstm: state/modulated_input/memory.
hparams.add_hparam("lstm_transformation", ["state_transformation"])
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam("use_memory_as_final_state", False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam("act_type", "basic")
# Max number of steps (forces halting at this step).
hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers)
hparams.add_hparam("act_halting_bias_init", 1.0)
hparams.add_hparam("act_epsilon", 0.01)
hparams.add_hparam("act_loss_weight", 0.01)
return hparams
@registry.register_hparams
def r_transformer_big():
hparams = transformer.transformer_big()
hparams = update_hparams_for_r_transformer(hparams)
return hparams
@registry.register_hparams
def r_transformer_base():
hparams = transformer.transformer_base()
hparams = update_hparams_for_r_transformer(hparams)
return hparams
@registry.register_hparams
def r_transformer_tiny():
hparams = transformer.transformer_tiny()
hparams = update_hparams_for_r_transformer(hparams)
hparams.num_rec_steps = 8
return hparams
@registry.register_hparams
def transformer_teeny():
hparams = transformer.transformer_base()
hparams.num_rec_steps = 2
hparams.hidden_size = 128
hparams.filter_size = 128
hparams.num_heads = 2
return hparams
@registry.register_hparams
def r_transformer_teeny():
hparams = transformer_teeny()
hparams = update_hparams_for_r_transformer(hparams)
hparams.num_rec_steps = 10
return hparams
@registry.register_hparams
def r_transformer_base_dropconnect():
hparams = r_transformer_base()
hparams.gate_ffn_layer = "dense_dropconnect"
hparams.add_hparam("dropconnect_dropout", 0.5)
return hparams
@registry.register_hparams
def r_transformer_act_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
return hparams
@registry.register_hparams
def r_transformer_act_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
return hparams
@registry.register_hparams
def r_transformer_act_big():
hparams = r_transformer_big()
hparams.recurrence_type = "act"
return hparams
@registry.register_hparams
def r_transformer_act_random_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.act_type = "random"
return hparams
@registry.register_hparams
def r_transformer_act_accumulated_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.act_type = "accumulated"
return hparams
@registry.register_hparams
def r_transformer_act_global_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.act_type = "global"
return hparams
@registry.register_hparams
def r_transformer_act_accumulated_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.act_type = "accumulated"
return hparams
@registry.register_hparams
def r_transformer_act_global_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.act_type = "global"
return hparams
@registry.register_hparams
def r_transformer_act_random_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.act_type = "random"
return hparams
@registry.register_hparams
def r_transformer_act_base_sb():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_large():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.hidden_size = 1024
hparams.batch_size = 2048
hparams.filter_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_tall():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.num_hidden_layers = 16
hparams.batch_size = 1024
hparams.act_max_steps = 24
return hparams
@registry.register_hparams
def r_transformer_act_tall_actlossw0():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.num_hidden_layers = 16
hparams.batch_size = 1024
hparams.act_max_steps = 24
return hparams
@registry.register_hparams
def r_transformer_act_tall_actlossw001():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.num_hidden_layers = 16
hparams.batch_size = 1024
hparams.act_max_steps = 24
return hparams
@registry.register_hparams
def r_transformer_act_base_d03():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.3
hparams.relu_dropout = 0.3
return hparams
@registry.register_hparams
def r_transformer_act_big_d03():
hparams = r_transformer_big()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.3
hparams.relu_dropout = 0.3
return hparams
@registry.register_hparams
def r_transformer_act_tiny_d02():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.2
hparams.attention_dropout = 0.2
hparams.relu_dropout = 0.2
return hparams
@registry.register_hparams
def r_transformer_act_tiny_d02_sb():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.2
hparams.attention_dropout = 0.2
hparams.relu_dropout = 0.2
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_tiny_sb():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_act_tiny_d05():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.layer_prepostprocess_dropout = 0.5
hparams.attention_dropout = 0.5
hparams.relu_dropout = 0.5
return hparams
@registry.register_hparams
def r_transformer_base_sb():
hparams = r_transformer_base()
hparams.batch_size = 2048
return hparams
@registry.register_hparams
def r_transformer_skip_base():
hparams = r_transformer_base()
hparams.recurrence_type = "skip"
return hparams
@registry.register_hparams
def r_transformer_skip_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "skip"
return hparams
@registry.register_hparams
def r_transformer_highway_base():
hparams = r_transformer_base()
hparams.recurrence_type = "highway"
return hparams
@registry.register_hparams
def r_transformer_highway_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "highway"
return hparams
@registry.register_hparams
def r_transformer_dwa_base():
hparams = r_transformer_base()
hparams.recurrence_type = "dwa"
return hparams
@registry.register_hparams
def r_transformer_dwa_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "dwa"
return hparams
@registry.register_hparams
def r_transformer_dwa_tiny_test():
hparams = r_transformer_tiny()
hparams.recurrence_type = "dwa"
return hparams
@registry.register_hparams
def r_transformer_rnn_base():
hparams = r_transformer_base()
hparams.recurrence_type = "rnn"
return hparams
@registry.register_hparams
def r_transformer_gru_base():
hparams = r_transformer_base()
hparams.recurrence_type = "gru"
return hparams
@registry.register_hparams
def r_transformer_lstm_base():
hparams = r_transformer_base()
hparams.recurrence_type = "lstm"
return hparams
@registry.register_hparams
def r_transformer_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.pos = None
hparams.add_position_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_position_random_timing_base():
hparams = r_transformer_base()
hparams.pos = None
hparams.add_position_timing_signal = True
hparams.position_start_index = "random"
return hparams
@registry.register_hparams
def r_transformer_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.pos = None
hparams.add_position_timing_signal = True
hparams.position_start_index = "random"
return hparams
@registry.register_hparams
def r_transformer_position_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.pos = None
hparams.add_position_timing_signal = True
hparams.position_start_index = "step"
return hparams
@registry.register_hparams
def r_transformer_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_step_sinusoid_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_step_timing_signal = True
hparams.step_timing_signal_type = "sinusoid"
return hparams
@registry.register_hparams
def r_transformer_step_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
return hparams
@registry.register_hparams
def r_transformer_act_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
return hparams
@registry.register_hparams
def r_transformer_act_position_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "step"
return hparams
@registry.register_hparams
def r_transformer_act_step_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_sinusoid_position_random_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
hparams.step_timing_signal_type = "sinusoid"
return hparams
@registry.register_hparams
def r_transformer_act_step_sinusoid_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
hparams.step_timing_signal_type = "sinusoid"
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_step_position_timing_tiny():
hparams = r_transformer_tiny()
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_random_timing_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.position_start_index = "random"
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_step_position_timing_base():
hparams = r_transformer_base()
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_mix_after_rt_base():
hparams = r_transformer_base()
hparams.mix_with_transformer = "before_rt"
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_mix_before_rt_base():
hparams = r_transformer_base()
hparams.mix_with_transformer = "before_rt"
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_mix_transformer_act_step_position_timing_mix_after_rt_base():
hparams = r_transformer_base()
hparams.mix_with_transformer = "after_rt"
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_big():
hparams = r_transformer_big()
hparams.batch_size //= 2
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_concat_tiny():
hparams = r_transformer_tiny()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
hparams.add_or_concat_timing_signal = "concat"
return hparams
@registry.register_hparams
def r_transformer_act_step_position_timing_concat_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_position_timing_signal = True
hparams.pos = None
hparams.add_step_timing_signal = True
hparams.add_or_concat_timing_signal = "concat"
return hparams
@registry.register_hparams
def r_transformer_act_with_sru_base():
hparams = r_transformer_base()
hparams.recurrence_type = "act"
hparams.add_sru = True
return hparams | en | 0.768994 | # coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Transformers with depthwise recurrency (go/r-transformer). A high-level explanation on the idea and the architecture: The vanilla Transformer model has no recurrence and struggles with some tasks that a fully recurrent model can easily solve. Instead of incorporating recurrence in time (which has a dependency on sequence length T), we apply recurrence in depth (which we can set to some fixed length D << T), and apply self-attention instead of sequential processing to enable the model to incorporate long-range dependencies. Structure of the code is explained in r_transformer_util.py # Dependency imports R-Transformer: Depth-wise recurrent transformer model. Encode r-transformer inputs. It is similar to "transformer.encode", but it uses "r_transformer_util.r_transformer_encoder" instead of "transformer.transformer_encoder". Args: inputs: Transformer inputs [batch_size, input_length, input_height, hidden_dim] which will be flattened along the two spatial dimensions. target_space: scalar, target space ID. hparams: hyperparmeters for model. features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: Tuple of: encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] encoder_extra_output: which is extra encoder output used in some variants of the model (e.g. in ACT, to pass the ponder-time to body) Decode R-Transformer outputs from encoder representation. It is similar to "transformer.decode", but it uses "r_transformer_util.r_transformer_decoder" instead of "transformer.transformer_decoder". Args: decoder_input: inputs to bottom of the model. [batch_size, decoder_length, hidden_dim] encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] decoder_self_attention_bias: Bias and mask weights for decoder self-attention. [batch_size, decoder_length] hparams: hyperparmeters for model. nonpadding: optional Tensor with shape [batch_size, decoder_length] Returns: Tuple of: Final decoder representation. [batch_size, decoder_length, hidden_dim] encoder_extra_output: which is extra encoder output used in some variants of the model (e.g. in ACT, to pass the ponder-time to body) # No caching in r-transformers! # Expand since t2t expects 4d tensors. R-Transformer main model_fn. Args: features: Map of features to the model. Should contain the following: "inputs": Transformer inputs [batch_size, input_length, hidden_dim] "targets": Target decoder outputs. [batch_size, decoder_length, hidden_dim] "target_space_id" Returns: Final decoder representation. 
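# The hparams above toggle the behaviours described in this file's comments:
# depth-wise recurrence (one shared block applied for a fixed number of steps),
# optional ACT halting, and position/step timing signals re-added at every step.
# The sketch below is an illustration only -- not the actual implementation,
# which lives in r_transformer_util.py -- of what the depth recurrence amounts to:
def _depth_recurrence_sketch(x, block, position_signal, step_signal, num_rec_steps):
  # x: [batch, length, hidden]; block: one shared self-attention + ffn layer.
  for step in range(num_rec_steps):
    x = x + position_signal(x)    # add_position_timing_signal
    x = x + step_signal(x, step)  # add_step_timing_signal
    x = block(x)                  # the same weights are reused at every depth step
  return x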
[batch_size, decoder_length, hidden_dim] # Turning off addition of positional embedding in the encoder/decoder # preparation as we do it in the beginning of each step. Fast version of greedy decoding. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if beam_size == 1 or [batch_size, top_beams, <= decode_length] "scores": decoding log probs from the beam search, None if using greedy decoding (beam_size=1) } Raises: NotImplementedError: If there are multiple data shards. # TODO(dehghani): Support fast decoding for r-transformer (needs caching) Beam search decoding. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. alpha: Float that controls the length penalty. larger the alpha, stronger the preference for longer translations. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if beam_size == 1 or [batch_size, top_beams, <= decode_length] "scores": decoding log probs from the beam search, None if using greedy decoding (beam_size=1) } # Caching is not ebabled in r-transformer # TODO(dehghani): Support fast decoding for r-transformer(needs caching) R-Transformer Encoder: Depth-wise recurrent transformer encoder-only. Encode transformer inputs. Args: inputs: Transformer inputs [batch_size, input_length, input_height, hidden_dim] which will be flattened along the two spatial dimensions. target_space: scalar, target space ID. hparams: hyperparmeters for model. features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: Tuple of: encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_extra_output: which is extra encoder output used in some variants of the model (e.g. in ACT, to pass the ponder-time to body) R-Transformer main model_fn. Args: features: Map of features to the model. Should contain the following: "inputs": Transformer inputs [batch_size, input_length, hidden_dim] "targets": Target decoder outputs. [batch_size, decoder_length, hidden_dim] "target_space_id" Returns: Final decoder representation. [batch_size, decoder_length, hidden_dim] Adds deault hparams for all of the variants of the R-transformer. Args: hparams: default hparams (usually one of the standard hparams from transformer model (like "transformer_base") Returns: hparams with default values for R-Transformers hyper-parameters # If not None, mixes vanilla transformer with r-transformer. # Options: None, "before_rt", and "after_rt". # Number of vanilla transformer layers used to be mixed with r-transofmer. # Type of recurrency: # basic, highway, skip, dwa, act, rnn, gru, lstm. # Number of steps (which is equivalent to num layer in transformer). # Add the positional mebedding at each step(horisontal timing) # Logic of position shifting when using timing signal: # None, "random", "step" # Add an step embedding at each step (vertical timing) # Either "learned" or "sinusoid" # Add or concat the timing signal (applied both on position and step timing). # Options: "add" and "concat". # Add SRU at the beginning of each r-transformer step. # This can be considered as a position timing signal # Default ffn layer is separable convolution. 
# Options: "fc" and "sepconv". # Transform bias (in models with highway or skip connection). # Depth-wise attention (grid-transformer!) hparams: # Adds depth embedding, if true. # Learns attention weights for elements (instead of positions), if true. # Type of ffn_layer used for gate in skip, highway, etc. # "dense" or "dense_dropconnect". # With dense_relu_dense, the bias/kernel initializations will not be applied. # Config for all rnn style recurrencies (rnn, lstm, gru): # Input of the gate functions: i:input/s:state/t:transformed state. # or any combination: e.g. is, ts, ist, etc. # LSTEM forget bias. # How to combine state and input in each step: # "mh_attention_ffn_add" or "add_mh_attention_ffn" or "dense_mh_attention" # or "mh_attention_dense". # Interpretation for e.g. "mh_attention_ffn_add": # Apply transformer attention then transformer ffn, then add. # Config for gru_style recurrency: # What to transform in gru: state/output/candidate/combination of them. # Config for lstm_style Recurrency: # What to transform in lstm: state/modulated_input/memory. # Uses the mememory at the last step as the final touput, if true. # Type of act: basic/accumulated/global (instead of position-wise!)/random. # Max number of steps (forces halting at this step). | 1.97163 | 2 |
test/torchaudio_unittest/backend/soundfile/load_test.py | underdogliu/audio | 0 | 6631604 | import os
import tarfile
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal import module_utils as _mod_utils
from torchaudio.backend import soundfile_backend
from torchaudio_unittest.common_utils import (
get_wav_data,
load_wav,
normalize_wav,
PytorchTestCase,
save_wav,
skipIfNoModule,
TempDirMixin,
)
from .common import dtype2subtype, parameterize, skipIfFormatNotSupported
if _mod_utils.is_module_available("soundfile"):
import soundfile
def _get_mock_path(
ext: str,
dtype: str,
sample_rate: int,
num_channels: int,
num_frames: int,
):
return f"{dtype}_{sample_rate}_{num_channels}_{num_frames}.{ext}"
def _get_mock_params(path: str):
filename, ext = path.split(".")
parts = filename.split("_")
return {
"ext": ext,
"dtype": parts[0],
"sample_rate": int(parts[1]),
"num_channels": int(parts[2]),
"num_frames": int(parts[3]),
}
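# Example of the round trip implemented by the two helpers above (comment only):
#   _get_mock_path("wav", "int16", 8000, 2, 16000)  -> "int16_8000_2_16000.wav"
#   _get_mock_params("int16_8000_2_16000.wav")      -> {"ext": "wav", "dtype": "int16",
#                                                       "sample_rate": 8000,
#                                                       "num_channels": 2,
#                                                       "num_frames": 16000}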
class SoundFileMock:
def __init__(self, path, mode):
assert mode == "r"
self.path = path
self._params = _get_mock_params(path)
self._start = None
@property
def samplerate(self):
return self._params["sample_rate"]
@property
def format(self):
if self._params["ext"] == "wav":
return "WAV"
if self._params["ext"] == "flac":
return "FLAC"
if self._params["ext"] == "ogg":
return "OGG"
if self._params["ext"] in ["sph", "nis", "nist"]:
return "NIST"
@property
def subtype(self):
if self._params["ext"] == "ogg":
return "VORBIS"
return dtype2subtype(self._params["dtype"])
def _prepare_read(self, start, stop, frames):
assert stop is None
self._start = start
return frames
def read(self, frames, dtype, always_2d):
assert always_2d
data = get_wav_data(
dtype,
self._params["num_channels"],
normalize=False,
num_frames=self._params["num_frames"],
channels_first=False,
).numpy()
return data[self._start : self._start + frames]
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
class MockedLoadTest(PytorchTestCase):
def assert_dtype(self, ext, dtype, sample_rate, num_channels, normalize, channels_first):
"""When format is WAV or NIST, normalize=False will return the native dtype Tensor, otherwise float32"""
num_frames = 3 * sample_rate
path = _get_mock_path(ext, dtype, sample_rate, num_channels, num_frames)
expected_dtype = torch.float32 if normalize or ext not in ["wav", "nist"] else getattr(torch, dtype)
with patch("soundfile.SoundFile", SoundFileMock):
found, sr = soundfile_backend.load(path, normalize=normalize, channels_first=channels_first)
assert found.dtype == expected_dtype
assert sample_rate == sr
@parameterize(
["uint8", "int16", "int32", "float32", "float64"],
[8000, 16000],
[1, 2],
[True, False],
[True, False],
)
def test_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""Returns native dtype when normalize=False else float32"""
self.assert_dtype("wav", dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(
["int8", "int16", "int32"],
[8000, 16000],
[1, 2],
[True, False],
[True, False],
)
def test_sphere(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""Returns float32 always"""
self.assert_dtype("sph", dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize([8000, 16000], [1, 2], [True, False], [True, False])
def test_ogg(self, sample_rate, num_channels, normalize, channels_first):
"""Returns float32 always"""
self.assert_dtype("ogg", "int16", sample_rate, num_channels, normalize, channels_first)
@parameterize([8000, 16000], [1, 2], [True, False], [True, False])
def test_flac(self, sample_rate, num_channels, normalize, channels_first):
"""`soundfile_backend.load` can load ogg format."""
self.assert_dtype("flac", "int16", sample_rate, num_channels, normalize, channels_first)
class LoadTestBase(TempDirMixin, PytorchTestCase):
def assert_wav(
self,
dtype,
sample_rate,
num_channels,
normalize,
channels_first=True,
duration=1,
):
"""`soundfile_backend.load` can load wav format correctly.
Wav data loaded with soundfile backend should match those with scipy
"""
path = self.get_temp_path("reference.wav")
num_frames = duration * sample_rate
data = get_wav_data(
dtype,
num_channels,
normalize=normalize,
num_frames=num_frames,
channels_first=channels_first,
)
save_wav(path, data, sample_rate, channels_first=channels_first)
expected = load_wav(path, normalize=normalize, channels_first=channels_first)[0]
data, sr = soundfile_backend.load(path, normalize=normalize, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected)
def assert_sphere(
self,
dtype,
sample_rate,
num_channels,
channels_first=True,
duration=1,
):
"""`soundfile_backend.load` can load SPHERE format correctly."""
path = self.get_temp_path("reference.sph")
num_frames = duration * sample_rate
raw = get_wav_data(
dtype,
num_channels,
num_frames=num_frames,
normalize=False,
channels_first=False,
)
soundfile.write(path, raw, sample_rate, subtype=dtype2subtype(dtype), format="NIST")
expected = normalize_wav(raw.t() if channels_first else raw)
data, sr = soundfile_backend.load(path, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected, atol=1e-4, rtol=1e-8)
def assert_flac(
self,
dtype,
sample_rate,
num_channels,
channels_first=True,
duration=1,
):
"""`soundfile_backend.load` can load FLAC format correctly."""
path = self.get_temp_path("reference.flac")
num_frames = duration * sample_rate
raw = get_wav_data(
dtype,
num_channels,
num_frames=num_frames,
normalize=False,
channels_first=False,
)
soundfile.write(path, raw, sample_rate)
expected = normalize_wav(raw.t() if channels_first else raw)
data, sr = soundfile_backend.load(path, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected, atol=1e-4, rtol=1e-8)
@skipIfNoModule("soundfile")
class TestLoad(LoadTestBase):
"""Test the correctness of `soundfile_backend.load` for various formats"""
@parameterize(
["float32", "int32", "int16"],
[8000, 16000],
[1, 2],
[False, True],
[False, True],
)
def test_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""`soundfile_backend.load` can load wav format correctly."""
self.assert_wav(dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(
["int16"],
[16000],
[2],
[False],
)
def test_wav_large(self, dtype, sample_rate, num_channels, normalize):
"""`soundfile_backend.load` can load large wav file correctly."""
two_hours = 2 * 60 * 60
self.assert_wav(dtype, sample_rate, num_channels, normalize, duration=two_hours)
@parameterize(["float32", "int32", "int16"], [4, 8, 16, 32], [False, True])
def test_multiple_channels(self, dtype, num_channels, channels_first):
"""`soundfile_backend.load` can load wav file with more than 2 channels."""
sample_rate = 8000
normalize = False
self.assert_wav(dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(["int32", "int16"], [8000, 16000], [1, 2], [False, True])
@skipIfFormatNotSupported("NIST")
def test_sphere(self, dtype, sample_rate, num_channels, channels_first):
"""`soundfile_backend.load` can load sphere format correctly."""
self.assert_sphere(dtype, sample_rate, num_channels, channels_first)
@parameterize(["int32", "int16"], [8000, 16000], [1, 2], [False, True])
@skipIfFormatNotSupported("FLAC")
def test_flac(self, dtype, sample_rate, num_channels, channels_first):
"""`soundfile_backend.load` can load flac format correctly."""
self.assert_flac(dtype, sample_rate, num_channels, channels_first)
@skipIfNoModule("soundfile")
class TestLoadFormat(TempDirMixin, PytorchTestCase):
"""Given `format` parameter, `so.load` can load files without extension"""
original = None
path = None
def _make_file(self, format_):
sample_rate = 8000
path_with_ext = self.get_temp_path(f"test.{format_}")
data = get_wav_data("float32", num_channels=2).numpy().T
soundfile.write(path_with_ext, data, sample_rate)
expected = soundfile.read(path_with_ext, dtype="float32")[0].T
path = os.path.splitext(path_with_ext)[0]
os.rename(path_with_ext, path)
return path, expected
def _test_format(self, format_):
"""Providing format allows to read file without extension"""
path, expected = self._make_file(format_)
found, _ = soundfile_backend.load(path)
self.assertEqual(found, expected)
@parameterized.expand(
[
("WAV",),
("wav",),
]
)
def test_wav(self, format_):
self._test_format(format_)
@parameterized.expand(
[
("FLAC",),
("flac",),
]
)
@skipIfFormatNotSupported("FLAC")
def test_flac(self, format_):
self._test_format(format_)
@skipIfNoModule("soundfile")
class TestFileObject(TempDirMixin, PytorchTestCase):
def _test_fileobj(self, ext):
"""Loading audio via file-like object works"""
sample_rate = 16000
path = self.get_temp_path(f"test.{ext}")
data = get_wav_data("float32", num_channels=2).numpy().T
soundfile.write(path, data, sample_rate)
expected = soundfile.read(path, dtype="float32")[0].T
with open(path, "rb") as fileobj:
found, sr = soundfile_backend.load(fileobj)
assert sr == sample_rate
self.assertEqual(expected, found)
def test_fileobj_wav(self):
"""Loading audio via file-like object works"""
self._test_fileobj("wav")
@skipIfFormatNotSupported("FLAC")
def test_fileobj_flac(self):
"""Loading audio via file-like object works"""
self._test_fileobj("flac")
def _test_tarfile(self, ext):
"""Loading audio via file-like object works"""
sample_rate = 16000
audio_file = f"test.{ext}"
audio_path = self.get_temp_path(audio_file)
archive_path = self.get_temp_path("archive.tar.gz")
data = get_wav_data("float32", num_channels=2).numpy().T
soundfile.write(audio_path, data, sample_rate)
expected = soundfile.read(audio_path, dtype="float32")[0].T
with tarfile.TarFile(archive_path, "w") as tarobj:
tarobj.add(audio_path, arcname=audio_file)
with tarfile.TarFile(archive_path, "r") as tarobj:
fileobj = tarobj.extractfile(audio_file)
found, sr = soundfile_backend.load(fileobj)
assert sr == sample_rate
self.assertEqual(expected, found)
def test_tarfile_wav(self):
"""Loading audio via file-like object works"""
self._test_tarfile("wav")
@skipIfFormatNotSupported("FLAC")
def test_tarfile_flac(self):
"""Loading audio via file-like object works"""
self._test_tarfile("flac")
| import os
import tarfile
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal import module_utils as _mod_utils
from torchaudio.backend import soundfile_backend
from torchaudio_unittest.common_utils import (
get_wav_data,
load_wav,
normalize_wav,
PytorchTestCase,
save_wav,
skipIfNoModule,
TempDirMixin,
)
from .common import dtype2subtype, parameterize, skipIfFormatNotSupported
if _mod_utils.is_module_available("soundfile"):
import soundfile
def _get_mock_path(
ext: str,
dtype: str,
sample_rate: int,
num_channels: int,
num_frames: int,
):
return f"{dtype}_{sample_rate}_{num_channels}_{num_frames}.{ext}"
def _get_mock_params(path: str):
filename, ext = path.split(".")
parts = filename.split("_")
return {
"ext": ext,
"dtype": parts[0],
"sample_rate": int(parts[1]),
"num_channels": int(parts[2]),
"num_frames": int(parts[3]),
}
class SoundFileMock:
def __init__(self, path, mode):
assert mode == "r"
self.path = path
self._params = _get_mock_params(path)
self._start = None
@property
def samplerate(self):
return self._params["sample_rate"]
@property
def format(self):
if self._params["ext"] == "wav":
return "WAV"
if self._params["ext"] == "flac":
return "FLAC"
if self._params["ext"] == "ogg":
return "OGG"
if self._params["ext"] in ["sph", "nis", "nist"]:
return "NIST"
@property
def subtype(self):
if self._params["ext"] == "ogg":
return "VORBIS"
return dtype2subtype(self._params["dtype"])
def _prepare_read(self, start, stop, frames):
assert stop is None
self._start = start
return frames
def read(self, frames, dtype, always_2d):
assert always_2d
data = get_wav_data(
dtype,
self._params["num_channels"],
normalize=False,
num_frames=self._params["num_frames"],
channels_first=False,
).numpy()
return data[self._start : self._start + frames]
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
class MockedLoadTest(PytorchTestCase):
def assert_dtype(self, ext, dtype, sample_rate, num_channels, normalize, channels_first):
"""When format is WAV or NIST, normalize=False will return the native dtype Tensor, otherwise float32"""
num_frames = 3 * sample_rate
path = _get_mock_path(ext, dtype, sample_rate, num_channels, num_frames)
expected_dtype = torch.float32 if normalize or ext not in ["wav", "nist"] else getattr(torch, dtype)
with patch("soundfile.SoundFile", SoundFileMock):
found, sr = soundfile_backend.load(path, normalize=normalize, channels_first=channels_first)
assert found.dtype == expected_dtype
assert sample_rate == sr
@parameterize(
["uint8", "int16", "int32", "float32", "float64"],
[8000, 16000],
[1, 2],
[True, False],
[True, False],
)
def test_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""Returns native dtype when normalize=False else float32"""
self.assert_dtype("wav", dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(
["int8", "int16", "int32"],
[8000, 16000],
[1, 2],
[True, False],
[True, False],
)
def test_sphere(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""Returns float32 always"""
self.assert_dtype("sph", dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize([8000, 16000], [1, 2], [True, False], [True, False])
def test_ogg(self, sample_rate, num_channels, normalize, channels_first):
"""Returns float32 always"""
self.assert_dtype("ogg", "int16", sample_rate, num_channels, normalize, channels_first)
@parameterize([8000, 16000], [1, 2], [True, False], [True, False])
def test_flac(self, sample_rate, num_channels, normalize, channels_first):
"""`soundfile_backend.load` can load ogg format."""
self.assert_dtype("flac", "int16", sample_rate, num_channels, normalize, channels_first)
class LoadTestBase(TempDirMixin, PytorchTestCase):
def assert_wav(
self,
dtype,
sample_rate,
num_channels,
normalize,
channels_first=True,
duration=1,
):
"""`soundfile_backend.load` can load wav format correctly.
Wav data loaded with soundfile backend should match those with scipy
"""
path = self.get_temp_path("reference.wav")
num_frames = duration * sample_rate
data = get_wav_data(
dtype,
num_channels,
normalize=normalize,
num_frames=num_frames,
channels_first=channels_first,
)
save_wav(path, data, sample_rate, channels_first=channels_first)
expected = load_wav(path, normalize=normalize, channels_first=channels_first)[0]
data, sr = soundfile_backend.load(path, normalize=normalize, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected)
def assert_sphere(
self,
dtype,
sample_rate,
num_channels,
channels_first=True,
duration=1,
):
"""`soundfile_backend.load` can load SPHERE format correctly."""
path = self.get_temp_path("reference.sph")
num_frames = duration * sample_rate
raw = get_wav_data(
dtype,
num_channels,
num_frames=num_frames,
normalize=False,
channels_first=False,
)
soundfile.write(path, raw, sample_rate, subtype=dtype2subtype(dtype), format="NIST")
expected = normalize_wav(raw.t() if channels_first else raw)
data, sr = soundfile_backend.load(path, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected, atol=1e-4, rtol=1e-8)
def assert_flac(
self,
dtype,
sample_rate,
num_channels,
channels_first=True,
duration=1,
):
"""`soundfile_backend.load` can load FLAC format correctly."""
path = self.get_temp_path("reference.flac")
num_frames = duration * sample_rate
raw = get_wav_data(
dtype,
num_channels,
num_frames=num_frames,
normalize=False,
channels_first=False,
)
soundfile.write(path, raw, sample_rate)
expected = normalize_wav(raw.t() if channels_first else raw)
data, sr = soundfile_backend.load(path, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected, atol=1e-4, rtol=1e-8)
@skipIfNoModule("soundfile")
class TestLoad(LoadTestBase):
"""Test the correctness of `soundfile_backend.load` for various formats"""
@parameterize(
["float32", "int32", "int16"],
[8000, 16000],
[1, 2],
[False, True],
[False, True],
)
def test_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""`soundfile_backend.load` can load wav format correctly."""
self.assert_wav(dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(
["int16"],
[16000],
[2],
[False],
)
def test_wav_large(self, dtype, sample_rate, num_channels, normalize):
"""`soundfile_backend.load` can load large wav file correctly."""
two_hours = 2 * 60 * 60
self.assert_wav(dtype, sample_rate, num_channels, normalize, duration=two_hours)
@parameterize(["float32", "int32", "int16"], [4, 8, 16, 32], [False, True])
def test_multiple_channels(self, dtype, num_channels, channels_first):
"""`soundfile_backend.load` can load wav file with more than 2 channels."""
sample_rate = 8000
normalize = False
self.assert_wav(dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(["int32", "int16"], [8000, 16000], [1, 2], [False, True])
@skipIfFormatNotSupported("NIST")
def test_sphere(self, dtype, sample_rate, num_channels, channels_first):
"""`soundfile_backend.load` can load sphere format correctly."""
self.assert_sphere(dtype, sample_rate, num_channels, channels_first)
@parameterize(["int32", "int16"], [8000, 16000], [1, 2], [False, True])
@skipIfFormatNotSupported("FLAC")
def test_flac(self, dtype, sample_rate, num_channels, channels_first):
"""`soundfile_backend.load` can load flac format correctly."""
self.assert_flac(dtype, sample_rate, num_channels, channels_first)
@skipIfNoModule("soundfile")
class TestLoadFormat(TempDirMixin, PytorchTestCase):
"""Given `format` parameter, `so.load` can load files without extension"""
original = None
path = None
def _make_file(self, format_):
sample_rate = 8000
path_with_ext = self.get_temp_path(f"test.{format_}")
data = get_wav_data("float32", num_channels=2).numpy().T
soundfile.write(path_with_ext, data, sample_rate)
expected = soundfile.read(path_with_ext, dtype="float32")[0].T
path = os.path.splitext(path_with_ext)[0]
os.rename(path_with_ext, path)
return path, expected
def _test_format(self, format_):
"""Providing format allows to read file without extension"""
path, expected = self._make_file(format_)
found, _ = soundfile_backend.load(path)
self.assertEqual(found, expected)
@parameterized.expand(
[
("WAV",),
("wav",),
]
)
def test_wav(self, format_):
self._test_format(format_)
@parameterized.expand(
[
("FLAC",),
("flac",),
]
)
@skipIfFormatNotSupported("FLAC")
def test_flac(self, format_):
self._test_format(format_)
@skipIfNoModule("soundfile")
class TestFileObject(TempDirMixin, PytorchTestCase):
def _test_fileobj(self, ext):
"""Loading audio via file-like object works"""
sample_rate = 16000
path = self.get_temp_path(f"test.{ext}")
data = get_wav_data("float32", num_channels=2).numpy().T
soundfile.write(path, data, sample_rate)
expected = soundfile.read(path, dtype="float32")[0].T
with open(path, "rb") as fileobj:
found, sr = soundfile_backend.load(fileobj)
assert sr == sample_rate
self.assertEqual(expected, found)
def test_fileobj_wav(self):
"""Loading audio via file-like object works"""
self._test_fileobj("wav")
@skipIfFormatNotSupported("FLAC")
def test_fileobj_flac(self):
"""Loading audio via file-like object works"""
self._test_fileobj("flac")
def _test_tarfile(self, ext):
"""Loading audio via file-like object works"""
sample_rate = 16000
audio_file = f"test.{ext}"
audio_path = self.get_temp_path(audio_file)
archive_path = self.get_temp_path("archive.tar.gz")
data = get_wav_data("float32", num_channels=2).numpy().T
soundfile.write(audio_path, data, sample_rate)
expected = soundfile.read(audio_path, dtype="float32")[0].T
with tarfile.TarFile(archive_path, "w") as tarobj:
tarobj.add(audio_path, arcname=audio_file)
with tarfile.TarFile(archive_path, "r") as tarobj:
fileobj = tarobj.extractfile(audio_file)
found, sr = soundfile_backend.load(fileobj)
assert sr == sample_rate
self.assertEqual(expected, found)
def test_tarfile_wav(self):
"""Loading audio via file-like object works"""
self._test_tarfile("wav")
@skipIfFormatNotSupported("FLAC")
def test_tarfile_flac(self):
"""Loading audio via file-like object works"""
self._test_tarfile("flac")
| en | 0.384736 | When format is WAV or NIST, normalize=False will return the native dtype Tensor, otherwise float32 Returns native dtype when normalize=False else float32 Returns float32 always Returns float32 always `soundfile_backend.load` can load ogg format. `soundfile_backend.load` can load wav format correctly. Wav data loaded with soundfile backend should match those with scipy `soundfile_backend.load` can load SPHERE format correctly. `soundfile_backend.load` can load FLAC format correctly. Test the correctness of `soundfile_backend.load` for various formats `soundfile_backend.load` can load wav format correctly. `soundfile_backend.load` can load large wav file correctly. `soundfile_backend.load` can load wav file with more than 2 channels. `soundfile_backend.load` can load sphere format correctly. `soundfile_backend.load` can load flac format correctly. Given `format` parameter, `so.load` can load files without extension Providing format allows to read file without extension Loading audio via file-like object works Loading audio via file-like object works Loading audio via file-like object works Loading audio via file-like object works Loading audio via file-like object works Loading audio via file-like object works | 2.313033 | 2 |
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/python_api/process/io/TestProcessIO.py | Polidea/SiriusObfuscator | 427 | 6631605 | """Test Python APIs for process IO."""
from __future__ import print_function
import os
import sys
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ProcessIOTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Get the full path to our executable to be debugged.
self.exe = os.path.join(os.getcwd(), "process_io")
self.local_input_file = os.path.join(os.getcwd(), "input.txt")
self.local_output_file = os.path.join(os.getcwd(), "output.txt")
self.local_error_file = os.path.join(os.getcwd(), "error.txt")
self.input_file = os.path.join(
self.get_process_working_directory(), "input.txt")
self.output_file = os.path.join(
self.get_process_working_directory(), "output.txt")
self.error_file = os.path.join(
self.get_process_working_directory(), "error.txt")
self.lines = ["Line 1", "Line 2", "Line 3"]
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdin_by_api(self):
"""Exercise SBProcess.PutSTDIN()."""
self.build()
self.create_target()
self.run_process(True)
output = self.process.GetSTDOUT(1000)
self.check_process_output(output, output)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdin_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDIN without specifying STDOUT or STDERR."""
self.build()
self.create_target()
self.redirect_stdin()
self.run_process(False)
output = self.process.GetSTDOUT(1000)
self.check_process_output(output, output)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdout_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDOUT without specifying STDIN or STDERR."""
self.build()
self.create_target()
self.redirect_stdout()
self.run_process(True)
output = self.read_output_file_and_delete()
error = self.process.GetSTDOUT(1000)
self.check_process_output(output, error)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stderr_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDERR without specifying STDIN or STDOUT."""
self.build()
self.create_target()
self.redirect_stderr()
self.run_process(True)
output = self.process.GetSTDOUT(1000)
error = self.read_error_file_and_delete()
self.check_process_output(output, error)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdout_stderr_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDOUT and STDERR without redirecting STDIN."""
self.build()
self.create_target()
self.redirect_stdout()
self.redirect_stderr()
self.run_process(True)
output = self.read_output_file_and_delete()
error = self.read_error_file_and_delete()
self.check_process_output(output, error)
# target_file - path on local file system or remote file system if running remote
# local_file - path on local system
def read_file_and_delete(self, target_file, local_file):
if lldb.remote_platform:
self.runCmd('platform get-file "{remote}" "{local}"'.format(
remote=target_file, local=local_file))
self.assertTrue(
os.path.exists(local_file),
'Make sure "{local}" file exists'.format(
local=local_file))
f = open(local_file, 'r')
contents = f.read()
f.close()
# TODO: add 'platform delete-file' file command
# if lldb.remote_platform:
# self.runCmd('platform delete-file "{remote}"'.format(remote=target_file))
os.unlink(local_file)
return contents
def read_output_file_and_delete(self):
return self.read_file_and_delete(
self.output_file, self.local_output_file)
def read_error_file_and_delete(self):
return self.read_file_and_delete(
self.error_file, self.local_error_file)
def create_target(self):
'''Create the target and launch info that will be used by all tests'''
self.target = self.dbg.CreateTarget(self.exe)
self.launch_info = lldb.SBLaunchInfo([self.exe])
self.launch_info.SetWorkingDirectory(
self.get_process_working_directory())
def redirect_stdin(self):
'''Redirect STDIN (file descriptor 0) to use our input.txt file
Make the input.txt file to use when redirecting STDIN, setup a cleanup action
to delete the input.txt at the end of the test in case exceptions are thrown,
and redirect STDIN in the launch info.'''
f = open(self.local_input_file, 'w')
for line in self.lines:
f.write(line + "\n")
f.close()
if lldb.remote_platform:
self.runCmd('platform put-file "{local}" "{remote}"'.format(
local=self.local_input_file, remote=self.input_file))
# Clean-up helper: delete the local input.txt so the next test case starts
# from a clean slate.
def cleanup():
os.unlink(self.local_input_file)
# TODO: add 'platform delete-file' file command
# if lldb.remote_platform:
# self.runCmd('platform delete-file "{remote}"'.format(remote=self.input_file))
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.launch_info.AddOpenFileAction(0, self.input_file, True, False)
def redirect_stdout(self):
'''Redirect STDOUT (file descriptor 1) to use our output.txt file'''
self.launch_info.AddOpenFileAction(1, self.output_file, False, True)
def redirect_stderr(self):
'''Redirect STDERR (file descriptor 2) to use our error.txt file'''
self.launch_info.AddOpenFileAction(2, self.error_file, False, True)
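# Note: SBLaunchInfo.AddOpenFileAction(fd, path, read, write) attaches the given
# file to a file descriptor at launch time; fd 0/1/2 are stdin/stdout/stderr and
# the two booleans select whether the launched process opens the file for
# reading and/or writing.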
def run_process(self, put_stdin):
'''Run the process to completion and optionally put lines to STDIN via the API if "put_stdin" is True'''
# Set the breakpoints
self.breakpoint = self.target.BreakpointCreateBySourceRegex(
'Set breakpoint here', lldb.SBFileSpec("main.c"))
self.assertTrue(
self.breakpoint.GetNumLocations() > 0,
VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
error = lldb.SBError()
# This should launch the process and it should exit by the time we get back
# because we have synchronous mode enabled
self.process = self.target.Launch(self.launch_info, error)
self.assertTrue(
error.Success(),
"Make sure process launched successfully")
self.assertTrue(self.process, PROCESS_IS_VALID)
if self.TraceOn():
print("process launched.")
# Frame #0 should be at our breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
self.process, self.breakpoint)
self.assertTrue(len(threads) == 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
if self.TraceOn():
print("process stopped at breakpoint, sending STDIN via LLDB API.")
# Write data to stdin via the public API if we were asked to
if put_stdin:
for line in self.lines:
self.process.PutSTDIN(line + "\n")
# Let process continue so it will exit
self.process.Continue()
state = self.process.GetState()
self.assertTrue(state == lldb.eStateExited, PROCESS_IS_VALID)
def check_process_output(self, output, error):
# Since we launched the process without specifying stdin/out/err,
# a pseudo terminal is used for stdout/err, and we are satisfied
# once "input line=>1" appears in stdout.
# See also main.c.
if self.TraceOn():
print("output = '%s'" % output)
print("error = '%s'" % error)
for line in self.lines:
check_line = 'input line to stdout: %s' % (line)
self.assertTrue(
check_line in output,
"verify stdout line shows up in STDOUT")
for line in self.lines:
check_line = 'input line to stderr: %s' % (line)
self.assertTrue(
check_line in error,
"verify stderr line shows up in STDERR")
| """Test Python APIs for process IO."""
from __future__ import print_function
import os
import sys
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ProcessIOTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Get the full path to our executable to be debugged.
self.exe = os.path.join(os.getcwd(), "process_io")
self.local_input_file = os.path.join(os.getcwd(), "input.txt")
self.local_output_file = os.path.join(os.getcwd(), "output.txt")
self.local_error_file = os.path.join(os.getcwd(), "error.txt")
self.input_file = os.path.join(
self.get_process_working_directory(), "input.txt")
self.output_file = os.path.join(
self.get_process_working_directory(), "output.txt")
self.error_file = os.path.join(
self.get_process_working_directory(), "error.txt")
self.lines = ["Line 1", "Line 2", "Line 3"]
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdin_by_api(self):
"""Exercise SBProcess.PutSTDIN()."""
self.build()
self.create_target()
self.run_process(True)
output = self.process.GetSTDOUT(1000)
self.check_process_output(output, output)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdin_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDIN without specifying STDOUT or STDERR."""
self.build()
self.create_target()
self.redirect_stdin()
self.run_process(False)
output = self.process.GetSTDOUT(1000)
self.check_process_output(output, output)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdout_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDOUT without specifying STDIN or STDERR."""
self.build()
self.create_target()
self.redirect_stdout()
self.run_process(True)
output = self.read_output_file_and_delete()
error = self.process.GetSTDOUT(1000)
self.check_process_output(output, error)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stderr_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDERR without specifying STDIN or STDOUT."""
self.build()
self.create_target()
self.redirect_stderr()
self.run_process(True)
output = self.process.GetSTDOUT(1000)
error = self.read_error_file_and_delete()
self.check_process_output(output, error)
@skipIfWindows # stdio manipulation unsupported on Windows
@add_test_categories(['pyapi'])
@expectedFlakeyLinux(bugnumber="llvm.org/pr26437")
def test_stdout_stderr_redirection(self):
"""Exercise SBLaunchInfo::AddOpenFileAction() for STDOUT and STDERR without redirecting STDIN."""
self.build()
self.create_target()
self.redirect_stdout()
self.redirect_stderr()
self.run_process(True)
output = self.read_output_file_and_delete()
error = self.read_error_file_and_delete()
self.check_process_output(output, error)
# target_file - path on local file system or remote file system if running remote
# local_file - path on local system
def read_file_and_delete(self, target_file, local_file):
if lldb.remote_platform:
self.runCmd('platform get-file "{remote}" "{local}"'.format(
remote=target_file, local=local_file))
self.assertTrue(
os.path.exists(local_file),
'Make sure "{local}" file exists'.format(
local=local_file))
f = open(local_file, 'r')
contents = f.read()
f.close()
# TODO: add 'platform delete-file' file command
# if lldb.remote_platform:
# self.runCmd('platform delete-file "{remote}"'.format(remote=target_file))
os.unlink(local_file)
return contents
def read_output_file_and_delete(self):
return self.read_file_and_delete(
self.output_file, self.local_output_file)
def read_error_file_and_delete(self):
return self.read_file_and_delete(
self.error_file, self.local_error_file)
def create_target(self):
'''Create the target and launch info that will be used by all tests'''
self.target = self.dbg.CreateTarget(self.exe)
self.launch_info = lldb.SBLaunchInfo([self.exe])
self.launch_info.SetWorkingDirectory(
self.get_process_working_directory())
def redirect_stdin(self):
'''Redirect STDIN (file descriptor 0) to use our input.txt file
Make the input.txt file to use when redirecting STDIN, setup a cleanup action
to delete the input.txt at the end of the test in case exceptions are thrown,
and redirect STDIN in the launch info.'''
f = open(self.local_input_file, 'w')
for line in self.lines:
f.write(line + "\n")
f.close()
if lldb.remote_platform:
self.runCmd('platform put-file "{local}" "{remote}"'.format(
local=self.local_input_file, remote=self.input_file))
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
os.unlink(self.local_input_file)
# TODO: add 'platform delete-file' file command
# if lldb.remote_platform:
# self.runCmd('platform delete-file "{remote}"'.format(remote=self.input_file))
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.launch_info.AddOpenFileAction(0, self.input_file, True, False)
def redirect_stdout(self):
'''Redirect STDOUT (file descriptor 1) to use our output.txt file'''
self.launch_info.AddOpenFileAction(1, self.output_file, False, True)
def redirect_stderr(self):
'''Redirect STDERR (file descriptor 2) to use our error.txt file'''
self.launch_info.AddOpenFileAction(2, self.error_file, False, True)
def run_process(self, put_stdin):
'''Run the process to completion and optionally put lines to STDIN via the API if "put_stdin" is True'''
# Set the breakpoints
self.breakpoint = self.target.BreakpointCreateBySourceRegex(
'Set breakpoint here', lldb.SBFileSpec("main.c"))
self.assertTrue(
self.breakpoint.GetNumLocations() > 0,
VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
error = lldb.SBError()
# This should launch the process and it should exit by the time we get back
# because we have synchronous mode enabled
self.process = self.target.Launch(self.launch_info, error)
self.assertTrue(
error.Success(),
"Make sure process launched successfully")
self.assertTrue(self.process, PROCESS_IS_VALID)
if self.TraceOn():
print("process launched.")
# Frame #0 should be at our breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
self.process, self.breakpoint)
self.assertTrue(len(threads) == 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
if self.TraceOn():
print("process stopped at breakpoint, sending STDIN via LLDB API.")
# Write data to stdin via the public API if we were asked to
if put_stdin:
for line in self.lines:
self.process.PutSTDIN(line + "\n")
# Let process continue so it will exit
self.process.Continue()
state = self.process.GetState()
self.assertTrue(state == lldb.eStateExited, PROCESS_IS_VALID)
def check_process_output(self, output, error):
# Since we launched the process without specifying stdin/out/err,
# a pseudo terminal is used for stdout/err, and we are satisfied
# once "input line=>1" appears in stdout.
# See also main.c.
if self.TraceOn():
print("output = '%s'" % output)
print("error = '%s'" % error)
for line in self.lines:
check_line = 'input line to stdout: %s' % (line)
self.assertTrue(
check_line in output,
"verify stdout line shows up in STDOUT")
for line in self.lines:
check_line = 'input line to stderr: %s' % (line)
self.assertTrue(
check_line in error,
"verify stderr line shows up in STDERR") | en | 0.766122 | Test Python APIs for process IO. # Call super's setUp(). # Get the full path to our executable to be debugged. # stdio manipulation unsupported on Windows Exercise SBProcess.PutSTDIN(). # stdio manipulation unsupported on Windows Exercise SBLaunchInfo::AddOpenFileAction() for STDIN without specifying STDOUT or STDERR. # stdio manipulation unsupported on Windows Exercise SBLaunchInfo::AddOpenFileAction() for STDOUT without specifying STDIN or STDERR. # stdio manipulation unsupported on Windows Exercise SBLaunchInfo::AddOpenFileAction() for STDERR without specifying STDIN or STDOUT. # stdio manipulation unsupported on Windows Exercise SBLaunchInfo::AddOpenFileAction() for STDOUT and STDERR without redirecting STDIN. # target_file - path on local file system or remote file system if running remote # local_file - path on local system # TODO: add 'platform delete-file' file command # if lldb.remote_platform: # self.runCmd('platform delete-file "{remote}"'.format(remote=target_file)) Create the target and launch info that will be used by all tests Redirect STDIN (file descriptor 0) to use our input.txt file Make the input.txt file to use when redirecting STDIN, setup a cleanup action to delete the input.txt at the end of the test in case exceptions are thrown, and redirect STDIN in the launch info. # This is the function to remove the custom formats in order to have a # clean slate for the next test case. # TODO: add 'platform delete-file' file command # if lldb.remote_platform: # self.runCmd('platform delete-file "{remote}"'.format(remote=self.input_file)) # Execute the cleanup function during test case tear down. Redirect STDOUT (file descriptor 1) to use our output.txt file Redirect STDERR (file descriptor 2) to use our error.txt file Run the process to completion and optionally put lines to STDIN via the API if "put_stdin" is True # Set the breakpoints # Launch the process, and do not stop at the entry point. # This should launch the process and it should exit by the time we get back # because we have synchronous mode enabled # Frame #0 should be at our breakpoint. # Write data to stdin via the public API if we were asked to # Let process continue so it will exit # Since we launched the process without specifying stdin/out/err, # a pseudo terminal is used for stdout/err, and we are satisfied # once "input line=>1" appears in stdout. # See also main.c. | 2.046483 | 2 |
budgetml/gcp/addresses.py | strickvl/budgetml | 1,316 | 6631606 | import logging
import time
def promote_ephemeral_ip(
compute,
project,
region,
ephemeral_ip,
address_name,
subnetwork):
config = {
"addressType": "INTERNAL",
"address": ephemeral_ip,
"name": address_name,
"subnetwork": subnetwork
}
logging.debug(f'Promoting IP with config: {config}')
res = compute.addresses().insert(
project=project,
region=region,
body=config).execute()
logging.debug(f'Ephemeral IP {ephemeral_ip} promoted. Response: {res}')
return res
def create_static_ip(compute, project, region, static_ip_name):
config = {
'name': static_ip_name
}
compute.addresses().insert(
project=project,
region=region,
body=config).execute()
time.sleep(3)
req = compute.addresses().get(
project=project,
region=region,
address=static_ip_name)
res = req.execute()
logging.debug(f'Static IP {static_ip_name} created with response: {res}')
return res
def release_static_ip(compute, project, region, static_ip):
req = compute.addresses().delete(
project=project,
region=region,
address=static_ip)
res = req.execute()
logging.debug(f'Static IP {static_ip} deleted with response: {res}')
return res
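# Example wiring (illustrative only; the project, region and address names below
# are placeholders, not values used by this module):
#
#   from googleapiclient import discovery
#   compute = discovery.build("compute", "v1")
#   created = create_static_ip(compute, "my-project", "us-central1", "budgetml-ip")
#   release_static_ip(compute, "my-project", "us-central1", "budgetml-ip")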
| import logging
import time
def promote_ephemeral_ip(
compute,
project,
region,
ephemeral_ip,
address_name,
subnetwork):
config = {
"addressType": "INTERNAL",
"address": ephemeral_ip,
"name": address_name,
"subnetwork": subnetwork
}
logging.debug(f'Promoting IP with config: {config}')
res = compute.addresses().insert(
project=project,
region=region,
body=config).execute()
logging.debug(f'Ephemeral IP {ephemeral_ip} promoted. Response: {res}')
return res
def create_static_ip(compute, project, region, static_ip_name):
config = {
'name': static_ip_name
}
compute.addresses().insert(
project=project,
region=region,
body=config).execute()
time.sleep(3)
req = compute.addresses().get(
project=project,
region=region,
address=static_ip_name)
res = req.execute()
logging.debug(f'Static IP {static_ip_name} created with response: {res}')
return res
def release_static_ip(compute, project, region, static_ip):
req = compute.addresses().delete(
project=project,
region=region,
address=static_ip)
res = req.execute()
logging.debug(f'Static IP {static_ip} deleted with response: {res}')
return res | none | 1 | 2.352772 | 2 |
|
pytaon/decorators.py | rodrigocam/pytaon | 5 | 6631607 | def vector_argument(func):
"""
Decorator that transforms a function that takes a vector as its only positional
argument (and any number of keyword arguments) and returns a function that
accepts either a vector or a tuple as a single argument, or two positional
arguments, one for each coordinate.
Examples:
>>> @vector_argument
... def length_sqr(vec):
... return vec.x**2 + vec.y**2
Now the function accepts several signatures:
>>> length_sqr(Vec2d(3, 4))
25.0
>>> length_sqr((3, 4))
25.0
>>> length_sqr(3, 4)
25.0
"""
... # Implementar!
return func
def vector_argument_method(method):
"""
Similar to :func:`vector_argument`, but applicable to class methods
that receive "self" as the first argument.
"""
... # Implementar!
return method
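# One possible shape of the solution, shown only as an illustration (the
# "Implementar!" stubs above are intentionally left for the exercise). It assumes
# the wrapped function only needs an object exposing .x and .y; a real solution
# would build the package's Vec2d instead of SimpleNamespace. Local imports keep
# the sketch self-contained.
def _vector_argument_sketch(func):
    from functools import wraps
    from types import SimpleNamespace

    @wraps(func)
    def wrapper(*args, **kwargs):
        if len(args) == 2:           # two scalar coordinates: f(x, y)
            x, y = args
        elif hasattr(args[0], "x"):  # a vector-like object: f(vec)
            x, y = args[0].x, args[0].y
        else:                        # a plain tuple/sequence: f((x, y))
            x, y = args[0]
        return func(SimpleNamespace(x=x, y=y), **kwargs)

    return wrapper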
| def vector_argument(func):
"""
Decorador que transforma função que recebe um vetor como único argumento
posicional (e qualquer número de argumentos passados por nome) e retorna
uma função que aceita tanto um vetor ou tupla como único argumento ou
dois argumentos posicionais com cada coordenada.
Examples:
>>> @vector_argument
... def length_sqr(vec):
... return vec.x**2 + vec.y**2
Agora a função aceita várias assinaturas:
>>> length_sqr(Vec2d(3, 4))
25.0
>>> length_sqr((3, 4))
25.0
>>> length_sqr(3, 4)
25.0
"""
... # Implementar!
return func
def vector_argument_method(method):
"""
Similar à função :func:`vector_argument`, mas aplicável a métodos
de classe que recebem "self" como primeiro argumento.
"""
... # Implementar!
return method
| pt | 0.81312 | Decorador que transforma função que recebe um vetor como único argumento posicional (e qualquer número de argumentos passados por nome) e retorna uma função que aceita tanto um vetor ou tupla como único argumento ou dois argumentos posicionais com cada coordenada. Examples: >>> @vector_argument ... def length_sqr(vec): ... return vec.x**2 + vec.y**2 Agora a função aceita várias assinaturas: >>> length_sqr(Vec2d(3, 4)) 25.0 >>> length_sqr((3, 4)) 25.0 >>> length_sqr(3, 4) 25.0 # Implementar! Similar à função :func:`vector_argument`, mas aplicável a métodos de classe que recebem "self" como primeiro argumento. # Implementar! | 4.382422 | 4 |
model_monitor_template.py | cwiecha/eaisystems2022 | 0 | 6631608 | import boto3
from botocore.config import Config
from boto3.dynamodb.conditions import Key, Attr
import time
import csv
from datetime import datetime
import requests
import sys
my_config = Config(
region_name = '<your region>'
)
# Get the service resource.
session = boto3.Session(
aws_access_key_id='<key>',
aws_secret_access_key='<secret access key>'
)
dynamodb = session.resource('dynamodb', config=my_config)
update_table = dynamodb.Table('<retraining table name>')
import ast
import shutil
def build_training_update():
list_of_lists = []
response = update_table.scan()
items = response['Items']
print(items)
for item in items:
# build the training feature set
features_str = item['Features']
features = ast.literal_eval(features_str)
#features.append(item['Label'])
features.insert(0, item['partition_key'])
print(features)
list_of_lists.append( features )
# copy original training data to new training_file_name.csv
# check https://docs.python.org/3/library/shutil.html for info on how to do the file system copy!
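# e.g. one way to do the copy described above (both file names are placeholders):
#   shutil.copyfile("<original training file name>", "<new training file name>")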
with open("<new training file name>", "a") as f:
wr = csv.writer(f)
wr.writerows( list_of_lists )
return
# use the example REST invocations in the model driver python script to then reprocess your updated training data.
# be sure to do the "context" step as well as the retraining step
# then run a set of scoring tests to check the service is still operational
def do_model_update():
# use the pattern from model_drive.py to pre-process and retrain your model, calling the credit service using the REST API
return
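# A rough sketch of what do_model_update() could look like (illustrative only --
# the endpoint paths and payloads below are assumptions, not the course's actual
# credit-service API; follow the REST calls in model_drive.py for the real ones):
#
#   base_url = "<credit service base url>"
#   requests.post(f"{base_url}/context", json={"training_file": "<new training file name>"})
#   requests.post(f"{base_url}/retrain")
#   requests.post(f"{base_url}/score", json={"features": ["<sample features>"]})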
| import boto3
from botocore.config import Config
from boto3.dynamodb.conditions import Key, Attr
import time
import csv
from datetime import datetime
import requests
import sys
my_config = Config(
region_name = '<your region>'
)
# Get the service resource.
session = boto3.Session(
aws_access_key_id='<key>',
aws_secret_access_key='<secret access key>'
)
dynamodb = session.resource('dynamodb', config=my_config)
update_table = dynamodb.Table('<retraining table name>')
import ast
import shutil
def build_training_update():
list_of_lists = []
response = update_table.scan()
items = response['Items']
print(items)
for item in items:
# build the training feature set
features_str = item['Features']
features = ast.literal_eval(features_str)
#features.append(item['Label'])
features.insert(0, item['partition_key'])
print(features)
list_of_lists.append( features )
# copy original training data to new training_file_name.csv
# check https://docs.python.org/3/library/shutil.html for info on how to do the file system copy!
with open("<new training file name>", "a") as f:
wr = csv.writer(f)
wr.writerows( list_of_lists )
return
# use the example REST invocations in the model driver python script to then reprocess your updated training data.
# be sure to do the "context" step as well as the retraining step
# then run a set of scoring tests to check the service is still operational
def do_model_update():
    # use the pattern from model_drive.py to pre-process and retrain your model, calling the credit service using the REST API
return | en | 0.856682 | # Get the service resource. # build the training feature set #features.append(item['Label']) # copy original training data to new training_file_name.csv # check https://docs.python.org/3/library/shutil.html for info on how to do the file system copy! # use the example REST invocations in the model driver python script to then reprocess your updated training data. # be sure to do the "context" step as well as the retraining step # then run a set of scoring tests to check the service is still operational # use the pattern from model_drive.py to pre-process and retrain you model, calling the credit service using the REST API | 2.277534 | 2 |
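The template above leaves the file copy as a comment; a small sketch of that step using shutil (the file names are hypothetical, since the template deliberately keeps them as placeholders):
import shutil

def prepare_updated_training_file(original_csv="training_data.csv",
                                  updated_csv="training_data_updated.csv"):
    # Start the new training set as a copy of the original data, then
    # build_training_update() can append the rows scanned from DynamoDB to it.
    shutil.copyfile(original_csv, updated_csv)
    return updated_csv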
fuckcovid/auth/views.py | jespino/hospitales-covid19 | 0 | 6631609 | from fuckcovid.auth.models import User
from django.shortcuts import get_object_or_404
from .serializers import UserSerializer
from rest_framework import viewsets
from rest_framework.response import Response
class UserViewSet(viewsets.ModelViewSet):
"""
A ViewSet for listing and retrieving users.
"""
serializer_class = UserSerializer
queryset = User.objects.all()
| from fuckcovid.auth.models import User
from django.shortcuts import get_object_or_404
from .serializers import UserSerializer
from rest_framework import viewsets
from rest_framework.response import Response
class UserViewSet(viewsets.ModelViewSet):
"""
A ViewSet for listing and retrieving users.
"""
serializer_class = UserSerializer
queryset = User.objects.all()
| en | 0.680043 | A ViewSet for listing and retrieving users. | 1.923653 | 2 |
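For context, a ModelViewSet like the one above is normally exposed through a DRF router; a hypothetical urls.py for this app (the URL prefix is an assumption) might look like:
from rest_framework import routers
from fuckcovid.auth.views import UserViewSet

router = routers.DefaultRouter()
router.register(r'users', UserViewSet)  # provides /users/ and /users/<pk>/

urlpatterns = router.urls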
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/g/generic_alias/generic_alias_collections_py37_with_typing.py | ciskoinch8/vimrc | 463 | 6631610 | <reponame>ciskoinch8/vimrc
"""Test generic alias support for stdlib types (added in PY39).
Raise [unsubscriptable-object] error for PY37 and PY38.
Make sure `import typing` doesn't change anything.
"""
# flake8: noqa
# pylint: disable=missing-docstring,pointless-statement,unused-import
# pylint: disable=too-few-public-methods,multiple-statements,line-too-long
import abc
import collections
import collections.abc
import contextlib
import re
import typing
# special
tuple[int, int] # [unsubscriptable-object]
type[int] # [unsubscriptable-object]
collections.abc.Callable[[int], str] # [unsubscriptable-object]
# builtins
dict[int, str] # [unsubscriptable-object]
list[int] # [unsubscriptable-object]
set[int] # [unsubscriptable-object]
frozenset[int] # [unsubscriptable-object]
# collections
collections.defaultdict[int, str] # [unsubscriptable-object]
collections.OrderedDict[int, str] # [unsubscriptable-object]
collections.ChainMap[int, str] # [unsubscriptable-object]
collections.Counter[int] # [unsubscriptable-object]
collections.deque[int] # [unsubscriptable-object]
# collections.abc
collections.abc.Set[int] # [unsubscriptable-object]
collections.abc.Collection[int] # [unsubscriptable-object]
collections.abc.Container[int] # [unsubscriptable-object]
collections.abc.ItemsView[int, str] # [unsubscriptable-object]
collections.abc.KeysView[int] # [unsubscriptable-object]
collections.abc.Mapping[int, str] # [unsubscriptable-object]
collections.abc.MappingView[int] # [unsubscriptable-object]
collections.abc.MutableMapping[int, str] # [unsubscriptable-object]
collections.abc.MutableSequence[int] # [unsubscriptable-object]
collections.abc.MutableSet[int] # [unsubscriptable-object]
collections.abc.Sequence[int] # [unsubscriptable-object]
collections.abc.ValuesView[int] # [unsubscriptable-object]
collections.abc.Iterable[int] # [unsubscriptable-object]
collections.abc.Iterator[int] # [unsubscriptable-object]
collections.abc.Generator[int, None, None] # [unsubscriptable-object]
collections.abc.Reversible[int] # [unsubscriptable-object]
collections.abc.Coroutine[list[str], str, int] # [unsubscriptable-object,unsubscriptable-object]
collections.abc.AsyncGenerator[int, None] # [unsubscriptable-object]
collections.abc.AsyncIterable[int] # [unsubscriptable-object]
collections.abc.AsyncIterator[int] # [unsubscriptable-object]
collections.abc.Awaitable[int] # [unsubscriptable-object]
# contextlib
contextlib.AbstractContextManager[int] # [unsubscriptable-object]
contextlib.AbstractAsyncContextManager[int] # [unsubscriptable-object]
# re
re.Pattern[str] # [unsubscriptable-object]
re.Match[str] # [unsubscriptable-object]
# unsubscriptable types
collections.abc.Hashable
collections.abc.Sized
collections.abc.Hashable[int] # [unsubscriptable-object]
collections.abc.Sized[int] # [unsubscriptable-object]
# subscriptable with Python 3.9
collections.abc.ByteString[int] # [unsubscriptable-object]
# Missing implementation for 'collections.abc' derived classes
class DerivedHashable(collections.abc.Hashable): # [abstract-method] # __hash__
pass
class DerivedIterable(collections.abc.Iterable[int]): # [unsubscriptable-object]
pass
class DerivedCollection(collections.abc.Collection[int]): # [unsubscriptable-object]
pass
# No implementation required for 'builtins' and 'collections' types
class DerivedList(list[int]): # [unsubscriptable-object]
pass
class DerivedSet(set[int]): # [unsubscriptable-object]
pass
class DerivedOrderedDict(collections.OrderedDict[int, str]): # [unsubscriptable-object]
pass
class DerivedListIterable(list[collections.abc.Iterable[int]]): # [unsubscriptable-object,unsubscriptable-object]
pass
# Multiple generic base classes
class DerivedMultiple(collections.abc.Sized, collections.abc.Hashable): # [abstract-method,abstract-method]
pass
class CustomAbstractCls1(abc.ABC):
pass
class CustomAbstractCls2(collections.abc.Sized, collections.abc.Iterable[CustomAbstractCls1]): # [abstract-method,unsubscriptable-object] # __len__
pass
class CustomImplementation(CustomAbstractCls2): # [abstract-method] # __len__
pass
# Type annotations
var_tuple: tuple[int, int] # [unsubscriptable-object]
var_dict: dict[int, str] # [unsubscriptable-object]
var_orderedDict: collections.OrderedDict[int, str] # [unsubscriptable-object]
var_container: collections.abc.Container[int] # [unsubscriptable-object]
var_sequence: collections.abc.Sequence[int] # [unsubscriptable-object]
var_iterable: collections.abc.Iterable[int] # [unsubscriptable-object]
var_awaitable: collections.abc.Awaitable[int] # [unsubscriptable-object]
var_contextmanager: contextlib.AbstractContextManager[int] # [unsubscriptable-object]
var_pattern: re.Pattern[int] # [unsubscriptable-object]
var_bytestring: collections.abc.ByteString
var_hashable: collections.abc.Hashable
var_sized: collections.abc.Sized
# Type annotation with unsubscriptable type
var_int: int[int] # [unsubscriptable-object]
var_hashable2: collections.abc.Hashable[int] # [unsubscriptable-object]
var_sized2: collections.abc.Sized[int] # [unsubscriptable-object]
# subscriptable with Python 3.9
var_bytestring2: collections.abc.ByteString[int] # [unsubscriptable-object]
| """Test generic alias support for stdlib types (added in PY39).
Raise [unsubscriptable-object] error for PY37 and PY38.
Make sure `import typing` doesn't change anything.
"""
# flake8: noqa
# pylint: disable=missing-docstring,pointless-statement,unused-import
# pylint: disable=too-few-public-methods,multiple-statements,line-too-long
import abc
import collections
import collections.abc
import contextlib
import re
import typing
# special
tuple[int, int] # [unsubscriptable-object]
type[int] # [unsubscriptable-object]
collections.abc.Callable[[int], str] # [unsubscriptable-object]
# builtins
dict[int, str] # [unsubscriptable-object]
list[int] # [unsubscriptable-object]
set[int] # [unsubscriptable-object]
frozenset[int] # [unsubscriptable-object]
# collections
collections.defaultdict[int, str] # [unsubscriptable-object]
collections.OrderedDict[int, str] # [unsubscriptable-object]
collections.ChainMap[int, str] # [unsubscriptable-object]
collections.Counter[int] # [unsubscriptable-object]
collections.deque[int] # [unsubscriptable-object]
# collections.abc
collections.abc.Set[int] # [unsubscriptable-object]
collections.abc.Collection[int] # [unsubscriptable-object]
collections.abc.Container[int] # [unsubscriptable-object]
collections.abc.ItemsView[int, str] # [unsubscriptable-object]
collections.abc.KeysView[int] # [unsubscriptable-object]
collections.abc.Mapping[int, str] # [unsubscriptable-object]
collections.abc.MappingView[int] # [unsubscriptable-object]
collections.abc.MutableMapping[int, str] # [unsubscriptable-object]
collections.abc.MutableSequence[int] # [unsubscriptable-object]
collections.abc.MutableSet[int] # [unsubscriptable-object]
collections.abc.Sequence[int] # [unsubscriptable-object]
collections.abc.ValuesView[int] # [unsubscriptable-object]
collections.abc.Iterable[int] # [unsubscriptable-object]
collections.abc.Iterator[int] # [unsubscriptable-object]
collections.abc.Generator[int, None, None] # [unsubscriptable-object]
collections.abc.Reversible[int] # [unsubscriptable-object]
collections.abc.Coroutine[list[str], str, int] # [unsubscriptable-object,unsubscriptable-object]
collections.abc.AsyncGenerator[int, None] # [unsubscriptable-object]
collections.abc.AsyncIterable[int] # [unsubscriptable-object]
collections.abc.AsyncIterator[int] # [unsubscriptable-object]
collections.abc.Awaitable[int] # [unsubscriptable-object]
# contextlib
contextlib.AbstractContextManager[int] # [unsubscriptable-object]
contextlib.AbstractAsyncContextManager[int] # [unsubscriptable-object]
# re
re.Pattern[str] # [unsubscriptable-object]
re.Match[str] # [unsubscriptable-object]
# unsubscriptable types
collections.abc.Hashable
collections.abc.Sized
collections.abc.Hashable[int] # [unsubscriptable-object]
collections.abc.Sized[int] # [unsubscriptable-object]
# subscriptable with Python 3.9
collections.abc.ByteString[int] # [unsubscriptable-object]
# Missing implementation for 'collections.abc' derived classes
class DerivedHashable(collections.abc.Hashable): # [abstract-method] # __hash__
pass
class DerivedIterable(collections.abc.Iterable[int]): # [unsubscriptable-object]
pass
class DerivedCollection(collections.abc.Collection[int]): # [unsubscriptable-object]
pass
# No implementation required for 'builtins' and 'collections' types
class DerivedList(list[int]): # [unsubscriptable-object]
pass
class DerivedSet(set[int]): # [unsubscriptable-object]
pass
class DerivedOrderedDict(collections.OrderedDict[int, str]): # [unsubscriptable-object]
pass
class DerivedListIterable(list[collections.abc.Iterable[int]]): # [unsubscriptable-object,unsubscriptable-object]
pass
# Multiple generic base classes
class DerivedMultiple(collections.abc.Sized, collections.abc.Hashable): # [abstract-method,abstract-method]
pass
class CustomAbstractCls1(abc.ABC):
pass
class CustomAbstractCls2(collections.abc.Sized, collections.abc.Iterable[CustomAbstractCls1]): # [abstract-method,unsubscriptable-object] # __len__
pass
class CustomImplementation(CustomAbstractCls2): # [abstract-method] # __len__
pass
# Type annotations
var_tuple: tuple[int, int] # [unsubscriptable-object]
var_dict: dict[int, str] # [unsubscriptable-object]
var_orderedDict: collections.OrderedDict[int, str] # [unsubscriptable-object]
var_container: collections.abc.Container[int] # [unsubscriptable-object]
var_sequence: collections.abc.Sequence[int] # [unsubscriptable-object]
var_iterable: collections.abc.Iterable[int] # [unsubscriptable-object]
var_awaitable: collections.abc.Awaitable[int] # [unsubscriptable-object]
var_contextmanager: contextlib.AbstractContextManager[int] # [unsubscriptable-object]
var_pattern: re.Pattern[int] # [unsubscriptable-object]
var_bytestring: collections.abc.ByteString
var_hashable: collections.abc.Hashable
var_sized: collections.abc.Sized
# Type annotation with unsubscriptable type
var_int: int[int] # [unsubscriptable-object]
var_hashable2: collections.abc.Hashable[int] # [unsubscriptable-object]
var_sized2: collections.abc.Sized[int] # [unsubscriptable-object]
# subscriptable with Python 3.9
var_bytestring2: collections.abc.ByteString[int] # [unsubscriptable-object] | en | 0.444118 | Test generic alias support for stdlib types (added in PY39). Raise [unsubscriptable-object] error for PY37 and PY38. Make sure `import typing` doesn't change anything. # flake8: noqa # pylint: disable=missing-docstring,pointless-statement,unused-import # pylint: disable=too-few-public-methods,multiple-statements,line-too-long # special # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # builtins # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # collections # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # collections.abc # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object,unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # contextlib # [unsubscriptable-object] # [unsubscriptable-object] # re # [unsubscriptable-object] # [unsubscriptable-object] # unsubscriptable types # [unsubscriptable-object] # [unsubscriptable-object] # subscriptable with Python 3.9 # [unsubscriptable-object] # Missing implementation for 'collections.abc' derived classes # [abstract-method] # __hash__ # [unsubscriptable-object] # [unsubscriptable-object] # No implementation required for 'builtins' and 'collections' types # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object,unsubscriptable-object] # Multiple generic base classes # [abstract-method,abstract-method] # [abstract-method,unsubscriptable-object] # __len__ # [abstract-method] # __len__ # Type annotations # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # Type annotation with unsubscriptable type # [unsubscriptable-object] # [unsubscriptable-object] # [unsubscriptable-object] # subscriptable with Python 3.9 # [unsubscriptable-object] | 1.961525 | 2 |
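As the module docstring says, subscripting the stdlib classes only became legal in Python 3.9; on the 3.7/3.8 interpreters this test targets, the equivalent annotations come from typing, e.g.:
# Pre-3.9 spellings of a few of the generic aliases exercised above (sketch).
from typing import Dict, Iterable, List, Pattern, Tuple

ok_tuple: Tuple[int, int]
ok_dict: Dict[int, str]
ok_list: List[int]
ok_iterable: Iterable[int]
ok_pattern: Pattern[str]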
test/ds_utilities_test.py | jordantcarlisle/Lambdata-DSPT6-JTC | 0 | 6631611 | <reponame>jordantcarlisle/Lambdata-DSPT6-JTC
import unittest
from my_lambdata.ds_utilities import enlarge
class TestDsUtilities(unittest.TestCase):
def test_enlarge(self):
self.assertEqual(enlarge(3), 300)
if __name__ == '__main__':
unittest.main()
| import unittest
from my_lambdata.ds_utilities import enlarge
class TestDsUtilities(unittest.TestCase):
def test_enlarge(self):
self.assertEqual(enlarge(3), 300)
if __name__ == '__main__':
unittest.main() | none | 1 | 2.379987 | 2 |
|
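The module under test is not included in this row; judging from the assertion enlarge(3) == 300, my_lambdata/ds_utilities.py presumably contains something like:
def enlarge(n):
    """Scale a number by 100 (inferred from the test expectation)."""
    return n * 100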
tests/stats_logger_tests.py | franksam007/incubator-superset | 108 | 6631612 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
from unittest import TestCase
from unittest.mock import Mock, patch
from superset.stats_logger import StatsdStatsLogger
class StatsdStatsLoggerTest(TestCase):
def verify_client_calls(self, logger, client):
logger.incr('foo1')
client.incr.assert_called_once()
client.incr.assert_called_with('foo1')
logger.decr('foo2')
client.decr.assert_called_once()
client.decr.assert_called_with('foo2')
logger.gauge('foo3')
client.gauge.assert_called_once()
client.gauge.assert_called_with('foo3')
logger.timing('foo4', 1.234)
client.timing.assert_called_once()
client.timing.assert_called_with('foo4', 1.234)
def test_init_with_statsd_client(self):
client = Mock()
stats_logger = StatsdStatsLogger(statsd_client=client)
self.verify_client_calls(stats_logger, client)
def test_init_with_params(self):
with patch('superset.stats_logger.StatsClient') as MockStatsdClient:
mock_client = MockStatsdClient.return_value
stats_logger = StatsdStatsLogger()
self.verify_client_calls(stats_logger, mock_client)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for Superset"""
from unittest import TestCase
from unittest.mock import Mock, patch
from superset.stats_logger import StatsdStatsLogger
class StatsdStatsLoggerTest(TestCase):
def verify_client_calls(self, logger, client):
logger.incr('foo1')
client.incr.assert_called_once()
client.incr.assert_called_with('foo1')
logger.decr('foo2')
client.decr.assert_called_once()
client.decr.assert_called_with('foo2')
logger.gauge('foo3')
client.gauge.assert_called_once()
client.gauge.assert_called_with('foo3')
logger.timing('foo4', 1.234)
client.timing.assert_called_once()
client.timing.assert_called_with('foo4', 1.234)
def test_init_with_statsd_client(self):
client = Mock()
stats_logger = StatsdStatsLogger(statsd_client=client)
self.verify_client_calls(stats_logger, client)
def test_init_with_params(self):
with patch('superset.stats_logger.StatsClient') as MockStatsdClient:
mock_client = MockStatsdClient.return_value
stats_logger = StatsdStatsLogger()
self.verify_client_calls(stats_logger, mock_client)
| en | 0.864943 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Unit tests for Superset | 2.088899 | 2 |
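The assertions above only hold for a logger that forwards every call to the wrapped statsd client; a minimal sketch of that delegation pattern (not Superset's actual implementation) would be:
class DelegatingStatsLogger:
    """Forwards each metric call to an injected statsd-style client."""

    def __init__(self, statsd_client):
        self.client = statsd_client

    def incr(self, key):
        self.client.incr(key)

    def decr(self, key):
        self.client.decr(key)

    def gauge(self, key):
        self.client.gauge(key)

    def timing(self, key, value):
        self.client.timing(key, value)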
tr.py | TshakeA/TshakeV2-files | 0 | 6631613 | from utlis.rank import setrank,isrank,remrank,remsudos,setsudo, GPranks,IDrank
from utlis.send import send_msg, BYusers, GetLink,Name,Glang
from utlis.locks import st,getOR
from utlis.tg import Bot
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json
import importlib
from pyrogram.api.types import InputPeerChat
def updateMsgs(client, message,redis):
type = message.chat.type
userID = message.from_user.id
chatID = message.chat.id
rank = isrank(redis,userID,chatID)
text = message.text
title = message.chat.title
userFN = message.from_user.first_name
type = message.chat.type
if text and text == "نقل البيانات" and rank == "sudo" and message.reply_to_message.document:
msgID = Bot("sendMessage",{"chat_id":chatID,"text":"انتظر قليلاً يتم تحميل الملف ℹ️","reply_to_message_id":message.message_id,"parse_mode":"html","disable_web_page_preview":True})["result"]["message_id"]
fileName = message.reply_to_message.download()
JsonDate = json.load(open(fileName))
if int(JsonDate["BOT_ID"]) != int(BOT_ID):
Bot("editMessageText",{"chat_id":chatID,"text":"عذراً هذه الملف ليس لي ⚠️","message_id":msgID,"disable_web_page_preview":True,"parse_mode":"html"})
return 0
co = len(JsonDate["GP_BOT"])
Bot("editMessageText",{"chat_id":chatID,"text":f"تم ايجاد {co} مجموعه في الملف ℹ️","message_id":msgID,"disable_web_page_preview":True,"parse_mode":"html"})
for chatID in JsonDate["GP_BOT"].keys():
try:
time.sleep(0.1)
print(chatID)
Bot("exportChatInviteLink",{"chat_id":chatID})
add = redis.sadd("{}Nbot:groups".format(BOT_ID),chatID)
locksarray = {'Llink','Llongtext','Lmarkdown','Linline','Lfiles','Lcontact','Lbots','Lfwd','Lnote'}
for lock in locksarray:
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
ads = Bot("getChatAdministrators",{"chat_id":chatID})
for ad in ads['result']:
userId = ad["user"]["id"]
userFn = ad["user"]["first_name"]
if ad['status'] == "administrator" and int(userId) != int(BOT_ID):
setrank(redis,"admin",userId,chatID,"array")
if ad['status'] == "creator":
setrank(redis,"malk",userId,chatID,"one")
gpDate = JsonDate["GP_BOT"][chatID]
if "ASAS" in gpDate:
for userId in gpDate["ASAS"]:
setrank(redis,"acreator",userId,chatID,"array")
if "MNSH" in gpDate:
for userId in gpDate["MNSH"]:
setrank(redis,"creator",userId,chatID,"array")
if "MDER" in gpDate:
for userId in gpDate["MDER"]:
setrank(redis,"owner",userId,chatID,"array")
if "MOD" in gpDate:
for userId in gpDate["MOD"]:
setrank(redis,"admin",userId,chatID,"array")
except Exception as e:
print(e)
Bot("editMessageText",{"chat_id":chatID,"text":f"تم نقل المجموعات ✅","message_id":msgID,"disable_web_page_preview":True,"parse_mode":"html"})
| from utlis.rank import setrank,isrank,remrank,remsudos,setsudo, GPranks,IDrank
from utlis.send import send_msg, BYusers, GetLink,Name,Glang
from utlis.locks import st,getOR
from utlis.tg import Bot
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json
import importlib
from pyrogram.api.types import InputPeerChat
def updateMsgs(client, message,redis):
type = message.chat.type
userID = message.from_user.id
chatID = message.chat.id
rank = isrank(redis,userID,chatID)
text = message.text
title = message.chat.title
userFN = message.from_user.first_name
type = message.chat.type
if text and text == "نقل البيانات" and rank == "sudo" and message.reply_to_message.document:
msgID = Bot("sendMessage",{"chat_id":chatID,"text":"انتظر قليلاً يتم تحميل الملف ℹ️","reply_to_message_id":message.message_id,"parse_mode":"html","disable_web_page_preview":True})["result"]["message_id"]
fileName = message.reply_to_message.download()
JsonDate = json.load(open(fileName))
if int(JsonDate["BOT_ID"]) != int(BOT_ID):
Bot("editMessageText",{"chat_id":chatID,"text":"عذراً هذه الملف ليس لي ⚠️","message_id":msgID,"disable_web_page_preview":True,"parse_mode":"html"})
return 0
co = len(JsonDate["GP_BOT"])
Bot("editMessageText",{"chat_id":chatID,"text":f"تم ايجاد {co} مجموعه في الملف ℹ️","message_id":msgID,"disable_web_page_preview":True,"parse_mode":"html"})
for chatID in JsonDate["GP_BOT"].keys():
try:
time.sleep(0.1)
print(chatID)
Bot("exportChatInviteLink",{"chat_id":chatID})
add = redis.sadd("{}Nbot:groups".format(BOT_ID),chatID)
locksarray = {'Llink','Llongtext','Lmarkdown','Linline','Lfiles','Lcontact','Lbots','Lfwd','Lnote'}
for lock in locksarray:
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
ads = Bot("getChatAdministrators",{"chat_id":chatID})
for ad in ads['result']:
userId = ad["user"]["id"]
userFn = ad["user"]["first_name"]
if ad['status'] == "administrator" and int(userId) != int(BOT_ID):
setrank(redis,"admin",userId,chatID,"array")
if ad['status'] == "creator":
setrank(redis,"malk",userId,chatID,"one")
gpDate = JsonDate["GP_BOT"][chatID]
if "ASAS" in gpDate:
for userId in gpDate["ASAS"]:
setrank(redis,"acreator",userId,chatID,"array")
if "MNSH" in gpDate:
for userId in gpDate["MNSH"]:
setrank(redis,"creator",userId,chatID,"array")
if "MDER" in gpDate:
for userId in gpDate["MDER"]:
setrank(redis,"owner",userId,chatID,"array")
if "MOD" in gpDate:
for userId in gpDate["MOD"]:
setrank(redis,"admin",userId,chatID,"array")
except Exception as e:
print(e)
Bot("editMessageText",{"chat_id":chatID,"text":f"تم نقل المجموعات ✅","message_id":msgID,"disable_web_page_preview":True,"parse_mode":"html"})
| none | 1 | 2.257157 | 2 |
|
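For reference, the backup JSON that the handler above expects has roughly this shape (inferred from the key accesses in the code; the ids below are made up):
example_backup = {
    "BOT_ID": 123456789,
    "GP_BOT": {
        "-1001234567890": {          # one entry per group chat id
            "ASAS": [111, 222],      # re-ranked as "acreator"
            "MNSH": [333],           # re-ranked as "creator"
            "MDER": [444],           # re-ranked as "owner"
            "MOD": [555, 666],       # re-ranked as "admin"
        },
    },
}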
src/managed_deployment/subnet_lambda.py | anotherhobby/carve | 0 | 6631614 | import os
import urllib3
import socket
import json
'''
this subnet lambda code file is kept separate from the VPC stack CFN template for easier
editing/testing and is injected into the CFN template at deploy time by carve-core lambda
'''
def hc(beacon):
http = urllib3.PoolManager()
try:
r = http.request('GET', f'http://{beacon}/up', timeout=0.1)
if r.status == 200:
result = 'up'
else:
result = 'down'
except:
result = 'down'
return result
def get_results(beacon):
print(f'getting results for beacon: {beacon}')
http = urllib3.PoolManager()
result = None
try:
r = http.request('GET', f'http://{beacon}/results', timeout=0.1)
ts = http.request('GET', f'http://{beacon}/ts', timeout=0.1)
if r.status == 200:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': r.status,
'fping': format_fping(beacon, r.data),
'health': hc(beacon),
'ts': ts.data
}
else:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
                'status': r.status,
                'result': 'error',
'health': hc(beacon)
}
except urllib3.exceptions.ConnectTimeoutError:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': 'ConnectTimeoutError',
'result': 'timeout',
'health': hc(beacon)
}
except urllib3.exceptions.MaxRetryError:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': 'MaxRetryError',
'result': 'timeout',
'health': hc(beacon)
}
except urllib3.exceptions.HTTPError:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': 'HTTPError',
'result': 'timeout',
'health': hc(beacon)
}
print(result)
return result
def format_fping(beacon, data):
result = {}
for d in data.decode().split('\n'):
if ':' in d:
target = d.split(' :')[0].strip()
if target != beacon:
pings = d.split(': ')[1].split(' ')
p = [0 if x=='-' else float(x) for x in pings]
result[target] = round((sum(p) / len(p)), 3)
return result
def update_beacon(beacon, beacons):
config = json.dumps({'beacons': beacons}).encode()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.connect((beacon, 8008))
s.sendall(config)
data = s.recv(1024)
except socket.error as e:
data = e
if data == config:
print('beacon update successful')
else:
print(f'ERROR: beacon update confirmation failed for {beacon}')
print(f'submitted: {config}')
print(f'returned: {data}')
def lambda_handler(event, context):
print(event)
if event['action'] == 'results':
result = get_results(event['beacon'])
elif event['action'] == 'update':
result = update_beacon(event['beacon'], event['beacons'])
return result
| import os
import urllib3
import socket
import json
'''
this subnet lambda code file is kept separate from the VPC stack CFN template for easier
editing/testing and is injected into the CFN template at deploy time by carve-core lambda
'''
def hc(beacon):
http = urllib3.PoolManager()
try:
r = http.request('GET', f'http://{beacon}/up', timeout=0.1)
if r.status == 200:
result = 'up'
else:
result = 'down'
except:
result = 'down'
return result
def get_results(beacon):
print(f'getting results for beacon: {beacon}')
http = urllib3.PoolManager()
result = None
try:
r = http.request('GET', f'http://{beacon}/results', timeout=0.1)
ts = http.request('GET', f'http://{beacon}/ts', timeout=0.1)
if r.status == 200:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': r.status,
'fping': format_fping(beacon, r.data),
'health': hc(beacon),
'ts': ts.data
}
else:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
                'status': r.status,
                'result': 'error',
'health': hc(beacon)
}
except urllib3.exceptions.ConnectTimeoutError:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': 'ConnectTimeoutError',
'result': 'timeout',
'health': hc(beacon)
}
except urllib3.exceptions.MaxRetryError:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': 'MaxRetryError',
'result': 'timeout',
'health': hc(beacon)
}
except urllib3.exceptions.HTTPError:
result = {
'beacon': beacon,
'subnet': os.environ['VpcSubnetIds'],
'status': 'HTTPError',
'result': 'timeout',
'health': hc(beacon)
}
print(result)
return result
def format_fping(beacon, data):
result = {}
for d in data.decode().split('\n'):
if ':' in d:
target = d.split(' :')[0].strip()
if target != beacon:
pings = d.split(': ')[1].split(' ')
p = [0 if x=='-' else float(x) for x in pings]
result[target] = round((sum(p) / len(p)), 3)
return result
def update_beacon(beacon, beacons):
config = json.dumps({'beacons': beacons}).encode()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.connect((beacon, 8008))
s.sendall(config)
data = s.recv(1024)
except socket.error as e:
data = e
if data == config:
print('beacon update successful')
else:
print(f'ERROR: beacon update confirmation failed for {beacon}')
print(f'submitted: {config}')
print(f'returned: {data}')
def lambda_handler(event, context):
print(event)
if event['action'] == 'results':
result = get_results(event['beacon'])
elif event['action'] == 'update':
result = update_beacon(event['beacon'], event['beacons'])
return result
| en | 0.892084 | this subnet lambda code file is kept separate from the VPC stack CFN template for easier editing/testing and is injected into the CFN template at deploy time by carve-core lambda | 2.181369 | 2 |
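update_beacon() above pushes a JSON config over TCP port 8008 and treats an exact echo as confirmation; the beacon side of that exchange is not part of this file, but it would presumably look something like:
import socket

def beacon_config_listener(host="0.0.0.0", port=8008):
    # Sketch of the receiving end assumed by update_beacon(): read the config,
    # apply it locally, then echo the same bytes back as the acknowledgement.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((host, port))
        srv.listen(1)
        conn, _addr = srv.accept()
        with conn:
            data = conn.recv(1024)
            # ... json.loads(data)["beacons"] would become the new fping target list ...
            conn.sendall(data)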
contrib/rackspace/rackspace/clients.py | NeCTAR-RC/heat | 1 | 6631615 | <filename>contrib/rackspace/rackspace/clients.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from heat.common import exception
from heat.engine import clients
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
try:
import pyrax
except ImportError:
logger.info(_('pyrax not available'))
try:
from swiftclient import client as swiftclient
except ImportError:
swiftclient = None
logger.info(_('swiftclient not available'))
try:
from ceilometerclient import client as ceilometerclient
except ImportError:
ceilometerclient = None
logger.info(_('ceilometerclient not available'))
cloud_opts = [
cfg.StrOpt('region_name',
default=None,
help=_('Region for connecting to services.'))
]
cfg.CONF.register_opts(cloud_opts)
class Clients(clients.OpenStackClients):
'''
Convenience class to create and cache client instances.
'''
def __init__(self, context):
super(Clients, self).__init__(context)
self.pyrax = None
def _get_client(self, name):
if not self.pyrax:
self.__authenticate()
return self.pyrax.get(name)
def auto_scale(self):
"""Rackspace Auto Scale client."""
return self._get_client("autoscale")
def cloud_db(self):
'''Rackspace cloud database client.'''
return self._get_client("database")
def cloud_lb(self):
'''Rackspace cloud loadbalancer client.'''
return self._get_client("load_balancer")
def cloud_dns(self):
'''Rackspace cloud dns client.'''
return self._get_client("dns")
def nova(self, service_type="compute"):
'''Rackspace cloudservers client. Specifying the service type is to
maintain compatibility with clients.OpenStackClients. It is not
actually a valid option to change within pyrax.
'''
        if service_type != "compute":
raise ValueError(_("service_type should be compute."))
return self._get_client(service_type)
def cloud_networks(self):
'''Rackspace cloud networks client.'''
return self._get_client("network")
def trove(self):
'''Rackspace trove client.'''
if not self._trove:
super(Clients, self).trove(service_type='rax:database')
management_url = self.url_for(service_type='rax:database',
region_name=cfg.CONF.region_name)
self._trove.client.management_url = management_url
return self._trove
def cinder(self):
"""Override the region for the cinder client."""
if not self._cinder:
super(Clients, self).cinder()
management_url = self.url_for(service_type='volume',
region_name=cfg.CONF.region_name)
self._cinder.client.management_url = management_url
return self._cinder
def __authenticate(self):
pyrax.set_setting("identity_type", "keystone")
pyrax.set_setting("auth_endpoint", self.context.auth_url)
logger.info(_("Authenticating username:%s") %
self.context.username)
self.pyrax = pyrax.auth_with_token(self.context.auth_token,
tenant_id=self.context.tenant_id,
tenant_name=self.context.tenant,
region=(cfg.CONF.region_name
or None))
if not self.pyrax:
raise exception.AuthorizationFailure("No services available.")
logger.info(_("User %s authenticated successfully.")
% self.context.username)
| <filename>contrib/rackspace/rackspace/clients.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from heat.common import exception
from heat.engine import clients
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
try:
import pyrax
except ImportError:
logger.info(_('pyrax not available'))
try:
from swiftclient import client as swiftclient
except ImportError:
swiftclient = None
logger.info(_('swiftclient not available'))
try:
from ceilometerclient import client as ceilometerclient
except ImportError:
ceilometerclient = None
logger.info(_('ceilometerclient not available'))
cloud_opts = [
cfg.StrOpt('region_name',
default=None,
help=_('Region for connecting to services.'))
]
cfg.CONF.register_opts(cloud_opts)
class Clients(clients.OpenStackClients):
'''
Convenience class to create and cache client instances.
'''
def __init__(self, context):
super(Clients, self).__init__(context)
self.pyrax = None
def _get_client(self, name):
if not self.pyrax:
self.__authenticate()
return self.pyrax.get(name)
def auto_scale(self):
"""Rackspace Auto Scale client."""
return self._get_client("autoscale")
def cloud_db(self):
'''Rackspace cloud database client.'''
return self._get_client("database")
def cloud_lb(self):
'''Rackspace cloud loadbalancer client.'''
return self._get_client("load_balancer")
def cloud_dns(self):
'''Rackspace cloud dns client.'''
return self._get_client("dns")
def nova(self, service_type="compute"):
'''Rackspace cloudservers client. Specifying the service type is to
maintain compatibility with clients.OpenStackClients. It is not
actually a valid option to change within pyrax.
'''
        if service_type != "compute":
raise ValueError(_("service_type should be compute."))
return self._get_client(service_type)
def cloud_networks(self):
'''Rackspace cloud networks client.'''
return self._get_client("network")
def trove(self):
'''Rackspace trove client.'''
if not self._trove:
super(Clients, self).trove(service_type='rax:database')
management_url = self.url_for(service_type='rax:database',
region_name=cfg.CONF.region_name)
self._trove.client.management_url = management_url
return self._trove
def cinder(self):
"""Override the region for the cinder client."""
if not self._cinder:
super(Clients, self).cinder()
management_url = self.url_for(service_type='volume',
region_name=cfg.CONF.region_name)
self._cinder.client.management_url = management_url
return self._cinder
def __authenticate(self):
pyrax.set_setting("identity_type", "keystone")
pyrax.set_setting("auth_endpoint", self.context.auth_url)
logger.info(_("Authenticating username:%s") %
self.context.username)
self.pyrax = pyrax.auth_with_token(self.context.auth_token,
tenant_id=self.context.tenant_id,
tenant_name=self.context.tenant,
region=(cfg.CONF.region_name
or None))
if not self.pyrax:
raise exception.AuthorizationFailure("No services available.")
logger.info(_("User %s authenticated successfully.")
% self.context.username)
| en | 0.793317 | # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Convenience class to create and cache client instances. Rackspace Auto Scale client. Rackspace cloud database client. Rackspace cloud loadbalancer client. Rackspace cloud dns client. Rackspace cloudservers client. Specifying the service type is to maintain compatibility with clients.OpenStackClients. It is not actually a valid option to change within pyrax. Rackspace cloud networks client. Rackspace trove client. Override the region for the cinder client. | 1.970859 | 2 |
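A short usage sketch of the lazy-authentication pattern above, given the Clients class in this file (the heat request context construction is elided):
def make_rackspace_clients(context):
    clients = Clients(context)   # nothing talks to the Rackspace API yet
    lb = clients.cloud_lb()      # first client access triggers __authenticate() once
    dns = clients.cloud_dns()    # later accesses reuse the cached pyrax session
    return lb, dns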
resources/PyInstaller-3.0/tests/old_suite/basic/pkg2/extra/b.py | dvt32/mypymodoro | 0 | 6631616 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" b.py lives in extra, but shows as pkg2.b (and pkg1.b)"""
def b_func():
return "b_func from pkg2.b (pkg2/extra/b.py)"
| #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" b.py lives in extra, but shows as pkg2.b (and pkg1.b)"""
def b_func():
return "b_func from pkg2.b (pkg2/extra/b.py)"
| en | 0.597055 | #----------------------------------------------------------------------------- # Copyright (c) 2013, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License with exception # for distributing bootloader. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- b.py lives in extra, but shows as pkg2.b (and pkg1.b) | 2.595455 | 3 |
examples/class_image_TEST.py | rengezri/imsis | 1 | 6631617 | #!/usr/bin/env python
'''
Class Image test
'''
import os
import imsis as ims
import numpy as np
print("Starting...")
fn = r".\images\bberry.jpg"
im_blueberry = ims.Image.load(fn)
fn = r".\images\rice.jpg"
im_rice = ims.Image.load(fn)
fn = r".\images\spa_rice.tif"
im_spa_rice = ims.Image.load(fn)
im_blueberry_noise = ims.Image.Process.gaussian_noise(im_blueberry)
im_blueberry_shifted = ims.Image.Transform.translate(im_blueberry, 30, 50)
im_rice_gray = ims.Image.Convert.toGray(im_rice)
im_rice_gray_noise = ims.Image.Process.gaussian_noise(im_rice_gray, 0.1)
img = im_rice
autoclose=1.2
info1 = ims.Image.unique_colours(im_blueberry)
info2 = ims.Image.unique_colours(im_rice)
ims.View.plot_list_with_histogram([im_blueberry, im_rice], ['cols {}'.format(info1), 'cols {}'.format(info2)],autoclose=autoclose)
img2_1 = ims.Image.Tools.add_blackmask(img, [50, 50, 250, 250])
ims.View.plot_list([img, img2_1], ['Source', 'With black mask'], window_title='Add Mask',autoclose=autoclose)
img2_1 = ims.Image.Tools.add_blackborder(img, 25)
ims.View.plot_list([img, img2_1], ['Source', 'With black border'], window_title='Add Border',autoclose=autoclose)
img2_1 = ims.Image.crop_percentage(img, 0.5)
img2_2 = ims.Image.zoom(img, 1.5, 0.1, 0.1)
img2_3 = ims.Image.resize(ims.Image.Adjust.bin(img2_1, 2), 2)  # first bin then resize to original size
ims.View.plot_list([img, img2_1, img2_2, img2_3], ['Source', 'Crop', 'Zoom', 'Bin'],
window_title='Image Crop, Zoom, Bin',autoclose=autoclose)
img2_1 = ims.Image.Process.poisson_noise(img, 0.25)
img2_2 = ims.Image.Process.gaussian_noise(img, 0.25)
img2_3 = ims.Image.Process.salt_and_pepper_noise(img, 0.25)
ims.View.plot_list([img, img2_1, img2_2, img2_3],
['Source', 'Poisson noise', 'Gaussian noise', 'Salt and pepper noise'],
window_title='Image Add Noise',autoclose=autoclose)
img2_1 = ims.Image.Process.gaussian_blur(img, 3)
img2_2 = ims.Image.Process.median(img)
img2_3 = ims.Image.Process.nonlocalmeans(img, h=14, templatewindowsize=9, searchwindowsize=21)
ims.View.plot_list([img, img2_1, img2_2, img2_3], ['Source with noise', 'Gaussianblur', 'Median', 'NonLocalMeans'],
window_title='Image Reduce Noise',autoclose=autoclose)
img2_1 = ims.Image.Process.unsharp_mask(img, kernel_size=7, sigma=1.0, amount=1.0, threshold=0)
img2_2 = ims.Image.Process.deconvolution_wiener(img, d=5, noise=11)
ims.View.plot_list([img, img2_1, img2_2], ['Source', 'Unsharpenmask', 'Deconv'], window_title='Image Sharpen',autoclose=autoclose)
img2_1 = ims.Image.Transform.flip_vertical(img)
img2_2 = ims.Image.Transform.flip_horizontal(img)
img2_3 = ims.Image.Transform.translate(img, 25, 25)
img2_4 = ims.Image.Transform.rotate(img, 45)
ims.View.plot_list([img2_1, img2_2, img2_3, img2_4], ['Flip vertical', 'Flip horizontal', 'Translate image',
'Rotate image'], window_title='Image Transformation',autoclose=autoclose)
img2_1 = ims.Image.Process.cannyedge_auto(img)
img2_2, thetaq = ims.Image.Process.gradient_image_nonmaxsuppressed(img2_1, 5, 40)
img2_3 = ims.Image.Process.pencilsketch((img))
ims.View.plot_list([img2_1, img2_2, img2_3], ['Canny edge auto', 'Gradientnonmaxsupp', 'Pencilsketch'],
window_title='Image Edge Enhancement',autoclose=autoclose)
img2_1, angle = ims.Image.Process.gradient_image(img)
ims.View.plot_list([img2_1, angle], ['Gradient_mag', 'Gradient_angle'], window_title='Image Gradient',autoclose=autoclose)
im2_1 = ims.Image.Process.cannyedge_auto(img)
im2_1 = ims.Image.Binary.morphology_dilate(im2_1, 5)
im_th, im_floodfill, im_floodfill_inv, im_out = ims.Image.Binary.morphology_fillholes(im2_1)
im2_2 = ims.Image.Binary.morphology_erode(im_out, 5)
ims.View.plot_list([img, im2_2], window_title='Image Morphological Filter',autoclose=autoclose)
img2_1 = ims.Image.Binary.morphology_erode(img, 5)
img2_2 = ims.Image.Binary.morphology_dilate(img, 5)
img2_3 = ims.Image.Binary.morphology_open(img, 5)
img2_4 = ims.Image.Binary.morphology_close(img, 5)
ims.View.plot_list([img2_1, img2_2, img2_3, img2_4], ['Erode', 'Dilate', 'Open', 'Close'],
window_title='Image Morphological Filter',autoclose=autoclose)
img2_1 = ims.Image.Adjust.thresholdrange(img, 75, 128)
img2_2 = ims.Image.Adjust.threshold(img, 75)
img2_3 = ims.Image.Adjust.invert(img)
ims.View.plot_list([img2_1, img2_2, img2_3], ['Threshold range', 'Threshold binary', 'Invert'],
window_title='Image Thresholding, Invert',autoclose=autoclose)
img2_1 = ims.Image.Adjust.histostretch_equalized(img)
img2_2 = ims.Image.Adjust.histostretch_clahe(img)
ims.View.plot_list_with_histogram([img, img2_1, img2_2], ['Source', 'Equalized Histogram', 'Clahe histogram'],
window_title='Image Histogram Optimization',autoclose=autoclose)
img3_1 = ims.Image.Process.Falsecolor.falsecolor_jet(img)
img3_2 = ims.Image.Process.Falsecolor.falsecolor_rainbow(img)
ims.View.plot_list([img3_1, img3_2], ['Falsecolor jet', 'Falsecolor rainbow'], window_title='Image False Colour',autoclose=autoclose)
img3_1 = ims.Image.Tools.create_checkerboard()
img3_2 = ims.Image.Tools.image_with_2_closeups(img)
img3_3 = ims.Image.Tools.squared(img)
ims.View.plot_list([img3_1, img3_2, img3_3], ['Checkerboard', '2close-ups', 'Squared'], window_title='Image Misc',autoclose=autoclose)
# fisheye
K = np.array(
[[781.3524863867165, 0.0, 794.7118000552183], [0.0, 779.5071163774452, 561.3314451453386], [0.0, 0.0, 1.0]])
fx = 1200
fy = 1200
K = np.array(
[[fx, 0.0, img3_1.shape[0] * 0.5], [0.0, fy, img3_1.shape[1] * 0.5], [0.0, 0.0, 1.0]])
D = np.array([[0.0], [0.0], [0.0], [1.0]])
img1_1 = ims.Image.Tools.fisheye_correction(img3_1, K, D, DIM=(img3_1.shape[0], img3_1.shape[1]))
ims.View.plot_list([img3_1, img1_1], ["Source", "Fisheye"], window_title='Image Fisheye',autoclose=autoclose)
img2_1 = im_rice_gray
mx, my, grad, theta = ims.Image.Process.directionalsharpness(img2_1)
ims.View.plot_list([grad, theta], ["Directional sharpness gradient", "Theta"],
window_title='Image Directional Sharpness',autoclose=autoclose)
img2_1 = im_blueberry
img2_2 = ims.Image.Transform.rotate(img2_1, 5)
img2_3 = ims.Image.Tools.imageregistration(img2_1, img2_2)
ims.View.plot_list([img2_1, img2_2, img2_3], ['Source', 'Rotated', 'Image Registration of rotated image'],
window_title='Image Transformation, Rotation, Registration',autoclose=autoclose)
im_4 = ims.Image.Process.sepia(im_blueberry)
ims.View.plot_list([im_blueberry, im_4], ['Source', 'Sepia'], window_title="Sepia",autoclose=autoclose)
im_4 = ims.Image.Adjust.adjust_auto_whitebalance(im_blueberry)
ims.View.plot_list([im_blueberry, im_4], ['Source', 'Auto whitebalance'], window_title="Auto Whitebalance",autoclose=autoclose)
im_4 = ims.Image.Process.k_means(im_blueberry, k=4)
ims.View.plot_list([im_blueberry, im_4], ['Source', 'K-Means Clustering'], window_title="K-Means Clustering",autoclose=autoclose)
im_4 = ims.Image.Tools.create_hsv_map()
ims.Image.save(im_4 * 255, r'.\output\hsv_map.jpg')
ims.Image.save_withuniquetimestamp(im_4 * 255)
dft_shift, img2_1 = ims.Image.Process.FFT(img)
img2_2 = ims.Image.Process.IFFT(dft_shift)
ims.View.plot_list([img, img2_1, img2_2], ['Source', 'FFT', 'IFFT'], window_title='Image FFT',autoclose=autoclose)
print('Ready.')
| #!/usr/bin/env python
'''
Class Image test
'''
import os
import imsis as ims
import numpy as np
print("Starting...")
fn = r".\images\bberry.jpg"
im_blueberry = ims.Image.load(fn)
fn = r".\images\rice.jpg"
im_rice = ims.Image.load(fn)
fn = r".\images\spa_rice.tif"
im_spa_rice = ims.Image.load(fn)
im_blueberry_noise = ims.Image.Process.gaussian_noise(im_blueberry)
im_blueberry_shifted = ims.Image.Transform.translate(im_blueberry, 30, 50)
im_rice_gray = ims.Image.Convert.toGray(im_rice)
im_rice_gray_noise = ims.Image.Process.gaussian_noise(im_rice_gray, 0.1)
img = im_rice
autoclose=1.2
info1 = ims.Image.unique_colours(im_blueberry)
info2 = ims.Image.unique_colours(im_rice)
ims.View.plot_list_with_histogram([im_blueberry, im_rice], ['cols {}'.format(info1), 'cols {}'.format(info2)],autoclose=autoclose)
img2_1 = ims.Image.Tools.add_blackmask(img, [50, 50, 250, 250])
ims.View.plot_list([img, img2_1], ['Source', 'With black mask'], window_title='Add Mask',autoclose=autoclose)
img2_1 = ims.Image.Tools.add_blackborder(img, 25)
ims.View.plot_list([img, img2_1], ['Source', 'With black border'], window_title='Add Border',autoclose=autoclose)
img2_1 = ims.Image.crop_percentage(img, 0.5)
img2_2 = ims.Image.zoom(img, 1.5, 0.1, 0.1)
img2_3 = ims.Image.resize(ims.Image.Adjust.bin(img2_1, 2), 2)  # first bin then resize to original size
ims.View.plot_list([img, img2_1, img2_2, img2_3], ['Source', 'Crop', 'Zoom', 'Bin'],
window_title='Image Crop, Zoom, Bin',autoclose=autoclose)
img2_1 = ims.Image.Process.poisson_noise(img, 0.25)
img2_2 = ims.Image.Process.gaussian_noise(img, 0.25)
img2_3 = ims.Image.Process.salt_and_pepper_noise(img, 0.25)
ims.View.plot_list([img, img2_1, img2_2, img2_3],
['Source', 'Poisson noise', 'Gaussian noise', 'Salt and pepper noise'],
window_title='Image Add Noise',autoclose=autoclose)
img2_1 = ims.Image.Process.gaussian_blur(img, 3)
img2_2 = ims.Image.Process.median(img)
img2_3 = ims.Image.Process.nonlocalmeans(img, h=14, templatewindowsize=9, searchwindowsize=21)
ims.View.plot_list([img, img2_1, img2_2, img2_3], ['Source with noise', 'Gaussianblur', 'Median', 'NonLocalMeans'],
window_title='Image Reduce Noise',autoclose=autoclose)
img2_1 = ims.Image.Process.unsharp_mask(img, kernel_size=7, sigma=1.0, amount=1.0, threshold=0)
img2_2 = ims.Image.Process.deconvolution_wiener(img, d=5, noise=11)
ims.View.plot_list([img, img2_1, img2_2], ['Source', 'Unsharpenmask', 'Deconv'], window_title='Image Sharpen',autoclose=autoclose)
img2_1 = ims.Image.Transform.flip_vertical(img)
img2_2 = ims.Image.Transform.flip_horizontal(img)
img2_3 = ims.Image.Transform.translate(img, 25, 25)
img2_4 = ims.Image.Transform.rotate(img, 45)
ims.View.plot_list([img2_1, img2_2, img2_3, img2_4], ['Flip vertical', 'Flip horizontal', 'Translate image',
'Rotate image'], window_title='Image Transformation',autoclose=autoclose)
img2_1 = ims.Image.Process.cannyedge_auto(img)
img2_2, thetaq = ims.Image.Process.gradient_image_nonmaxsuppressed(img2_1, 5, 40)
img2_3 = ims.Image.Process.pencilsketch((img))
ims.View.plot_list([img2_1, img2_2, img2_3], ['Canny edge auto', 'Gradientnonmaxsupp', 'Pencilsketch'],
window_title='Image Edge Enhancement',autoclose=autoclose)
img2_1, angle = ims.Image.Process.gradient_image(img)
ims.View.plot_list([img2_1, angle], ['Gradient_mag', 'Gradient_angle'], window_title='Image Gradient',autoclose=autoclose)
im2_1 = ims.Image.Process.cannyedge_auto(img)
im2_1 = ims.Image.Binary.morphology_dilate(im2_1, 5)
im_th, im_floodfill, im_floodfill_inv, im_out = ims.Image.Binary.morphology_fillholes(im2_1)
im2_2 = ims.Image.Binary.morphology_erode(im_out, 5)
ims.View.plot_list([img, im2_2], window_title='Image Morphological Filter',autoclose=autoclose)
img2_1 = ims.Image.Binary.morphology_erode(img, 5)
img2_2 = ims.Image.Binary.morphology_dilate(img, 5)
img2_3 = ims.Image.Binary.morphology_open(img, 5)
img2_4 = ims.Image.Binary.morphology_close(img, 5)
ims.View.plot_list([img2_1, img2_2, img2_3, img2_4], ['Erode', 'Dilate', 'Open', 'Close'],
window_title='Image Morphological Filter',autoclose=autoclose)
img2_1 = ims.Image.Adjust.thresholdrange(img, 75, 128)
img2_2 = ims.Image.Adjust.threshold(img, 75)
img2_3 = ims.Image.Adjust.invert(img)
ims.View.plot_list([img2_1, img2_2, img2_3], ['Threshold range', 'Threshold binary', 'Invert'],
window_title='Image Thresholding, Invert',autoclose=autoclose)
img2_1 = ims.Image.Adjust.histostretch_equalized(img)
img2_2 = ims.Image.Adjust.histostretch_clahe(img)
ims.View.plot_list_with_histogram([img, img2_1, img2_2], ['Source', 'Equalized Histogram', 'Clahe histogram'],
window_title='Image Histogram Optimization',autoclose=autoclose)
img3_1 = ims.Image.Process.Falsecolor.falsecolor_jet(img)
img3_2 = ims.Image.Process.Falsecolor.falsecolor_rainbow(img)
ims.View.plot_list([img3_1, img3_2], ['Falsecolor jet', 'Falsecolor rainbow'], window_title='Image False Colour',autoclose=autoclose)
img3_1 = ims.Image.Tools.create_checkerboard()
img3_2 = ims.Image.Tools.image_with_2_closeups(img)
img3_3 = ims.Image.Tools.squared(img)
ims.View.plot_list([img3_1, img3_2, img3_3], ['Checkerboard', '2close-ups', 'Squared'], window_title='Image Misc',autoclose=autoclose)
# fisheye
K = np.array(
[[781.3524863867165, 0.0, 794.7118000552183], [0.0, 779.5071163774452, 561.3314451453386], [0.0, 0.0, 1.0]])
fx = 1200
fy = 1200
K = np.array(
[[fx, 0.0, img3_1.shape[0] * 0.5], [0.0, fy, img3_1.shape[1] * 0.5], [0.0, 0.0, 1.0]])
D = np.array([[0.0], [0.0], [0.0], [1.0]])
img1_1 = ims.Image.Tools.fisheye_correction(img3_1, K, D, DIM=(img3_1.shape[0], img3_1.shape[1]))
ims.View.plot_list([img3_1, img1_1], ["Source", "Fisheye"], window_title='Image Fisheye',autoclose=autoclose)
img2_1 = im_rice_gray
mx, my, grad, theta = ims.Image.Process.directionalsharpness(img2_1)
ims.View.plot_list([grad, theta], ["Directional sharpness gradient", "Theta"],
window_title='Image Directional Sharpness',autoclose=autoclose)
img2_1 = im_blueberry
img2_2 = ims.Image.Transform.rotate(img2_1, 5)
img2_3 = ims.Image.Tools.imageregistration(img2_1, img2_2)
ims.View.plot_list([img2_1, img2_2, img2_3], ['Source', 'Rotated', 'Image Registration of rotated image'],
window_title='Image Transformation, Rotation, Registration',autoclose=autoclose)
im_4 = ims.Image.Process.sepia(im_blueberry)
ims.View.plot_list([im_blueberry, im_4], ['Source', 'Sepia'], window_title="Sepia",autoclose=autoclose)
im_4 = ims.Image.Adjust.adjust_auto_whitebalance(im_blueberry)
ims.View.plot_list([im_blueberry, im_4], ['Source', 'Auto whitebalance'], window_title="Auto Whitebalance",autoclose=autoclose)
im_4 = ims.Image.Process.k_means(im_blueberry, k=4)
ims.View.plot_list([im_blueberry, im_4], ['Source', 'K-Means Clustering'], window_title="K-Means Clustering",autoclose=autoclose)
im_4 = ims.Image.Tools.create_hsv_map()
ims.Image.save(im_4 * 255, r'.\output\hsv_map.jpg')
ims.Image.save_withuniquetimestamp(im_4 * 255)
dft_shift, img2_1 = ims.Image.Process.FFT(img)
img2_2 = ims.Image.Process.IFFT(dft_shift)
ims.View.plot_list([img, img2_1, img2_2], ['Source', 'FFT', 'IFFT'], window_title='Image FFT',autoclose=autoclose)
print('Ready.')
| en | 0.696281 | #!/usr/bin/env python Class Image test # first bin than resize to original size # fisheye | 2.549991 | 3 |
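The FFT/IFFT round trip at the end of the script can be reproduced with plain NumPy when imsis is unavailable (a sketch of the underlying idea, not the imsis implementation):
import numpy as np

def fft_roundtrip(gray):
    # Forward transform with the zero frequency centred, plus the inverse.
    spectrum = np.fft.fftshift(np.fft.fft2(gray))
    magnitude = 20 * np.log(np.abs(spectrum) + 1)   # what FFT plots usually display
    restored = np.abs(np.fft.ifft2(np.fft.ifftshift(spectrum)))
    return magnitude, restored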
accounting_tech/migrations/0052_auto_20200305_1017.py | Tim-Ilin/asup_corp_site | 0 | 6631618 | # Generated by Django 2.1.7 on 2020-03-05 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting_tech', '0051_auto_20191218_0950'),
]
operations = [
migrations.AlterField(
model_name='equipment',
name='inventory_number',
field=models.CharField(blank=True, max_length=100, verbose_name='ИНВ №'),
),
]
| # Generated by Django 2.1.7 on 2020-03-05 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting_tech', '0051_auto_20191218_0950'),
]
operations = [
migrations.AlterField(
model_name='equipment',
name='inventory_number',
field=models.CharField(blank=True, max_length=100, verbose_name='ИНВ №'),
),
]
| en | 0.742884 | # Generated by Django 2.1.7 on 2020-03-05 07:17 | 1.31602 | 1 |
tools/network_vulnerability.py | andre-morelli/Urban-Analytics | 0 | 6631619 | import random
import numpy as np
from .utils import get_igraph, get_full_igraph
import networkx as nx
def remove_nodes_by_attr(G, attr, remove_proportion, ascending=False):
"""
Remove some proportion of nodes (and attached edges) from a graph based on
    an attribute's numeric order.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
attr : string
Reference attribute (must be on nodes of the graph).
remove_proportion : float between 0 and 1
proportion of nodes to be removed
ascending : boolean
If True, remove nodes from lower-to-higher attribute value, else
do it from higher-to-lower attribute value.
Returns
-------
NetworkX Graph structure
"""
assert 0<=remove_proportion<=1, 'remove_proportion must be between 0 and 1'
G_new = G.copy()
lst = [(G.nodes[n][attr], n) for n in G.nodes]
if ascending:
lst= sorted(lst)
else:
lst= sorted(lst, reverse=True)
delete_nodes = [n for m,n in lst[:int(remove_proportion*len(lst))]]
G_new.remove_nodes_from(delete_nodes)
return G_new
def remove_nodes_random(G, remove_proportion, random_seed=None):
"""
Remove some proportion of nodes (and attached edges) randomly.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
remove_proportion : float between 0 and 1
proportion of nodes to be removed
random_seed : int or None
Random seed for removal. If None, the results will be different
        each time the function is used
Returns
-------
NetworkX Graph structure
"""
random.seed(random_seed)
G_new = G.copy()
delete_nodes = random.sample(list(G.nodes), int(remove_proportion*len(G.nodes)))
G_new.remove_nodes_from(delete_nodes)
return G_new
def remove_edges_random(G, remove_proportion, random_seed=None,
direction_sensitive=False):
"""
Remove some proportion of edges randomly.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
remove_proportion : float between 0 and 1
proportion of edges to be removed
random_seed : int or None
Random seed for removal. If None, the results will be different
        each time the function is used
Returns
-------
NetworkX Graph structure
"""
random.seed(random_seed)
G_new = G.copy()
if direction_sensitive:
delete_edges = random.sample(list(G.edges), int(remove_proportion*len(G.edges)))
else:
delete_edges = []
for entry in random.sample(_concat_streets(G), int(remove_proportion*len(G.edges))):
delete_edges += list(entry)
G_new.remove_edges_from(delete_edges)
return G_new
def remove_edges_by_attr(G, attr, remove_proportion, ascending=False,
direction_sensitive=False):
"""
    Remove some proportion of edges from a graph based on an attribute's
numeric order.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
attr : string
Reference attribute (must be on nodes of the graph).
remove_proportion : float between 0 and 1
proportion of edges to be removed
ascending : boolean
        If True, remove edges from lower-to-higher attribute value, else
do it from higher-to-lower attribute value.
Returns
-------
NetworkX Graph structure
"""
assert 0<=remove_proportion<=1, 'remove_proportion must be between 0 and 1'
G_new = G.copy()
if direction_sensitive:
lst = [(G.edges[e][attr], (e,)) for e in G.edges]
else:
lst = _concat_streets(G, attr)
if ascending:
lst= sorted(lst)
else:
lst= sorted(lst, reverse=True)
delete_edges = []
for _,entry in lst[:int(remove_proportion*len(lst))]:
delete_edges += list(entry)
G_new.remove_edges_from(delete_edges)
return G_new
def _concat_streets(G,attr=None):
es = {}
G_new = nx.DiGraph(G).copy()
for e1,e2 in G_new.edges():
if (e2,e1) in G_new.edges:
if ((e2,e1),(e1,e2)) in es.keys(): continue
if attr != None:
es[((e1,e2),(e2,e1))] = G_new.edges[(e1,e2)][attr]+G_new.edges[(e2,e1)][attr]
else:
es[((e1,e2),(e2,e1))] = 1
else:
if attr != None:
es[((e1,e2),)] = G_new.edges[(e1,e2)][attr]
else:
es[((e1,e2),)] = 1
if attr == None:
return [m for m in es.keys()]
else:
return [(n,m) for m,n in es.items()]
def get_efficiency(G, weight = None, track_progress=False):
"""
Calculate "efficiency of alternative" metric for vulnerability analysis.
source: <NAME>., & <NAME>. (2021). Measuring urban road network
vulnerability to extreme events: an application for urban floods. Transportation
research part D: transport and environment, 93, 102770.
https://doi.org/10.1016/j.trd.2021.102770
***
This function tests for all nodes in the system. A function targeted to traffic zones
    and travel behavior, as done in the paper, is under construction.
***
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
weight : string
        Attribute to weight shortest distance algorithm. If None, each edge has
weight 1
track_progress : boolean
For tracking progress on Jupyter-Notebooks ONLY
Returns
-------
dictionary in form {node:efficiency}
"""
Gig = get_full_igraph(G)
efficiency = {}
for node in Gig.vs:
shrt = Gig.shortest_paths_dijkstra(node,weights=weight)
total_proximity=0
for l in shrt[0]:
if l == 0:
continue
total_proximity += 1/l
total_proximity = total_proximity/len(Gig.vs)
efficiency[int(node['osmid'])] = total_proximity
return efficiency
def get_number_of_valid_paths(G,weight=None):
"""
Calculate number of valid paths for the "continuity" metric for vulnerability analysis.
cite: <NAME>., & <NAME>. (2021). Verificação de vulnerabilidades em
redes de transporte: uma abordagem pela teoria dos grafos. TRANSPORTES, 29(1), 161–172.
https://doi.org/10.14295/transportes.v29i1.2250
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
weight : string
        Attribute to weight shortest distance algorithm. If None, each edge has weight=1
Returns
-------
dictionary in form {node:valid_paths}
"""
Gig = get_full_igraph(G)
valid_paths = {}
for node in Gig.vs:
shrt = Gig.shortest_paths(node,weights=weight)
valid = 0
for l in shrt[0]:
if l == 0: # if destination equals origin
continue
if l!=np.inf:
valid+=1
valid_paths[int(node['osmid'])] = valid
return valid_paths
def get_continuity(G1,G2,nan_values=np.nan,on_graph=False):
"""
Calculate "continuity" metric for vulnerability analysis.
cite: <NAME>., & <NAME>. (2021). Verificação de vulnerabilidades em
redes de transporte: uma abordagem pela teoria dos grafos. TRANSPORTES, 29(1), 161–172.
https://doi.org/10.14295/transportes.v29i1.2250
Parameters
----------
    G1 : NetworkX Graph structure
        Graph of the original (intact) network.
    G2 : NetworkX Graph structure
        Graph of the degraded network; must have the same set of nodes as G1.
    nan_values : value assigned to nodes that have no valid paths in G1
    on_graph : boolean
        If True, return a copy of G1 with the metric stored as the node
        attribute 'continuity' instead of a dictionary.
Returns
-------
dictionary in form {node:continuity}
"""
    assert sorted(list(G1.nodes)) == sorted(list(G2.nodes)), \
        'G1 and G2 must have the same set of nodes'
#get valid paths for both
cont_initial = get_number_of_valid_paths(G1)
cont_final = get_number_of_valid_paths(G2)
#populate dictionary
cont = {}
for n in cont_initial:
if cont_initial[n]!=0:
cont[n] = cont_final[n]/cont_initial[n]
else:
cont[n] = nan_values
if on_graph:
G1 = G1.copy()
nx.set_node_attributes(G1,cont,'continuity')
return G1
else:
return cont | import random
import numpy as np
from .utils import get_igraph, get_full_igraph
import networkx as nx
def remove_nodes_by_attr(G, attr, remove_proportion, ascending=False):
"""
Remove some proportion of nodes (and attached edges) from a graph based on
    an attribute's numeric order.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
attr : string
Reference attribute (must be on nodes of the graph).
remove_proportion : float between 0 and 1
proportion of nodes to be removed
ascending : boolean
If True, remove nodes from lower-to-higher attribute value, else
do it from higher-to-lower attribute value.
Returns
-------
NetworkX Graph structure
"""
assert 0<=remove_proportion<=1, 'remove_proportion must be between 0 and 1'
G_new = G.copy()
lst = [(G.nodes[n][attr], n) for n in G.nodes]
if ascending:
lst= sorted(lst)
else:
lst= sorted(lst, reverse=True)
delete_nodes = [n for m,n in lst[:int(remove_proportion*len(lst))]]
G_new.remove_nodes_from(delete_nodes)
return G_new
def remove_nodes_random(G, remove_proportion, random_seed=None):
"""
Remove some proportion of nodes (and attached edges) randomly.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
remove_proportion : float between 0 and 1
proportion of nodes to be removed
random_seed : int or None
Random seed for removal. If None, the results will be different
        each time the function is used
Returns
-------
NetworkX Graph structure
"""
random.seed(random_seed)
G_new = G.copy()
delete_nodes = random.sample(list(G.nodes), int(remove_proportion*len(G.nodes)))
G_new.remove_nodes_from(delete_nodes)
return G_new
def remove_edges_random(G, remove_proportion, random_seed=None,
direction_sensitive=False):
"""
Remove some proportion of edges randomly.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
remove_proportion : float between 0 and 1
proportion of edges to be removed
random_seed : int or None
Random seed for removal. If None, the results will be different
        each time the function is used
Returns
-------
NetworkX Graph structure
"""
random.seed(random_seed)
G_new = G.copy()
if direction_sensitive:
delete_edges = random.sample(list(G.edges), int(remove_proportion*len(G.edges)))
else:
delete_edges = []
for entry in random.sample(_concat_streets(G), int(remove_proportion*len(G.edges))):
delete_edges += list(entry)
G_new.remove_edges_from(delete_edges)
return G_new
def remove_edges_by_attr(G, attr, remove_proportion, ascending=False,
direction_sensitive=False):
"""
    Remove some proportion of edges from a graph based on an attribute's
numeric order.
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
attr : string
Reference attribute (must be on nodes of the graph).
remove_proportion : float between 0 and 1
proportion of edges to be removed
ascending : boolean
        If True, remove edges from lower-to-higher attribute value, else
do it from higher-to-lower attribute value.
Returns
-------
NetworkX Graph structure
"""
assert 0<=remove_proportion<=1, 'remove_proportion must be between 0 and 1'
G_new = G.copy()
if direction_sensitive:
lst = [(G.edges[e][attr], (e,)) for e in G.edges]
else:
lst = _concat_streets(G, attr)
if ascending:
lst= sorted(lst)
else:
lst= sorted(lst, reverse=True)
delete_edges = []
for _,entry in lst[:int(remove_proportion*len(lst))]:
delete_edges += list(entry)
G_new.remove_edges_from(delete_edges)
return G_new
def _concat_streets(G,attr=None):
es = {}
G_new = nx.DiGraph(G).copy()
for e1,e2 in G_new.edges():
if (e2,e1) in G_new.edges:
if ((e2,e1),(e1,e2)) in es.keys(): continue
if attr != None:
es[((e1,e2),(e2,e1))] = G_new.edges[(e1,e2)][attr]+G_new.edges[(e2,e1)][attr]
else:
es[((e1,e2),(e2,e1))] = 1
else:
if attr != None:
es[((e1,e2),)] = G_new.edges[(e1,e2)][attr]
else:
es[((e1,e2),)] = 1
if attr == None:
return [m for m in es.keys()]
else:
return [(n,m) for m,n in es.items()]
def get_efficiency(G, weight = None, track_progress=False):
"""
Calculate "efficiency of alternative" metric for vulnerability analysis.
source: <NAME>., & <NAME>. (2021). Measuring urban road network
vulnerability to extreme events: an application for urban floods. Transportation
research part D: transport and environment, 93, 102770.
https://doi.org/10.1016/j.trd.2021.102770
***
This function tests for all nodes in the system. A function targeted to traffic zones
    and travel behavior, as done in the paper, is under construction.
***
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
weight : string
        Attribute to weight shortest distance algorithm. If None, each edge has
weight 1
track_progress : boolean
For tracking progress on Jupyter-Notebooks ONLY
Returns
-------
dictionary in form {node:efficiency}
"""
Gig = get_full_igraph(G)
efficiency = {}
for node in Gig.vs:
shrt = Gig.shortest_paths_dijkstra(node,weights=weight)
total_proximity=0
for l in shrt[0]:
if l == 0:
continue
total_proximity += 1/l
total_proximity = total_proximity/len(Gig.vs)
efficiency[int(node['osmid'])] = total_proximity
return efficiency
def get_number_of_valid_paths(G,weight=None):
"""
Calculate number of valid paths for the "continuity" metric for vulnerability analysis.
cite: <NAME>., & <NAME>. (2021). Verificação de vulnerabilidades em
redes de transporte: uma abordagem pela teoria dos grafos. TRANSPORTES, 29(1), 161–172.
https://doi.org/10.14295/transportes.v29i1.2250
Parameters
----------
G : NetworkX Graph structure
Graph of the network.
weight : string
        Attribute to weight shortest distance algorithm. If None, each edge has weight=1
Returns
-------
dictionary in form {node:valid_paths}
"""
Gig = get_full_igraph(G)
valid_paths = {}
for node in Gig.vs:
shrt = Gig.shortest_paths(node,weights=weight)
valid = 0
for l in shrt[0]:
if l == 0: # if destination equals origin
continue
if l!=np.inf:
valid+=1
valid_paths[int(node['osmid'])] = valid
return valid_paths
def get_continuity(G1,G2,nan_values=np.nan,on_graph=False):
"""
Calculate "continuity" metric for vulnerability analysis.
cite: <NAME>., & <NAME>. (2021). Verificação de vulnerabilidades em
redes de transporte: uma abordagem pela teoria dos grafos. TRANSPORTES, 29(1), 161–172.
https://doi.org/10.14295/transportes.v29i1.2250
Parameters
----------
    G1 : NetworkX Graph structure
        Graph of the original (intact) network.
    G2 : NetworkX Graph structure
        Graph of the degraded network; must have the same set of nodes as G1.
    nan_values : value assigned to nodes that have no valid paths in G1
    on_graph : boolean
        If True, return a copy of G1 with the metric stored as the node
        attribute 'continuity' instead of a dictionary.
Returns
-------
dictionary in form {node:continuity}
"""
    assert sorted(list(G1.nodes)) == sorted(list(G2.nodes)), \
        'G1 and G2 must have the same set of nodes'
#get valid paths for both
cont_initial = get_number_of_valid_paths(G1)
cont_final = get_number_of_valid_paths(G2)
#populate dictionary
cont = {}
for n in cont_initial:
if cont_initial[n]!=0:
cont[n] = cont_final[n]/cont_initial[n]
else:
cont[n] = nan_values
if on_graph:
G1 = G1.copy()
nx.set_node_attributes(G1,cont,'continuity')
return G1
else:
return cont | en | 0.6676 | Remove some proportion of nodes (and attached edges) from a graph based on an atrribute's numeric order. Parameters ---------- G : NetworkX Graph structure Graph of the network. attr : string Reference attribute (must be on nodes of the graph). remove_proportion : float between 0 and 1 proportion of nodes to be removed ascending : boolean If True, remove nodes from lower-to-higher attribute value, else do it from higher-to-lower attribute value. Returns ------- NetworkX Graph structure Remove some proportion of nodes (and attached edges) randomly. Parameters ---------- G : NetworkX Graph structure Graph of the network. remove_proportion : float between 0 and 1 proportion of nodes to be removed random_seed : int or None Random seed for removal. If None, the results will be different each time the functions is used Returns ------- NetworkX Graph structure Remove some proportion of edges randomly. Parameters ---------- G : NetworkX Graph structure Graph of the network. remove_proportion : float between 0 and 1 proportion of edges to be removed random_seed : int or None Random seed for removal. If None, the results will be different each time the functions is used Returns ------- NetworkX Graph structure Remove some proportion of edges from a graph based on an atrribute's numeric order. Parameters ---------- G : NetworkX Graph structure Graph of the network. attr : string Reference attribute (must be on nodes of the graph). remove_proportion : float between 0 and 1 proportion of edges to be removed ascending : boolean If True, remove egdes from lower-to-higher attribute value, else do it from higher-to-lower attribute value. Returns ------- NetworkX Graph structure Calculate "efficiency of alternative" metric for vulnerability analysis. source: <NAME>., & <NAME>. (2021). Measuring urban road network vulnerability to extreme events: an application for urban floods. Transportation research part D: transport and environment, 93, 102770. https://doi.org/10.1016/j.trd.2021.102770 *** This function tests for all nodes in the system. A function targeted to traffic zones and travel behavior, as done in the paper, data is under construction. *** Parameters ---------- G : NetworkX Graph structure Graph of the network. weight : string Attribute to weight shortest distance algorithm. If None, each edge have weight 1 track_progress : boolean For tracking progress on Jupyter-Notebooks ONLY Returns ------- dictionary in form {node:efficiency} Calculate number of valid paths for the "continuity" metric for vulnerability analysis. cite: <NAME>., & <NAME>. (2021). Verificação de vulnerabilidades em redes de transporte: uma abordagem pela teoria dos grafos. TRANSPORTES, 29(1), 161–172. https://doi.org/10.14295/transportes.v29i1.2250 Parameters ---------- G : NetworkX Graph structure Graph of the network. weight : string Attribute to weight shortest distance algorithm. If None, each edge have weight=1 Returns ------- dictionary in form {node:valid_paths} # if destination equals origin Calculate "continuity" metric for vulnerability analysis. cite: <NAME>., & <NAME>. (2021). Verificação de vulnerabilidades em redes de transporte: uma abordagem pela teoria dos grafos. TRANSPORTES, 29(1), 161–172. https://doi.org/10.14295/transportes.v29i1.2250 Parameters ---------- G : NetworkX Graph structure Graph of the network. weight : string Attribute to weight shortest distance algorithm. 
If None, each edge have weight 1 track_progress : boolean For tracking progress on Jupyter-Notebooks ONLY Returns ------- dictionary in form {node:continuity} #get valid paths for both #populate dictionary | 3.177043 | 3 |
implementations/week5/primitive_calculator.py | MichelML/edx_algos_micromaster | 0 | 6631620 | <reponame>MichelML/edx_algos_micromaster
# Uses python3
def optimal_sequence(n):
sequence = []
a = [0]*(n+1)
for i in range(1, len(a)):
a[i] = a[i-1] + 1
if i % 2 == 0:
a[i] = min(1+a[i//2], a[i])
if i % 3 == 0:
a[i] = min(1+a[i//3], a[i])
while n > 1:
sequence.append(n)
if a[n-1] == a[n]-1:
n = n-1
elif n % 2 == 0 and a[n//2] == a[n]-1:
n = n//2
elif n % 3 == 0 and a[n//3] == a[n]-1:
n = n//3
sequence.append(1)
return reversed(sequence)
n = int(input())
sequence = list(optimal_sequence(n))
print(len(sequence) - 1)
for x in sequence:
print(x, end=' ')
| # Uses python3
def optimal_sequence(n):
sequence = []
a = [0]*(n+1)
for i in range(1, len(a)):
a[i] = a[i-1] + 1
if i % 2 == 0:
a[i] = min(1+a[i//2], a[i])
if i % 3 == 0:
a[i] = min(1+a[i//3], a[i])
while n > 1:
sequence.append(n)
if a[n-1] == a[n]-1:
n = n-1
elif n % 2 == 0 and a[n//2] == a[n]-1:
n = n//2
elif n % 3 == 0 and a[n//3] == a[n]-1:
n = n//3
sequence.append(1)
return reversed(sequence)
n = int(input())
sequence = list(optimal_sequence(n))
print(len(sequence) - 1)
for x in sequence:
print(x, end=' ') | en | 0.163637 | # Uses python3 | 3.490116 | 3 |
day09/solution1.py | evanbrumley/aoc2021 | 0 | 6631621 | <filename>day09/solution1.py
import statistics
class TubeMap:
def __init__(self, rows):
self.rows = rows
self.cols = list(map(list, zip(*self.rows)))
self.width = len(self.rows[0])
self.height = len(self.cols[0])
@classmethod
def from_raw_lines(cls, lines):
rows = []
for line in lines:
if line.strip():
rows.append([int(c) for c in line])
return cls(rows)
@property
def minima(self):
minima = []
for y in range(self.height):
for x in range(self.width):
val = self.rows[y][x]
if x == 0:
is_x_minima = val < self.rows[y][x+1]
elif x == self.width - 1:
is_x_minima = val < self.rows[y][x-1]
else:
is_x_minima = self.rows[y][x-1] > val < self.rows[y][x+1]
if y == 0:
is_y_minima = val < self.rows[y+1][x]
elif y == self.height - 1:
is_y_minima = val < self.rows[y-1][x]
else:
is_y_minima = self.rows[y+1][x] > val < self.rows[y-1][x]
if is_x_minima and is_y_minima:
minima.append((x, y, val))
return minima
@property
def total_risk_factor(self):
total = 0
for x, y, val in self.minima:
total += (1 + val)
return total
def main():
with open("input", "r") as f:
lines = f.read().splitlines()
tube_map = TubeMap.from_raw_lines(lines)
print(tube_map.width, tube_map.height)
print(tube_map.minima)
print(tube_map.total_risk_factor)
if __name__ == "__main__":
main()
| <filename>day09/solution1.py
import statistics
class TubeMap:
def __init__(self, rows):
self.rows = rows
self.cols = list(map(list, zip(*self.rows)))
self.width = len(self.rows[0])
self.height = len(self.cols[0])
@classmethod
def from_raw_lines(cls, lines):
rows = []
for line in lines:
if line.strip():
rows.append([int(c) for c in line])
return cls(rows)
@property
def minima(self):
minima = []
for y in range(self.height):
for x in range(self.width):
val = self.rows[y][x]
if x == 0:
is_x_minima = val < self.rows[y][x+1]
elif x == self.width - 1:
is_x_minima = val < self.rows[y][x-1]
else:
is_x_minima = self.rows[y][x-1] > val < self.rows[y][x+1]
if y == 0:
is_y_minima = val < self.rows[y+1][x]
elif y == self.height - 1:
is_y_minima = val < self.rows[y-1][x]
else:
is_y_minima = self.rows[y+1][x] > val < self.rows[y-1][x]
if is_x_minima and is_y_minima:
minima.append((x, y, val))
return minima
@property
def total_risk_factor(self):
total = 0
for x, y, val in self.minima:
total += (1 + val)
return total
def main():
with open("input", "r") as f:
lines = f.read().splitlines()
tube_map = TubeMap.from_raw_lines(lines)
print(tube_map.width, tube_map.height)
print(tube_map.minima)
print(tube_map.total_risk_factor)
if __name__ == "__main__":
main()
| none | 1 | 3.313037 | 3 |
|
vaa/urls.py | arnists/vaa | 1 | 6631622 | <filename>vaa/urls.py
"""vaa URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import urls as authurls
from django.contrib.auth.views import login as authlogin
from django.conf.urls.static import static
from django.views.generic import RedirectView
from vaa.questions import views as vaav
from vaa.staticpages import views as vaas
urlpatterns = [
url(r'^userpage/$', vaav.userpage),
url(r'^$', vaav.home),
url(r'^userupdate/', vaav.userupdate),
url(r'^candans/(?P<election>[\w\d]+)/$', vaav.candanswer),
url(r'^candanswer/(?P<election>[\w\d]+)/$', vaav.candreply),
url(r'^voterform/(?P<election>[\w\d]+)/$', vaav.voterform),
url(r'^voterform/(?P<election>[\w\d]+)/(?P<hashcode>[\w\d]+)/$', vaav.voterform),
url(r'^compare/(?P<election>[\w\d]+)/$', vaav.compare),
url(r'^compare/(?P<election>[\w\d]+)/(?P<hashcode>[\w\d]+)/$', vaav.compare_load),
url(r'^oldanswers/(?P<election>[\w\d]+)/$', vaav.oldanswers),
url(r'^candidate/(?P<pk>\d+)/$', vaav.candidate_page),
#url(r'^candidate/(?P<pk>\d+)/', vaav.candidate_page),
url('^', include(authurls)),
url(r'^accounts/login/$', authlogin, { 'extra_context':{'next':'/userpage/'}}),
url(r'^accounts/profile/$', RedirectView.as_view(url="/userpage/")),
url(r'^admin/', admin.site.urls),
url(r'^absents/(?P<election>[\w\d]+)/$', vaav.absents),
url(r'^clearsession/', vaav.clear_session),
url(r'^page/(?P<slug>[\w\d_-]+)', vaas.page),
]
#+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + [
#]
#+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| <filename>vaa/urls.py
"""vaa URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import urls as authurls
from django.contrib.auth.views import login as authlogin
from django.conf.urls.static import static
from django.views.generic import RedirectView
from vaa.questions import views as vaav
from vaa.staticpages import views as vaas
urlpatterns = [
url(r'^userpage/$', vaav.userpage),
url(r'^$', vaav.home),
url(r'^userupdate/', vaav.userupdate),
url(r'^candans/(?P<election>[\w\d]+)/$', vaav.candanswer),
url(r'^candanswer/(?P<election>[\w\d]+)/$', vaav.candreply),
url(r'^voterform/(?P<election>[\w\d]+)/$', vaav.voterform),
url(r'^voterform/(?P<election>[\w\d]+)/(?P<hashcode>[\w\d]+)/$', vaav.voterform),
url(r'^compare/(?P<election>[\w\d]+)/$', vaav.compare),
url(r'^compare/(?P<election>[\w\d]+)/(?P<hashcode>[\w\d]+)/$', vaav.compare_load),
url(r'^oldanswers/(?P<election>[\w\d]+)/$', vaav.oldanswers),
url(r'^candidate/(?P<pk>\d+)/$', vaav.candidate_page),
#url(r'^candidate/(?P<pk>\d+)/', vaav.candidate_page),
url('^', include(authurls)),
url(r'^accounts/login/$', authlogin, { 'extra_context':{'next':'/userpage/'}}),
url(r'^accounts/profile/$', RedirectView.as_view(url="/userpage/")),
url(r'^admin/', admin.site.urls),
url(r'^absents/(?P<election>[\w\d]+)/$', vaav.absents),
url(r'^clearsession/', vaav.clear_session),
url(r'^page/(?P<slug>[\w\d_-]+)', vaas.page),
]
#+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + [
#]
#+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| en | 0.540335 | vaa URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) #url(r'^candidate/(?P<pk>\d+)/', vaav.candidate_page), #+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + [ #] #+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 2.541102 | 3 |
server/app.py | 4-1-2/BIOBOT | 0 | 6631623 | # TODO: DEPRECATED
# use rest_app.py
from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify
from ibm_botocore.client import Config
import ibm_boto3
import numpy as np
import atexit
import os
import json
import io as libio
from PIL import Image
app = Flask(__name__)
from biobot.model import predict, get_model
import base64
from io import BytesIO
# Write image STORAGE IBM
def cgsWriteImage(client, bucket, file, image):
n = image.ndim
if (n==3):
img = Image.fromarray(image,'RGB')
else:
if (image.max()==1):
img = Image.fromarray(image,'1').convert('RGB')
else:
img = Image.fromarray(image,'L').convert('RGB')
bufImage = libio.BytesIO()
img.save(bufImage,"JPEG")
bufImage.seek(0)
isr = client.put_object(Bucket=bucket,
Body = bufImage,
Key = file,
ContentType = 'image/jpeg')
print("""cgsWriteImage:
\n\tBucket=%s
\n\tFile=%s
\n\tArraySize=%d %s
RawSize=%d\n""" % (
bucket, file, image.size, image.shape, bufImage.getbuffer().nbytes))
# DB IBM
client = Cloudant.iam(
"0543c3c0-716a-4fe4-8deb-bb2fd61dcd8e-bluemix",
"<KEY>",
connect=True
)
database_bot = client['biobot']
# STORAGE IBM
cgsClient = ibm_boto3.client(service_name='s3',
ibm_api_key_id = '<KEY>',
ibm_auth_endpoint='https://iam.cloud.ibm.com/identity/token',
config=Config(signature_version='oauth'),
endpoint_url='https://s3.ap.cloud-object-storage.appdomain.cloud')
#!im = numpy.array(pic)
# On IBM Cloud Cloud Foundry, get the port number from the environment variable
# PORT when running this app on the local machine, default the port to 8000
# Create the model:
# ResNet9 : classifier
# Input size : [56 x 256 x 3]
# Output size : [38]
model = get_model()
port = int(os.getenv('PORT', 8000))
@app.route('/', methods=['GET', 'POST'])
def basic():
if request.method == 'POST':
name = request.form['name']
partition_key = 'Humans'
document_key = 'julia30'
database_bot.create_document({
'_id': ':'.join((partition_key, document_key)),
'name': name
})
return render_template('index.html', t=name)
return render_template('index.html')
# Diagnosis
@app.route('/diagnosis', methods=['GET', 'POST'])
def run_diagnosis():
if request.method == 'POST':
#import pdb; pdb.set_trace()
image = request.files['img']
image_ = Image.open(image)
new_width, new_height = 256, 256
width, height = image_.size # Get dimensions
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
# Crop the center of the image
image_cropped = image_.crop((left, top, right, bottom))
im_file = BytesIO()
# -*- coding: utf-8 -*-
image_cropped.save(im_file, format='JPEG')
binary_data = im_file.getvalue()
io_image = base64.b64encode(binary_data)
#io_image = base64.b64encode(image_cropped.read()).decode('utf-8')
res1, res2 = predict(model, io_image)
return render_template('upload_image.html', image_up= res1 +' - '+ res2)
return render_template('upload_image.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
| # TODO: DEPRECATED
# use rest_app.py
from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify
from ibm_botocore.client import Config
import ibm_boto3
import numpy as np
import atexit
import os
import json
import io as libio
from PIL import Image
app = Flask(__name__)
from biobot.model import predict, get_model
import base64
from io import BytesIO
# Write image STORAGE IBM
def cgsWriteImage(client, bucket, file, image):
n = image.ndim
if (n==3):
img = Image.fromarray(image,'RGB')
else:
if (image.max()==1):
img = Image.fromarray(image,'1').convert('RGB')
else:
img = Image.fromarray(image,'L').convert('RGB')
bufImage = libio.BytesIO()
img.save(bufImage,"JPEG")
bufImage.seek(0)
isr = client.put_object(Bucket=bucket,
Body = bufImage,
Key = file,
ContentType = 'image/jpeg')
print("""cgsWriteImage:
\n\tBucket=%s
\n\tFile=%s
\n\tArraySize=%d %s
RawSize=%d\n""" % (
bucket, file, image.size, image.shape, bufImage.getbuffer().nbytes))
# DB IBM
client = Cloudant.iam(
"0543c3c0-716a-4fe4-8deb-bb2fd61dcd8e-bluemix",
"<KEY>",
connect=True
)
database_bot = client['biobot']
# STORAGE IBM
cgsClient = ibm_boto3.client(service_name='s3',
ibm_api_key_id = '<KEY>',
ibm_auth_endpoint='https://iam.cloud.ibm.com/identity/token',
config=Config(signature_version='oauth'),
endpoint_url='https://s3.ap.cloud-object-storage.appdomain.cloud')
#!im = numpy.array(pic)
# On IBM Cloud Cloud Foundry, get the port number from the environment variable
# PORT when running this app on the local machine, default the port to 8000
# Create the model:
# ResNet9 : classifier
# Input size : [56 x 256 x 3]
# Output size : [38]
model = get_model()
port = int(os.getenv('PORT', 8000))
@app.route('/', methods=['GET', 'POST'])
def basic():
if request.method == 'POST':
name = request.form['name']
partition_key = 'Humans'
document_key = 'julia30'
database_bot.create_document({
'_id': ':'.join((partition_key, document_key)),
'name': name
})
return render_template('index.html', t=name)
return render_template('index.html')
# Diagnosis
@app.route('/diagnosis', methods=['GET', 'POST'])
def run_diagnosis():
if request.method == 'POST':
#import pdb; pdb.set_trace()
image = request.files['img']
image_ = Image.open(image)
new_width, new_height = 256, 256
width, height = image_.size # Get dimensions
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
# Crop the center of the image
image_cropped = image_.crop((left, top, right, bottom))
im_file = BytesIO()
# -*- coding: utf-8 -*-
image_cropped.save(im_file, format='JPEG')
binary_data = im_file.getvalue()
io_image = base64.b64encode(binary_data)
#io_image = base64.b64encode(image_cropped.read()).decode('utf-8')
res1, res2 = predict(model, io_image)
return render_template('upload_image.html', image_up= res1 +' - '+ res2)
return render_template('upload_image.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
| en | 0.51113 | # TODO: DEPRECATED # use rest_app.py # Write image STORAGE IBM cgsWriteImage: \n\tBucket=%s \n\tFile=%s \n\tArraySize=%d %s RawSize=%d\n # DB IBM # STORAGE IBM #!im = numpy.array(pic) # On IBM Cloud Cloud Foundry, get the port number from the environment variable # PORT when running this app on the local machine, default the port to 8000 # Create the model: # ResNet9 : classifier # Input size : [56 x 256 x 3] # Output size : [38] # Diagnosis #import pdb; pdb.set_trace() # Get dimensions # Crop the center of the image # -*- coding: utf-8 -*- #io_image = base64.b64encode(image_cropped.read()).decode('utf-8') | 2.215753 | 2 |
methods/detection/utils.py | ciampluca/counting_perineuronal_nets | 6 | 6631624 | import torch
import torch.distributed as dist
def collate_fn(batch):
return list(zip(*batch))
def build_coco_compliant_batch(image_and_target_batch):
images, bboxes = zip(*image_and_target_batch)
def _get_coco_target(bboxes):
n_boxes = len(bboxes)
boxes = [[x0, y0, x1, y1] for y0, x0, y1, x1 in bboxes] if n_boxes else [[]]
shape = (n_boxes,) if n_boxes else (1, 0)
return {
'boxes': torch.as_tensor(boxes, dtype=torch.float32),
'labels': torch.ones(shape, dtype=torch.int64), # there is only one class
'iscrowd': torch.zeros(shape, dtype=torch.int64) # suppose all instances are not crowd
}
targets = [_get_coco_target(b) for b in bboxes]
return images, targets
def check_empty_images(targets):
if targets[0]['boxes'].is_cuda:
device = targets[0]['boxes'].get_device()
else:
device = torch.device("cpu")
for target in targets:
if target['boxes'].nelement() == 0:
target['boxes'] = torch.as_tensor([[0, 1, 2, 3]], dtype=torch.float32, device=device)
target['labels'] = torch.zeros((1,), dtype=torch.int64, device=device)
target['iscrowd'] = torch.zeros((1,), dtype=torch.int64, device=device)
return targets
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
def get_world_size():
if dist.is_available() and dist.is_initialized():
return dist.get_world_size()
return 1
| import torch
import torch.distributed as dist
def collate_fn(batch):
return list(zip(*batch))
def build_coco_compliant_batch(image_and_target_batch):
images, bboxes = zip(*image_and_target_batch)
def _get_coco_target(bboxes):
n_boxes = len(bboxes)
boxes = [[x0, y0, x1, y1] for y0, x0, y1, x1 in bboxes] if n_boxes else [[]]
shape = (n_boxes,) if n_boxes else (1, 0)
return {
'boxes': torch.as_tensor(boxes, dtype=torch.float32),
'labels': torch.ones(shape, dtype=torch.int64), # there is only one class
'iscrowd': torch.zeros(shape, dtype=torch.int64) # suppose all instances are not crowd
}
targets = [_get_coco_target(b) for b in bboxes]
return images, targets
def check_empty_images(targets):
if targets[0]['boxes'].is_cuda:
device = targets[0]['boxes'].get_device()
else:
device = torch.device("cpu")
for target in targets:
if target['boxes'].nelement() == 0:
target['boxes'] = torch.as_tensor([[0, 1, 2, 3]], dtype=torch.float32, device=device)
target['labels'] = torch.zeros((1,), dtype=torch.int64, device=device)
target['iscrowd'] = torch.zeros((1,), dtype=torch.int64, device=device)
return targets
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
def get_world_size():
if dist.is_available() and dist.is_initialized():
return dist.get_world_size()
return 1
| en | 0.913738 | # there is only one class # suppose all instances are not crowd Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction. # sort the keys so that they are consistent across processes | 2.53906 | 3 |
alipay/aop/api/response/MybankCreditProdarrangementContracttextQueryResponse.py | articuly/alipay-sdk-python-all | 0 | 6631625 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankCreditProdarrangementContracttextQueryResponse(AlipayResponse):
def __init__(self):
super(MybankCreditProdarrangementContracttextQueryResponse, self).__init__()
self._text = None
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
def parse_response_content(self, response_content):
response = super(MybankCreditProdarrangementContracttextQueryResponse, self).parse_response_content(response_content)
if 'text' in response:
self.text = response['text']
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankCreditProdarrangementContracttextQueryResponse(AlipayResponse):
def __init__(self):
super(MybankCreditProdarrangementContracttextQueryResponse, self).__init__()
self._text = None
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
def parse_response_content(self, response_content):
response = super(MybankCreditProdarrangementContracttextQueryResponse, self).parse_response_content(response_content)
if 'text' in response:
self.text = response['text']
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.45067 | 2 |
tumblr/spiders/tumblr_spider.py | RickyChen30/scrapy-tumblr-loader | 1 | 6631626 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import re
from urllib.parse import urlparse
import scrapy
class TumblrSpiderSpider(scrapy.Spider):
name = 'tumblr-spider'
@staticmethod
def parse_cookies(cookie: str) -> dict:
cookie = cookie.split(':')[-1]
q = {k.strip(): v for k, v in re.findall(r'(.*?)=(.*?);', cookie)}
return q
def start_requests(self):
# cookies = self.parse_cookies(self.cookies_str)
# print(cookies)
with open('start_urls.txt') as f:
for url in f:
yield scrapy.Request(url.strip(), cookies={
'pfg': '477cc7d08af3433b166e93f39babf79d3be08db0396145eb8a30db4f5e7a137c%23%7B%22' +
'eu_resident%22%3A1%2C%22' +
'gdpr_is_acceptable_age%22%3A1%2C%22' +
'gdpr_consent_core%22%3A1%2C%22' +
'gdpr_consent_first_party_ads%22%3A1%2C%22' +
'gdpr_consent_third_party_ads%22%3A1%2C%22' +
'gdpr_consent_search_history%22%3A1%2C%22exp%22%3A1558465652%7D%237501684376'})
@staticmethod
def get_fn(hostname, image_url):
"""
create file name from url
all images should be stored in 'images/hostname' folder
:param hostname: tumblr blog hostname
:param image_url: image url
:return: file name
"""
o = urlparse(image_url)
fn = 'images/' + hostname + '/' + o.path.strip('/').replace('/', '_')
return fn
def parse(self, response):
html = response.body.decode('utf-8')
o = urlparse(response.url)
re_images = re.findall(r'(https://\d+\.media\.tumblr\.com/[\d\w/_]+_\d+\.(jpg|gif))', html)
for image_link in re_images:
image_link = image_link[0]
image_link = re.sub(r'_\d+.jpg$', '_1280.jpg', image_link)
image_link = re.sub(r'_\d+.gif$', '_1280.gif', image_link)
fn = self.get_fn(o.hostname, image_link)
if os.path.isfile(fn):
continue
# print('image link ' + image_link)
# print(fn)
# print()
d = os.path.dirname(fn)
if not os.path.isdir(d):
os.makedirs(d)
yield response.follow(image_link, self.save_img, meta={'fn': fn})
# some tumblr blogs do not have next page link in form of /page/\d+ substring
# but we will crawl next page if there are downloadable images
# this thing may not work for some blogs, uncomment with care
# if len(re_images):
# page = 1
# match = re.search('/page/(\d+)', response.url)
# if match:
# page = match.group(1)
# page = int(page) + 1
# yield response.follow('/page/%s' % page, self.parse)
for page_link in re.findall(r'href[="]*(/page/\d+)[">]*', html):
yield response.follow(page_link, self.parse)
@staticmethod
def save_img(response):
with open(response.meta['fn'], 'wb') as f:
f.write(response.body)
| # -*- coding: utf-8 -*-
import os
import re
from urllib.parse import urlparse
import scrapy
class TumblrSpiderSpider(scrapy.Spider):
name = 'tumblr-spider'
@staticmethod
def parse_cookies(cookie: str) -> dict:
cookie = cookie.split(':')[-1]
q = {k.strip(): v for k, v in re.findall(r'(.*?)=(.*?);', cookie)}
return q
def start_requests(self):
# cookies = self.parse_cookies(self.cookies_str)
# print(cookies)
with open('start_urls.txt') as f:
for url in f:
yield scrapy.Request(url.strip(), cookies={
'pfg': '477cc7d08af3433b166e93f39babf79d3be08db0396145eb8a30db4f5e7a137c%23%7B%22' +
'eu_resident%22%3A1%2C%22' +
'gdpr_is_acceptable_age%22%3A1%2C%22' +
'gdpr_consent_core%22%3A1%2C%22' +
'gdpr_consent_first_party_ads%22%3A1%2C%22' +
'gdpr_consent_third_party_ads%22%3A1%2C%22' +
'gdpr_consent_search_history%22%3A1%2C%22exp%22%3A1558465652%7D%237501684376'})
@staticmethod
def get_fn(hostname, image_url):
"""
create file name from url
all images should be stored in 'images/hostname' folder
:param hostname: tumblr blog hostname
:param image_url: image url
:return: file name
"""
o = urlparse(image_url)
fn = 'images/' + hostname + '/' + o.path.strip('/').replace('/', '_')
return fn
def parse(self, response):
html = response.body.decode('utf-8')
o = urlparse(response.url)
re_images = re.findall(r'(https://\d+\.media\.tumblr\.com/[\d\w/_]+_\d+\.(jpg|gif))', html)
for image_link in re_images:
image_link = image_link[0]
image_link = re.sub(r'_\d+.jpg$', '_1280.jpg', image_link)
image_link = re.sub(r'_\d+.gif$', '_1280.gif', image_link)
fn = self.get_fn(o.hostname, image_link)
if os.path.isfile(fn):
continue
# print('image link ' + image_link)
# print(fn)
# print()
d = os.path.dirname(fn)
if not os.path.isdir(d):
os.makedirs(d)
yield response.follow(image_link, self.save_img, meta={'fn': fn})
# some tumblr blogs do not have next page link in form of /page/\d+ substring
# but we will crawl next page if there are downloadable images
# this thing may not work for some blogs, uncomment with care
# if len(re_images):
# page = 1
# match = re.search('/page/(\d+)', response.url)
# if match:
# page = match.group(1)
# page = int(page) + 1
# yield response.follow('/page/%s' % page, self.parse)
for page_link in re.findall(r'href[="]*(/page/\d+)[">]*', html):
yield response.follow(page_link, self.parse)
@staticmethod
def save_img(response):
with open(response.meta['fn'], 'wb') as f:
f.write(response.body) | en | 0.609089 | # -*- coding: utf-8 -*- # cookies = self.parse_cookies(self.cookies_str) # print(cookies) create file name from url all images should be stored in 'images/hostname' folder :param hostname: tumblr blog hostname :param image_url: image url :return: file name # print('image link ' + image_link) # print(fn) # print() # some tumblr blogs do not have next page link in form of /page/\d+ substring # but we will crawl next page if there are downloadable images # this thing may not work for some blogs, uncomment with care # if len(re_images): # page = 1 # match = re.search('/page/(\d+)', response.url) # if match: # page = match.group(1) # page = int(page) + 1 # yield response.follow('/page/%s' % page, self.parse) | 3.11147 | 3 |
migrations/0003_2018-09-23_22-50-22.py | nadermx/didishiptoday | 1 | 6631627 | # -*- coding: utf-8 -*-
# Generated by Pony ORM 0.8-dev on 2018-09-23 22:50
from __future__ import unicode_literals
import datetime
from pony import orm
from pony.migrate import diagram_ops as op
dependencies = ['0002_2018-09-23_21-51-50']
operations = [
op.AddAttr('Ship', 'dt_shipped', orm.Optional(datetime.datetime))]
| # -*- coding: utf-8 -*-
# Generated by Pony ORM 0.8-dev on 2018-09-23 22:50
from __future__ import unicode_literals
import datetime
from pony import orm
from pony.migrate import diagram_ops as op
dependencies = ['0002_2018-09-23_21-51-50']
operations = [
op.AddAttr('Ship', 'dt_shipped', orm.Optional(datetime.datetime))]
| en | 0.791422 | # -*- coding: utf-8 -*- # Generated by Pony ORM 0.8-dev on 2018-09-23 22:50 | 1.696427 | 2 |
Models/UNet.py | zeeshanalipnhwr/Semantic-Segmentation-Keras | 3 | 6631628 | from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, Conv2DTranspose
from keras.layers.core import Activation, Flatten, Dropout, Dense
from keras.layers import Input, concatenate
from keras import backend as K
class UNet:
def __init__(self, depth = 16):
self.depth = depth
def encoder_block(self, input_layer, depth, dropout):
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(input_layer)
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(output_layer)
output_layer = MaxPooling2D(pool_size=(2, 2))(output_layer)
output_layer = Dropout(dropout)(output_layer)
return output_layer
def encoder(self, input_layer, depth):
block1 = self.encoder_block(input_layer, depth, dropout=0.25)
block2 = self.encoder_block(block1, depth*2, dropout=0.25)
block3 = self.encoder_block(block2, depth*4, dropout=0.25)
block4 = self.encoder_block(block3, depth*8, dropout=0.25)
return block1, block2, block3, block4
def decoder_block(self, input_layer, depth, dropout):
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(input_layer)
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(output_layer)
output_layer = Dropout(dropout)(output_layer)
return output_layer
def decoder(self, block1, block2, block3, block4, block5, depth):
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(block5)
concatenated = concatenate([block4, upconvolved])
output_layer = self.decoder_block(concatenated, depth, dropout=0.25)
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(output_layer)
concatenated = concatenate([block3, upconvolved])
output_layer = self.decoder_block(concatenated, depth//2, dropout=0.25)
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(output_layer)
concatenated = concatenate([block2, upconvolved])
output_layer = self.decoder_block(concatenated, depth//4, dropout=0.25)
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(output_layer)
concatenated = concatenate([block1, upconvolved])
output_layer = self.decoder_block(concatenated, depth//8, dropout=0.25)
return output_layer
def UNet(self, input_shape):
input_layer = Input(shape=input_shape)
block1, block2, block3, block4 = self.encoder(input_layer, self.depth)
block5 = MaxPooling2D(pool_size=(2, 2))(block4)
block5 = Conv2D(self.depth*16, (3, 3), activation='relu', padding="same")(block5)
block5 = Conv2D(self.depth*16, (3, 3), activation='relu', padding="same")(block5)
decoded = self.decoder(block1, block2, block3, block4, block5, self.depth*8)
upconvolved = Conv2DTranspose(self.depth, (3, 3), strides = (2, 2), padding = 'same')(decoded)
output_layer = Conv2D(1, (1, 1), activation='sigmoid', padding="same")(upconvolved)
model = Model(input_layer, output_layer)
return model
| from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, Conv2DTranspose
from keras.layers.core import Activation, Flatten, Dropout, Dense
from keras.layers import Input, concatenate
from keras import backend as K
class UNet:
def __init__(self, depth = 16):
self.depth = depth
def encoder_block(self, input_layer, depth, dropout):
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(input_layer)
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(output_layer)
output_layer = MaxPooling2D(pool_size=(2, 2))(output_layer)
output_layer = Dropout(dropout)(output_layer)
return output_layer
def encoder(self, input_layer, depth):
block1 = self.encoder_block(input_layer, depth, dropout=0.25)
block2 = self.encoder_block(block1, depth*2, dropout=0.25)
block3 = self.encoder_block(block2, depth*4, dropout=0.25)
block4 = self.encoder_block(block3, depth*8, dropout=0.25)
return block1, block2, block3, block4
def decoder_block(self, input_layer, depth, dropout):
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(input_layer)
output_layer = Conv2D(depth, (3, 3), activation='relu', padding="same")(output_layer)
output_layer = Dropout(dropout)(output_layer)
return output_layer
def decoder(self, block1, block2, block3, block4, block5, depth):
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(block5)
concatenated = concatenate([block4, upconvolved])
output_layer = self.decoder_block(concatenated, depth, dropout=0.25)
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(output_layer)
concatenated = concatenate([block3, upconvolved])
output_layer = self.decoder_block(concatenated, depth//2, dropout=0.25)
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(output_layer)
concatenated = concatenate([block2, upconvolved])
output_layer = self.decoder_block(concatenated, depth//4, dropout=0.25)
upconvolved = Conv2DTranspose(depth, (3, 3), strides = (2, 2), padding = 'same')(output_layer)
concatenated = concatenate([block1, upconvolved])
output_layer = self.decoder_block(concatenated, depth//8, dropout=0.25)
return output_layer
def UNet(self, input_shape):
input_layer = Input(shape=input_shape)
block1, block2, block3, block4 = self.encoder(input_layer, self.depth)
block5 = MaxPooling2D(pool_size=(2, 2))(block4)
block5 = Conv2D(self.depth*16, (3, 3), activation='relu', padding="same")(block5)
block5 = Conv2D(self.depth*16, (3, 3), activation='relu', padding="same")(block5)
decoded = self.decoder(block1, block2, block3, block4, block5, self.depth*8)
upconvolved = Conv2DTranspose(self.depth, (3, 3), strides = (2, 2), padding = 'same')(decoded)
output_layer = Conv2D(1, (1, 1), activation='sigmoid', padding="same")(upconvolved)
model = Model(input_layer, output_layer)
return model
| none | 1 | 2.744689 | 3 |
|
bin/find-songs-make-markov.py | edyesed/bobdylan_ebooks | 0 | 6631629 | <filename>bin/find-songs-make-markov.py
#!/usr/bin/env python
#
# If you want to run locally, you can run this
#
import elasticsearch
import os
import sys
import pymarkovchain
from pprint import pprint
ES_HOST = os.environ.get('ELASTICSEARCH_URL', 'http://localhost:9200')
# Search es, and return the results. we need a minimum number of
# results for a reasonable chain
#
# However, if we don't have a reasonable number of results, we can search
# twitter for more text, and then build a markov out of whatever we have
#
def es_search(es=None, searchword=None, min_hits=10, search_type="match",
fuck_it_well_do_it_live=False):
if search_type == "match":
# v1 query
# searchbody = {"query": {"match": {"text": searchword}}}
# v2 query
#searchbody = { "size": 0,
# "query": {
# "query_string": {
# "query": searchword,
# "analyze_wildcard": True
# }
# }
# }
# v3 query omgwtfbbq, ES can randomize the document selection??
# you'll want this if you get many hits on your search
searchbody = { "query": {
"function_score": {
"query": {
"query_string": {
"query": searchword,
"analyze_wildcard": True
}
},
"boost": "5",
"random_score": {},
"boost_mode": "multiply"
}
}
}
else:
searchbody = {"query": {"more_like_this": {"fields": [
"text"], "like": searchword, "min_term_freq": 1}}}
results = es.search(index="songs", body=searchbody,
filter_path=['hits.total', 'hits.hits._source.text'])
print("ES returned %s" % results['hits']['total'])
if results['hits']['total'] >= min_hits or fuck_it_well_do_it_live:
results = es.search(index="songs", body=searchbody,
filter_path=['hits.total',
'hits.hits._source.text'],
size=min_hits*3)
#size=results['hits']['total'])
return results
# Not enough hits. SEARCH AGAIN
else:
print("going back in")
return es_search(es=es, searchword=searchword, min_hits=300, search_type="not_match", fuck_it_well_do_it_live=True)
if __name__ == "__main__":
es = elasticsearch.Elasticsearch([ES_HOST])
searchword = sys.argv[1]
try:
###
results = es_search(es=es, searchword=searchword)
#results = es_search(es=es, searchword=searchword, search_type="not_match")
# ok
mc = pymarkovchain.MarkovChain()
for songwords in results['hits']['hits']:
#print("training with text: %s" % (songwords['_source']['text']))
mc.generateDatabase(songwords['_source']['text'], sentenceSep='\r\n')
# concat four markovs together
response_text = mc.generateString()
response_text += " " + mc.generateString()
response_text += " " + mc.generateString()
response_text += " " + mc.generateString()
#response_text = mc.generateStringWithSeed(searchword)
print("Response would be:\n%s\n" % (response_text))
max_tweet_len = 280
keepwords = ""
if len(response_text) > max_tweet_len:
words = response_text.split()
for word in words:
#print("KEEPWORDS: %s " % (keepwords))
if len(keepwords) > 280:
raise Exception("Too long of a response")
if len(keepwords) + len(word) > 280:
# RETURN NOW THIS IS ENOUGH
break
keepwords = keepwords + " " + word
else:
keepwords = response_text
print("ACTUAL RESPONSE: %s" % ( len(keepwords)))
try:
            print(keepwords.lower())
except:
print(keepwords)
except Exception as e:
print("Failed as exception %s" % (e))
| <filename>bin/find-songs-make-markov.py
#!/usr/bin/env python
#
# If you want to run locally, you can run this
#
import elasticsearch
import os
import sys
import pymarkovchain
from pprint import pprint
ES_HOST = os.environ.get('ELASTICSEARCH_URL', 'http://localhost:9200')
# Search es, and return the results. we need a minimum number of
# results for a reasonable chain
#
# However, if we don't have a reasonable number of results, we can search
# twitter for more text, and then build a markov out of whatever we have
#
def es_search(es=None, searchword=None, min_hits=10, search_type="match",
fuck_it_well_do_it_live=False):
if search_type == "match":
# v1 query
# searchbody = {"query": {"match": {"text": searchword}}}
# v2 query
#searchbody = { "size": 0,
# "query": {
# "query_string": {
# "query": searchword,
# "analyze_wildcard": True
# }
# }
# }
# v3 query omgwtfbbq, ES can randomize the document selection??
# you'll want this if you get many hits on your search
searchbody = { "query": {
"function_score": {
"query": {
"query_string": {
"query": searchword,
"analyze_wildcard": True
}
},
"boost": "5",
"random_score": {},
"boost_mode": "multiply"
}
}
}
else:
searchbody = {"query": {"more_like_this": {"fields": [
"text"], "like": searchword, "min_term_freq": 1}}}
results = es.search(index="songs", body=searchbody,
filter_path=['hits.total', 'hits.hits._source.text'])
print("ES returned %s" % results['hits']['total'])
if results['hits']['total'] >= min_hits or fuck_it_well_do_it_live:
results = es.search(index="songs", body=searchbody,
filter_path=['hits.total',
'hits.hits._source.text'],
size=min_hits*3)
#size=results['hits']['total'])
return results
# Not enough hits. SEARCH AGAIN
else:
print("going back in")
return es_search(es=es, searchword=searchword, min_hits=300, search_type="not_match", fuck_it_well_do_it_live=True)
if __name__ == "__main__":
es = elasticsearch.Elasticsearch([ES_HOST])
searchword = sys.argv[1]
try:
###
results = es_search(es=es, searchword=searchword)
#results = es_search(es=es, searchword=searchword, search_type="not_match")
# ok
mc = pymarkovchain.MarkovChain()
for songwords in results['hits']['hits']:
#print("training with text: %s" % (songwords['_source']['text']))
mc.generateDatabase(songwords['_source']['text'], sentenceSep='\r\n')
# concat four markovs together
response_text = mc.generateString()
response_text += " " + mc.generateString()
response_text += " " + mc.generateString()
response_text += " " + mc.generateString()
#response_text = mc.generateStringWithSeed(searchword)
print("Response would be:\n%s\n" % (response_text))
max_tweet_len = 280
keepwords = ""
if len(response_text) > max_tweet_len:
words = response_text.split()
for word in words:
#print("KEEPWORDS: %s " % (keepwords))
                if len(keepwords) + len(word) + 1 > max_tweet_len:
                    # enough words collected to fill the tweet
                    break
                keepwords = keepwords + " " + word
else:
keepwords = response_text
print("ACTUAL RESPONSE: %s" % ( len(keepwords)))
        print(keepwords.lower())
except Exception as e:
print("Failed as exception %s" % (e))
| en | 0.713857 | #!/usr/bin/env python # # If you want to run locally, you can run this # # Search es, and return the results. we need a minimum number of # results for a reasonable chain # # However, if we don't have a reasonable number of results, we can search # twitter for more text, and then build a markov out of whatever we have # # v1 query # searchbody = {"query": {"match": {"text": searchword}}} # v2 query #searchbody = { "size": 0, # "query": { # "query_string": { # "query": searchword, # "analyze_wildcard": True # } # } # } # v3 query omgwtfbbq, ES can randomize the document selection?? # you'll want this if you get many hits on your search #size=results['hits']['total']) # Not enough hits. SEARCH AGAIN ### #results = es_search(es=es, searchword=searchword, search_type="not_match") # ok #print("training with text: %s" % (songwords['_source']['text'])) # concat four markovs together #response_text = mc.generateStringWithSeed(searchword) #print("KEEPWORDS: %s " % (keepwords)) # RETURN NOW THIS IS ENOUGH | 2.872552 | 3 |
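A minimal standalone sketch of the tweet-truncation step in the script above; truncate_to_tweet is an illustrative helper (not part of the original), and the standard-library textwrap.shorten gives the same cut-on-word-boundary behaviour the accumulation loop approximates.

import textwrap

def truncate_to_tweet(text, limit=280):
    # shorten() collapses whitespace and drops trailing words so the result
    # fits within `limit` characters, mirroring max_tweet_len above
    return textwrap.shorten(text, width=limit, placeholder="")

print(len(truncate_to_tweet("lyric " * 100)))  # always <= 280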
tools/mipgen_smmip_collapser.py | risqueslab/PolyG-MIP | 20 | 6631630 | # Written by <NAME>
# boylee [at] uw.edu
import sys
import re
import numpy as np
from scipy import optimize as optimize
from random import choice
from optparse import OptionParser
from string import maketrans
from genome_sam_collapser import *
if __name__ == "__main__":
parser = OptionParser("%prog (STDIN = coordinate_sorted_file.sam) tag_size output_prefix [options]")
parser.add_option("-b", "--split_barcodes", dest="barcode_file", type="str", help="pulls out only reads with exactly matching barcodes in provided file of label<tab>sequence")
parser.add_option("-c", "--merge_samples", action="store_true", default=False, help="selects barcodes but does not split into separate files")
parser.add_option("-d", "--dual_indexed", action="store_true", dest="dual_indexed", default=False, help="reads barcode file as dual indexed, i.e., with two columns of barcodes")
parser.add_option("-p", "--picky", action="store_true", dest="filter_molecular_tags", default=False, help="discards reads with non ATCGs in the molecular tag")
parser.add_option("-t", "--tolerant", action="store_true", dest="allow_ambiguous_barcodes", default=False, help="allows barcodes to be matched with 1bp edit distance")
parser.add_option("-m", "--mip_design_file", dest="mip_file", type="str", help="only pulls out sequences that are within 2bp of mip sites as determined by mip design file")
parser.add_option("-n", "--mip_reference", action="store_true", default=False, help="uses chromosome SAM field as MIP key")
parser.add_option("-C", "--confidence_level", dest="confidence_level", type="float", default=0.9, help="controls consensus calling: confidence refers to the chance of a tag truly representing one distinct haplotype -- high confidence leads to more random sampling to reduce the chances of chimeric consensus and low confidence leads to indiscriminate consensus calling, number refers to probability of ALL site-, barcode-, and tag-stratified reads representing unique captures for that site and barcode sequence (default is 0.9)")
parser.add_option("-T", "--no_trimming", action="store_true", dest="no_trimming", default=False, help="do not remove number of bases corresponding to mip arm sequences even if mip file is provided")
parser.add_option("-r", "--add_or_replace_readgroups", action="store_true", default=False, help="use the barcode file (if given) or barcode sequence to generate read groups")
parser.add_option("-f", "--flex_space", dest="flex_space", type="int", default=0, help="searches given number of bases on either side of read start when looking to assign a read to a known MIP target")
parser.add_option("-s", "--single_end", action="store_true", default=False, help="single end run")
parser.add_option("-S", "--no_softclip_filtering", action="store_false", dest="filter_softclips", default=True, help="retains reads with softclipping at the beginning of the read")
parser.add_option("-w", "--collapse_free", action="store_true", default=False, help="do not run collapsing -- only trim and partition reads")
parser.add_option("-x", "--exact_arms", action="store_true", default=False, help="only accept MIP reads with exact arm matches, default accepts any read at correct position")
options, args = parser.parse_args()
if options.merge_samples and not options.barcode_file:
sys.stderr.write("option 'c' requires option 'b'")
sys.exit()
if options.add_or_replace_readgroups and not options.barcode_file:
sys.stderr.write("option 'r' requires option 'b'")
sys.exit()
if options.exact_arms and not options.mip_file:
sys.stderr.write("option 'x' requires option 'm'")
sys.exit()
initialize_and_iterate(options)
sys.stderr.write("collapsing has terminated\n")
| # Written by <NAME>
# boylee [at] uw.edu
import sys
import re
import numpy as np
from scipy import optimize as optimize
from random import choice
from optparse import OptionParser
from string import maketrans
from genome_sam_collapser import *
if __name__ == "__main__":
parser = OptionParser("%prog (STDIN = coordinate_sorted_file.sam) tag_size output_prefix [options]")
parser.add_option("-b", "--split_barcodes", dest="barcode_file", type="str", help="pulls out only reads with exactly matching barcodes in provided file of label<tab>sequence")
parser.add_option("-c", "--merge_samples", action="store_true", default=False, help="selects barcodes but does not split into separate files")
parser.add_option("-d", "--dual_indexed", action="store_true", dest="dual_indexed", default=False, help="reads barcode file as dual indexed, i.e., with two columns of barcodes")
parser.add_option("-p", "--picky", action="store_true", dest="filter_molecular_tags", default=False, help="discards reads with non ATCGs in the molecular tag")
parser.add_option("-t", "--tolerant", action="store_true", dest="allow_ambiguous_barcodes", default=False, help="allows barcodes to be matched with 1bp edit distance")
parser.add_option("-m", "--mip_design_file", dest="mip_file", type="str", help="only pulls out sequences that are within 2bp of mip sites as determined by mip design file")
parser.add_option("-n", "--mip_reference", action="store_true", default=False, help="uses chromosome SAM field as MIP key")
parser.add_option("-C", "--confidence_level", dest="confidence_level", type="float", default=0.9, help="controls consensus calling: confidence refers to the chance of a tag truly representing one distinct haplotype -- high confidence leads to more random sampling to reduce the chances of chimeric consensus and low confidence leads to indiscriminate consensus calling, number refers to probability of ALL site-, barcode-, and tag-stratified reads representing unique captures for that site and barcode sequence (default is 0.9)")
parser.add_option("-T", "--no_trimming", action="store_true", dest="no_trimming", default=False, help="do not remove number of bases corresponding to mip arm sequences even if mip file is provided")
parser.add_option("-r", "--add_or_replace_readgroups", action="store_true", default=False, help="use the barcode file (if given) or barcode sequence to generate read groups")
parser.add_option("-f", "--flex_space", dest="flex_space", type="int", default=0, help="searches given number of bases on either side of read start when looking to assign a read to a known MIP target")
parser.add_option("-s", "--single_end", action="store_true", default=False, help="single end run")
parser.add_option("-S", "--no_softclip_filtering", action="store_false", dest="filter_softclips", default=True, help="retains reads with softclipping at the beginning of the read")
parser.add_option("-w", "--collapse_free", action="store_true", default=False, help="do not run collapsing -- only trim and partition reads")
parser.add_option("-x", "--exact_arms", action="store_true", default=False, help="only accept MIP reads with exact arm matches, default accepts any read at correct position")
options, args = parser.parse_args()
if options.merge_samples and not options.barcode_file:
sys.stderr.write("option 'c' requires option 'b'")
sys.exit()
if options.add_or_replace_readgroups and not options.barcode_file:
sys.stderr.write("option 'r' requires option 'b'")
sys.exit()
if options.exact_arms and not options.mip_file:
sys.stderr.write("option 'x' requires option 'm'")
sys.exit()
initialize_and_iterate(options)
sys.stderr.write("collapsing has terminated\n")
| en | 0.85371 | # Written by <NAME> # boylee [at] uw.edu | 2.490537 | 2 |
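The option-dependency checks above distil to a small, self-contained optparse pattern; the option names below mirror the entry, but the snippet is only a sketch of the validation idea, not part of the real pipeline.

import sys
from optparse import OptionParser

parser = OptionParser("%prog [options]")
parser.add_option("-b", "--split_barcodes", dest="barcode_file", type="str")
parser.add_option("-c", "--merge_samples", action="store_true", default=False)
options, args = parser.parse_args(["-c"])  # simulate passing only -c

# -c only makes sense when -b supplies a barcode file, so fail fast
if options.merge_samples and not options.barcode_file:
    sys.stderr.write("option 'c' requires option 'b'\n")
    sys.exit(1)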
doc/example/dimerization_kinetics.py | yannikschaelte/SSA | 0 | 6631631 | import ssa
import numpy as np
import matplotlib.pyplot as plt
def run():
reactants = np.array([[2, 0], [0, 1]])
products = np.array([[0, 1], [2, 0]])
volume = 1e-15
use_na = True
k_det = np.array([5e5, 0.2])
k1 = ssa.util.k_det_to_k_stoch(
k_det, reactants=reactants, volume=volume, use_na=use_na
)
k2 = np.array([1.66e-3, 0.2])
print(k1, k2)
x0_molar_concentration = np.array([5e-7, 0])
x01 = ssa.util.molar_concentration_to_molecule_number(
x0_molar_concentration, volume=volume, use_na=use_na
)
x02 = np.array([301.1, 0])
print(x01, x02)
t_max = 10.0
model = ssa.Model(
reactants=reactants, products=products, k=k2, x0=x02, t_max=t_max, n_procs=2
)
result = model.simulate(n_reps=5)
ssa.plot(result, show=False)
plt.savefig("dimerization_kinetics.png")
run()
| import ssa
import numpy as np
import matplotlib.pyplot as plt
def run():
reactants = np.array([[2, 0], [0, 1]])
products = np.array([[0, 1], [2, 0]])
volume = 1e-15
use_na = True
k_det = np.array([5e5, 0.2])
k1 = ssa.util.k_det_to_k_stoch(
k_det, reactants=reactants, volume=volume, use_na=use_na
)
k2 = np.array([1.66e-3, 0.2])
print(k1, k2)
x0_molar_concentration = np.array([5e-7, 0])
x01 = ssa.util.molar_concentration_to_molecule_number(
x0_molar_concentration, volume=volume, use_na=use_na
)
x02 = np.array([301.1, 0])
print(x01, x02)
t_max = 10.0
model = ssa.Model(
reactants=reactants, products=products, k=k2, x0=x02, t_max=t_max, n_procs=2
)
result = model.simulate(n_reps=5)
ssa.plot(result, show=False)
plt.savefig("dimerization_kinetics.png")
run()
| none | 1 | 2.235693 | 2 |
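A sketch of the unit conversions the ssa.util helpers above appear to perform, assuming the usual Gillespie convention (c = 2k/(NA*V) for the bimolecular 2A -> A2 step, c = k for the first-order step); under that assumption the numbers reproduce the hard-coded k2 ~ [1.66e-3, 0.2] and x02 ~ [301.1, 0] in the entry.

import numpy as np

NA = 6.02214076e23        # Avogadro's number
volume = 1e-15            # litres, as in the entry

k_det = np.array([5e5, 0.2])
# bimolecular forward step scaled by 2/(NA*V); unimolecular reverse unchanged
k_stoch = np.array([2 * k_det[0] / (NA * volume), k_det[1]])

x0_molar = np.array([5e-7, 0.0])
x0_molecules = x0_molar * NA * volume

print(k_stoch)        # ~ [1.66e-03, 2.00e-01]
print(x0_molecules)   # ~ [301.1, 0.0]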
|
setup.py | ratt-ru/radiopadre-client | 1 | 6631632 | from setuptools import setup
import os
from radiopadre_client.default_config import __version__
build_root = os.path.dirname(__file__)
install_requires = ['six', 'psutil']
def readme():
"""Get readme content for package long description"""
with open(os.path.join(build_root, 'README.rst')) as f:
return f.read()
scripts = ["bin/run-radiopadre"]
setup(
name="radiopadre-client",
version=__version__,
install_requires=install_requires,
python_requires='>=2.7',
author="<NAME>",
author_email="<EMAIL>",
description=("Radiopadre client-side script"),
long_description=readme(),
license="MIT",
keywords="ipython notebook fits dataset resultset visualisation",
url="http://github.com/ratt-ru/radiopadre-client",
packages=['radiopadre_client', 'radiopadre_client.backends', 'iglesia'],
scripts=scripts,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
)
| from setuptools import setup
import os
from radiopadre_client.default_config import __version__
build_root = os.path.dirname(__file__)
install_requires = ['six', 'psutil']
def readme():
"""Get readme content for package long description"""
with open(os.path.join(build_root, 'README.rst')) as f:
return f.read()
scripts = ["bin/run-radiopadre"]
setup(
name="radiopadre-client",
version=__version__,
install_requires=install_requires,
python_requires='>=2.7',
author="<NAME>",
author_email="<EMAIL>",
description=("Radiopadre client-side script"),
long_description=readme(),
license="MIT",
keywords="ipython notebook fits dataset resultset visualisation",
url="http://github.com/ratt-ru/radiopadre-client",
packages=['radiopadre_client', 'radiopadre_client.backends', 'iglesia'],
scripts=scripts,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
)
| en | 0.425696 | Get readme content for package long description | 1.585702 | 2 |
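The setup script above single-sources its version from a package module; the sketch below shows that pattern in isolation with an invented module written to a temporary directory (the real radiopadre layout may differ).

import pathlib, tempfile, importlib.util

# Write an invented module holding the single source of truth for the version,
# then load it the way setup.py imports radiopadre_client.default_config above.
mod_path = pathlib.Path(tempfile.mkdtemp()) / "default_config.py"
mod_path.write_text('__version__ = "1.2.3"\n')

spec = importlib.util.spec_from_file_location("default_config", str(mod_path))
cfg = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cfg)
print(cfg.__version__)  # the value that would be passed to setup(version=...)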
pyExplore/preprocessing/cleaner.py | rahul1809/pyExplore | 0 | 6631633 | """
Module containing functionality to process dirty data during preprocessing
and make it usable for future analysis.
"""
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from pyExplore.util import util
class Dataset:
def __init__(self, df):
self.df = df
@classmethod
def load_data(cls, file_path, **kwargs):
return cls(util.load_dataset(file_path, **kwargs))
def drop_multiple_columns(self, column_name_list=None):
"""
Drop multiple columns based on their column names
Input : pandas dataframe, List of column names in the preprocessing set
"""
if column_name_list:
self.df.drop(column_name_list, axis=1, inplace=True)
def convert_categorial_data_to_numerical_data(self):
pass
    def remove_duplicates(self, *args, **kwargs):
        # pandas exposes drop_duplicates; reassign because it returns a new frame
        self.df = self.df.drop_duplicates(*args, **kwargs)
    def one_hot_encode_data(self):
        enc = OneHotEncoder(handle_unknown="ignore")
        enc.fit(self.df)
        # transform returns a sparse matrix; return it so callers can use it
        return enc.transform(self.df)
def remove_col_white_space(self, column):
# remove white space from the beginning and end of string
self.df[column] = self.df[column].str.strip()
    def change_dtypes(self, column_int=None, column_float=None):
        """
        Changing dtypes to save memory
        Output -> updated df with smaller dtypes
        """
        for column in column_float or []:
            self.df[column] = self.df[column].astype("float32")
        for column in column_int or []:
            self.df[column] = self.df[column].astype("int32")
def remove_majority_na_columns(self, inplace=True):
self.df.dropna(thresh=len(self.df) / 2, axis=1, inplace=inplace)
class TimeSeriesData(Dataset):
"""
Special class to handle time-series
datasets
"""
def __init__(self, df):
super().__init__(df)
def convert_to_datetime_object(self, column, strfmt=None):
self.df[column] = pd.to_datetime(
self.df[column], format=strfmt, infer_datetime_format=True
)
| """
Module containing functionality to process dirty data during preprocessing
and make it usable for future analysis.
"""
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from pyExplore.util import util
class Dataset:
def __init__(self, df):
self.df = df
@classmethod
def load_data(cls, file_path, **kwargs):
return cls(util.load_dataset(file_path, **kwargs))
def drop_multiple_columns(self, column_name_list=None):
"""
Drop multiple columns based on their column names
Input : pandas dataframe, List of column names in the preprocessing set
"""
if column_name_list:
self.df.drop(column_name_list, axis=1, inplace=True)
def convert_categorial_data_to_numerical_data(self):
pass
    def remove_duplicates(self, *args, **kwargs):
        # pandas exposes drop_duplicates; reassign because it returns a new frame
        self.df = self.df.drop_duplicates(*args, **kwargs)
    def one_hot_encode_data(self):
        enc = OneHotEncoder(handle_unknown="ignore")
        enc.fit(self.df)
        # transform returns a sparse matrix; return it so callers can use it
        return enc.transform(self.df)
def remove_col_white_space(self, column):
# remove white space from the beginning and end of string
self.df[column] = self.df[column].str.strip()
    def change_dtypes(self, column_int=None, column_float=None):
        """
        Changing dtypes to save memory
        Output -> updated df with smaller dtypes
        """
        for column in column_float or []:
            self.df[column] = self.df[column].astype("float32")
        for column in column_int or []:
            self.df[column] = self.df[column].astype("int32")
def remove_majority_na_columns(self, inplace=True):
self.df.dropna(thresh=len(self.df) / 2, axis=1, inplace=inplace)
class TimeSeriesData(Dataset):
"""
Special class to handle time-series
datasets
"""
def __init__(self, df):
super().__init__(df)
def convert_to_datetime_object(self, column, strfmt=None):
self.df[column] = pd.to_datetime(
self.df[column], format=strfmt, infer_datetime_format=True
)
| en | 0.750902 | Module Contents functionality to process the dirty preprocessing and make it useable for future analysis Drop multiple columns based on their column names Input : pandas dataframe, List of column names in the preprocessing set # remove white space from the beginning and end of string Changing dtypes to save memory Output -> updated df with smaller Special class to handle time-series datasets | 2.968466 | 3 |
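A self-contained illustration of the two ideas the class above wraps — downcasting numeric dtypes to save memory and parsing a string column with pd.to_datetime; the column names and data are made up for the example.

import pandas as pd
import numpy as np

df = pd.DataFrame({
    "price": np.random.rand(1000) * 100.0,        # float64 by default
    "count": np.random.randint(0, 10, 1000),      # int64 by default
    "when": ["2021-01-15"] * 1000,
})
before = df.memory_usage(deep=True).sum()
df["price"] = df["price"].astype("float32")
df["count"] = df["count"].astype("int32")
df["when"] = pd.to_datetime(df["when"], format="%Y-%m-%d")
after = df.memory_usage(deep=True).sum()
print(before, after)   # the downcast columns use roughly half the memory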
for-proriv/myfuture/companies/models.py | DmitryAA/EdVision | 0 | 6631634 | <reponame>DmitryAA/EdVision
from django.db import models
from students.models import Address
from django.contrib.auth.models import User
class Companies(models.Model):
    name = models.CharField(max_length = 200, verbose_name = 'Название фирмы')
legalName = models.CharField(max_length = 200, verbose_name = '<NAME>вание')
description = models.CharField(max_length = 300, verbose_name = 'Описание')
inn = models.CharField(max_length = 20, verbose_name = 'ИНН')
ogrn = models.CharField(max_length = 20, verbose_name = 'ОГРН')
id_address = models.ForeignKey(Address, on_delete=models.PROTECT)
id_auth = models.ForeignKey(User, on_delete=models.CASCADE)
| from django.db import models
from students.models import Address
from django.contrib.auth.models import User
class Companies(models.Model):
    name = models.CharField(max_length = 200, verbose_name = 'Название фирмы')
legalName = models.CharField(max_length = 200, verbose_name = '<NAME>вание')
description = models.CharField(max_length = 300, verbose_name = 'Описание')
inn = models.CharField(max_length = 20, verbose_name = 'ИНН')
ogrn = models.CharField(max_length = 20, verbose_name = 'ОГРН')
id_address = models.ForeignKey(Address, on_delete=models.PROTECT)
id_auth = models.ForeignKey(User, on_delete=models.CASCADE) | none | 1 | 2.125196 | 2 |
|
graphwave/roleX.py | mtang724/graphwave | 0 | 6631635 | # -*- coding: utf-8 -*-
"""
Created on Wed May 3 09:57:52 2017
@author: Lab41: Github: Circulo/circulo/algorithms/rolx.py
#### https://github.com/Lab41/Circulo/blob/master/circulo/algorithms/rolx.py
Set of functions to compute the RolX featurization
"""
import sys
import math
import igraph
import numpy as np
from numpy.linalg import lstsq
from numpy import dot
from scipy.cluster.vq import kmeans2, vq
from scipy.linalg import norm
from scipy.optimize import minimize
from sklearn.decomposition import NMF
import networkx as nx
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import sklearn as sk
import pandas as pd
import torch
from utils.utils import read_real_datasets, NodeClassificationDataset, MLP, DataSplit
def extract_rolx_roles(G, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
"""
print("Creating Vertex Features matrix")
V = vertex_features(G)
#print("V is a %s by %s matrix." % V.shape)
basis, coef = get_factorization(V, roles)
H = basis
#print("Node-role matrix is of dimensions %s by %s" % H.shape)
#print(H)
K = make_sense(G, H)
#print("Role-feature matrix is of dimensions %s by %s" % K.shape)
#print(K)
return H, K
def extract_rolx_roles_bis(G,V, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
    Takes a precomputed feature matrix V as input instead of rebuilding it from G.
"""
basis, coef = get_factorization(V, roles)
H = basis
print("Node-role matrix is of dimensions %s by %s" % H.shape)
#print(H)
K = make_sense(G, H)
print("Role-feature matrix is of dimensions %s by %s" % K.shape)
#print(K)
return H, K
def recursive_feature(G, f, n):
"""
G: iGraph graph with annotations
    f: function computing a per-vertex feature of G
n: int, recursion level
Computes the given function recursively on each vertex
Current precondition: already have run the computation for G, func, n-1.
"""
return np.matrix(recursive_feature_array(G,f,n))
def recursive_feature_array(G, func, n):
"""
Computes recursive features of the graph G for the provided function of G, returning
the matrix representing the nth level of the recursion.
"""
attr_name = "_rolx_" + func.__name__ + "_" + str(n)
if attr_name in G.vs.attributes():
result = np.array(G.vs[attr_name])
return result
if n==0:
stats = func(G)
result = np.array([[x] for x in stats])
result = result * 1.0
G.vs[attr_name] = result
return result
prev_stats = recursive_feature_array(G, func, n-1)
all_neighbor_stats = []
for v in G.vs:
neighbors = G.neighbors(v)
degree = len(neighbors)
if degree == 0:
neighbor_avgs = neighbor_sums = np.zeros(prev_stats[0].size)
else:
prev_neighbor_stats = [prev_stats[x] for x in neighbors]
neighbor_sums_vec = sum(prev_neighbor_stats)
neighbor_avgs_vec = neighbor_sums_vec / degree
v_stats = np.concatenate((neighbor_sums_vec, neighbor_avgs_vec), axis=0)
all_neighbor_stats.append(v_stats)
G.vs[attr_name] = all_neighbor_stats
return all_neighbor_stats
def approx_linear_solution(w, A, threshold=1e-15):
'''
Checks if w is linearly dependent on the columns of A, this is done by solving the least squares problem (LSP)
min || w - Ax ||_2^2
x
and checking if || w - Ax_star || <= threshold, where x_star is the arg_minimizer of the LSP
w: column vector
A: matrix
threshold: int
'''
x0 = np.zeros(A.shape[1])
x_star, residuals, rank, s = lstsq(A, w)
norm_residual = norm(residuals)
result = True if norm_residual <= threshold else False
return (result, norm_residual, x_star)
def degree(G):
""" Auxiliary function to calculate the degree of each element of G. """
return G.degree()
def vertex_egonet(G, v):
""" Computes the number of edges in the ego network of the vertex v. """
ego_network = G.induced_subgraph(G.neighborhood(v))
ego_edges = ego_network.ecount()
return ego_edges
def egonet(G):
""" Computes the ego network for all vertices v in G. """
return [vertex_egonet(G, v) for v in G.vs]
def vertex_egonet_out(G, v):
""" Computes the outgoing edges from the ego network of the vertex v in G. """
neighbors = G.neighborhood(v)
ego_network = G.induced_subgraph(neighbors)
ego_edges = ego_network.ecount()
degree_sum = sum([G.degree(v) for v in neighbors])
out_edges = degree_sum - 2*ego_edges #Summing over degree will doublecount every edge within the ego network
return out_edges
def egonet_out(G):
""" Computes the number of outgoing ego network edges for every vertex in G. """
return [vertex_egonet_out(G, v) for v in G.vs]
def vertex_features(g):
"""
Constructs a vertex feature matrix using recursive feature generation, then uses least-squares solving
to eliminate those exhibiting approximate linear dependence.
"""
G = g.copy()
num_rows = G.vcount()
features = [degree, egonet, egonet_out]
V = np.matrix(np.zeros((num_rows, 16*len(features))))
next_feature_col = 0
for feature in features:
base = recursive_feature(G, feature, 0)
base = base/norm(base)
V = add_col(V, base, next_feature_col)
next_feature_col += 1
level = 1
accepted_features = True
while accepted_features:
accepted_features = False
feature_matrix = recursive_feature(G, feature, level)
rows, cols = feature_matrix.shape
for i in range(cols):
b = feature_matrix[:,i]
b = b/norm(b)
mat = V[:,:next_feature_col]
threshold = 10.0**(-15+level)
(is_approx_soln, _, _) = approx_linear_solution(b, mat, threshold)
if not is_approx_soln:
V = add_col(V, b, next_feature_col)
next_feature_col += 1
accepted_features = True
level += 1
return V[:, :next_feature_col]
def add_col(V, b, insert_col):
""" Add the given column b to the matrix V, enlarging the matrix if necessary. """
rows, cols = V.shape
if insert_col == cols: # need to resize V
zeros = np.matrix(np.zeros((rows, 1)))
V = np.concatenate((V, zeros), axis=1)
V[:, insert_col] = b
return V
def kmeans_quantize(M, bits):
""" Performs k-means quantization on the given matrix. Returns the encoded matrix and the number of bits needed for encoding it. """
k = 2**bits
obs = np.asarray(M).reshape(-1)
centroid, label = kmeans2(obs, k)
enc_M = [centroid[v] for v in label]
enc_M = np.matrix(enc_M).reshape(M.shape)
return enc_M, (bits * enc_M.size)
def kl_divergence(A,B):
""" Computes the Kullback-Leibler divergence of the two matrices A and B. """
    a = np.asarray(A, dtype=float)
    b = np.asarray(B, dtype=float)
return np.sum(np.where(a != 0, a * np.log(a / b), 0))
def description_length(V, fctr_res, bits=10):
""" Computes the length necessary to describe the given model with the given number of bits. """
W = fctr_res[0]
H = fctr_res[1]
enc_W, enc_W_cost = kmeans_quantize(W, bits)
enc_H, enc_H_cost = kmeans_quantize(H, bits)
enc_cost = enc_W_cost + enc_H_cost
err_cost = kl_divergence(V, enc_W*enc_H)
return enc_W, enc_H, enc_cost, err_cost
def standardize_rows(M):
""" Distribute the rows of the cost matrix normally to allow for accurate comparisons of error and description
cost. """
rv = np.matrix(M)
for i in range(rv.shape[0]):
mean = np.mean(M[i, :])
stdev = np.std(M[i, :])
rv[i, :]= (M[i, :]- mean)/stdev
return rv
# def standardize(M):
# m_flat = np.asarray(M).reshape(-1)
# mean = np.mean(m_flat)
# stdev = np.std(m_flat)
# m_flat = (m_flat - mean)/stdev
#
# return m_flat.reshape(M.shape)
def get_factorization(V, num_roles):
""" Obtains a nonnegative matrix factorization of the matrix V with num_roles intermediate roles. """
model = NMF(n_components=num_roles, init='random', random_state=0)
model.fit(V)
node_roles = model.transform(V)
role_features = model.components_
return torch.from_numpy(node_roles), torch.from_numpy(role_features)
def get_optimal_factorization(V, min_roles=2, max_roles=6, min_bits=1, max_bits=10):
""" Uses grid search to find the optimal parameter number and encoding of the given matrix factorization. """
max_roles = min(max_roles, V.shape[1]) # Can't have more possible roles than features
num_role_options = max_roles - min_roles
num_bit_options = max_bits - min_bits
mat_enc_cost = np.zeros((num_role_options, num_bit_options))
mat_err_cost = np.zeros((num_role_options, num_bit_options))
mat_fctr_res = [[0] * num_bit_options] * num_role_options
# Setup and run the factorization problem
for i in range(num_role_options):
rank = min_roles + i
fctr_res = get_factorization(V, rank)
for j in range(num_bit_options):
bits = min_bits + j
enc_W, enc_H, enc_cost, err_cost = description_length(V, fctr_res, bits)
mat_enc_cost[i,j] = enc_cost
mat_err_cost[i,j] = err_cost
mat_fctr_res[i][j] = (enc_W, enc_H)
mat_std_enc_cost = standardize_rows(mat_enc_cost)
mat_std_err_cost = standardize_rows(mat_err_cost)
mat_total_cost = mat_enc_cost + mat_err_cost
mat_total_std_cost = mat_std_enc_cost + mat_std_err_cost
    # print mat_total_cost
print("rank, bits, enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost, std_total_cost")
for i in range(num_role_options):
for j in range(num_bit_options):
rank = min_roles + i
bits = min_bits + j
enc_cost = mat_enc_cost[i,j]
err_cost = mat_err_cost[i,j]
std_enc_cost = mat_std_enc_cost[i,j]
std_err_cost = mat_std_err_cost[i,j]
total_cost = mat_total_cost[i,j]
total_std_cost = mat_total_std_cost[i,j]
print("%s, %s, (%s, %s, %s), (%s, %s, %s)" % (rank, bits,
enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost, total_std_cost))
min_idx = mat_total_std_cost.argmin()
min_coord = np.unravel_index(min_idx, mat_total_std_cost.shape)
min_role_index, min_bit_index = min_coord
min_role_value = min_role_index + min_roles
min_bit_value = min_bit_index + min_bits
min_std_enc_cost = mat_std_enc_cost[min_coord]
min_std_err_cost = mat_std_err_cost[min_coord]
min_total_std_cost = mat_total_std_cost[min_coord]
print("%s, %s, (%s, %s, %s)" % (min_role_value, min_bit_value, min_std_enc_cost, min_std_err_cost, min_total_std_cost))
return mat_fctr_res[min_role_index][min_bit_index]
def make_sense(G, H):
""" Given graph G and node-role matrix H, returns a role-feature matrix K for sensemaking analyses of roles. """
features = [ 'betweenness', 'closeness', 'degree', 'diversity', 'eccentricity', 'pagerank', 'personalized_pagerank', 'strength' ]
feature_fns = [ getattr(G, f) for f in features ]
feature_matrix = [ func() for func in feature_fns ]
feature_matrix = np.matrix(feature_matrix).transpose()
#print(feature_matrix)
M = feature_matrix
for i in range(M.shape[1]):
M[:,i] = M[:,i] / norm(M[:,i])
K = complete_factor(H, M, h_on_left=True)
#print(K)
return K
def sense_residual_left_factor(W, H, M):
W = np.matrix(W).reshape((M.shape[0], H.shape[0]))
return norm(M - W*H)
def sense_residual_right_factor(K, H, M):
K = np.matrix(K).reshape((H.shape[1], M.shape[1]))
# print(M.shape,H.shape,K.shape)
return norm(M - H*K)
def complete_factor(H, M, h_on_left=True):
"""Given nonnegative matrix M and a nonnegative factor H of M, finds the other (nonnegative) factor of M.
H: known factor of matrix M.
M: product matrix.
h_on_left: boolean, true if H is the left factor of M, false if H is the right factor.
If H is left factor, find the matrix K such that HK=M. If H is the right factor, finds W such that WH=M
Result is an appropriately-sized matrix. """
if h_on_left:
shape = (H.shape[1], M.shape[1])
residual = sense_residual_right_factor
else:
shape = (M.shape[0], H.shape[0])
residual = sense_residual_left_factor
size = shape[0] * shape[1]
guess = np.random.rand(size)
bounds = [(0, None)] * size # (all elements of matrix must be nonnegative)
result = minimize(residual, guess, args=(H, M), method='L-BFGS-B', bounds=bounds)
x = result["x"]
G = np.matrix(x).reshape(shape)
return G
def main(G_path):
G = igraph.Graph.Read_GML(G_path)
return extract_rolx_roles(G)
# if len(argv) > 0:
# roles = role_id_num
#A = nx.adjacency_matrix(G).todense()
#Gi = igraph.Graph.Adjacency((A > 0).tolist())
#test = extract_rolx_roles(Gi, roles=roles)
### Define a distance based on these distribution over roles
# D_roleX = distance_nodes(test)
#return extract_rolx_roles(G, roles=roles)
# else:
# return H, K
def read_roleid(path_to_file):
role_id_fl = []
with open(path_to_file) as f:
contents = f.readlines()
for content in contents:
content = content.strip('\n')
role_id_fl.append(float(content))
role_id = []
for role in role_id_fl:
role_id.append(int(role))
return role_id
def cluster_graph(role_id, node_embeddings):
colors = role_id
nb_clust = len(np.unique(role_id))
pca = PCA(n_components=2)
trans_data = pca.fit_transform(StandardScaler().fit_transform(node_embeddings))
km = KMeans(n_clusters=nb_clust)
km.fit(trans_data)
labels_pred = km.labels_
######## Params for plotting
cmapx = plt.get_cmap('rainbow')
x = np.linspace(0, 1, nb_clust + 1)
col = [cmapx(xx) for xx in x]
markers = {0: '*', 1: '.', 2: ',', 3: 'o', 4: 'v', 5: '^', 6: '<', 7: '>', 8: 3, 9: 'd', 10: '+', 11: 'x',
12: 'D', 13: '|', 14: '_', 15: 4, 16: 0, 17: 1, 18: 2, 19: 6, 20: 7}
for c in np.unique(role_id):
indc = [i for i, x in enumerate(role_id) if x == c]
plt.scatter(trans_data[indc, 0], trans_data[indc, 1],
c=np.array(col)[list(np.array(labels_pred)[indc])],
marker=markers[c % len(markers)], s=300)
labels = role_id
for label, c, x, y in zip(labels, labels_pred, trans_data[:, 0], trans_data[:, 1]):
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
plt.show()
return labels_pred, colors, trans_data, nb_clust
def unsupervised_evaluate(colors, labels_pred, trans_data, nb_clust):
ami = sk.metrics.adjusted_mutual_info_score(colors, labels_pred)
sil = sk.metrics.silhouette_score(trans_data, labels_pred, metric='euclidean')
ch = sk.metrics.calinski_harabasz_score(trans_data, labels_pred)
hom = sk.metrics.homogeneity_score(colors, labels_pred)
comp = sk.metrics.completeness_score(colors, labels_pred)
#print('Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n')
#print(str(hom) + '\t' + str(comp) + '\t' + str(ami) + '\t' + str(nb_clust) + '\t' + str(ch) + '\t' + str(sil))
return hom, comp, ami, nb_clust, ch, sil
def draw_pca(role_id, node_embeddings):
cmap = plt.get_cmap('hot')
x_range = np.linspace(0, 0.8, len(np.unique(role_id)))
coloring = {u: cmap(x_range[i]) for i, u in enumerate(np.unique(role_id))}
node_color = [coloring[role_id[i]] for i in range(len(role_id))]
pca = PCA(n_components=2)
node_embedded = StandardScaler().fit_transform(node_embeddings)
principalComponents = pca.fit_transform(node_embedded)
principalDf = pd.DataFrame(data=principalComponents,
columns=['principal component 1', 'principal component 2'])
principalDf['target'] = role_id
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('Principal Component 1', fontsize=15)
ax.set_ylabel('Principal Component 2', fontsize=15)
ax.set_title('2 PCA Components', fontsize=20)
targets = np.unique(role_id)
    for target in targets:
        color = coloring[target]
        indicesToKeep = principalDf['target'] == target
ax.scatter(principalDf.loc[indicesToKeep, 'principal component 1'],
principalDf.loc[indicesToKeep, 'principal component 2'],
s=50,
c=color)
ax.legend(targets)
ax.grid()
plt.show()
def average(lst):
return sum(lst) / len(lst)
if __name__ == "__main__":
# homs = []
# comps = []
# amis = []
# chs = []
# sils = []
# for i in range(10):
# role_id = read_roleid("new_graphs/Varied{}.roleid".format(i))
# # role_id_num = len(set(role_id))
# embeddings, attributes = main("new_graphs/Varied{}.gml".format(i))
# labels_pred, colors, trans_data, nb_clust = cluster_graph(role_id, embeddings)
# draw_pca(role_id, embeddings)
# hom, comp, ami, nb_clust, ch, sil = unsupervised_evaluate(colors, labels_pred, trans_data, nb_clust)
# print(hom, comp, ami, nb_clust, ch, sil)
# homs.append(hom)
# comps.append(comp)
# amis.append(ami)
# chs.append(ch)
# sils.append(sil)
# print('Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n')
# print(average(homs), average(comps), average(amis), nb_clust, average(chs), average(sils))
# Real world data
import statistics
acc = []
for i in range(4):
# G, labels = read_real_datasets("cornell")
G = nx.read_edgelist("realdatasets/wisconsin.edgelist")
nx.write_gml(G, "wisconsin.gml")
embeddings, attributes = main("wisconsin.gml")
node_embeddings = embeddings
node_labels = read_roleid("realdatasets/np_wisconsin.txt")
        node_labels = torch.LongTensor(node_labels)  # CrossEntropyLoss expects integer class indices
input_dims = node_embeddings.shape
class_number = int(max(node_labels)) + 1
FNN = MLP(num_layers=5, input_dim=input_dims[1], hidden_dim=input_dims[1] // 2, output_dim=class_number)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(FNN.parameters())
dataset = NodeClassificationDataset(node_embeddings, node_labels)
split = DataSplit(dataset, shuffle=True)
train_loader, val_loader, test_loader = split.get_split(batch_size=64, num_workers=0)
# train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True)
best = -float('inf')
for epoch in range(100):
for i, data in enumerate(train_loader, 0):
# data = data.to(device)
inputs, labels = data
inputs = inputs
labels = labels
y_pred = FNN(inputs.float())
loss = criterion(y_pred, labels)
print(epoch, i, loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
correct = 0
total = 0
for data in val_loader:
inputs, labels = data
inputs = inputs
labels = labels
outputs = FNN(inputs.float())
_, predicted = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
total += labels.size(0)
correct += torch.sum(predicted == labels)
if correct / total > best:
best = correct / total
torch.save(FNN.state_dict(), 'best_mlp.pkl')
print(str(epoch), correct / total)
with torch.no_grad():
FNN.load_state_dict(torch.load('best_mlp.pkl'))
correct = 0
total = 0
for data in test_loader:
inputs, labels = data
inputs = inputs
labels = labels
outputs = FNN(inputs.float())
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += torch.sum(predicted == labels)
print((correct / total).item())
acc.append((correct / total).item())
print("mean:")
print(statistics.mean(acc))
print("std:")
print(statistics.stdev(acc))
| # -*- coding: utf-8 -*-
"""
Created on Wed May 3 09:57:52 2017
@author: Lab41: Github: Circulo/circulo/algorithms/rolx.py
#### https://github.com/Lab41/Circulo/blob/master/circulo/algorithms/rolx.py
Set of functions to compute the RolX featurization
"""
import sys
import math
import igraph
import numpy as np
from numpy.linalg import lstsq
from numpy import dot
from scipy.cluster.vq import kmeans2, vq
from scipy.linalg import norm
from scipy.optimize import minimize
from sklearn.decomposition import NMF
import networkx as nx
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import sklearn as sk
import pandas as pd
import torch
from utils.utils import read_real_datasets, NodeClassificationDataset, MLP, DataSplit
def extract_rolx_roles(G, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
"""
print("Creating Vertex Features matrix")
V = vertex_features(G)
#print("V is a %s by %s matrix." % V.shape)
basis, coef = get_factorization(V, roles)
H = basis
#print("Node-role matrix is of dimensions %s by %s" % H.shape)
#print(H)
K = make_sense(G, H)
#print("Role-feature matrix is of dimensions %s by %s" % K.shape)
#print(K)
return H, K
def extract_rolx_roles_bis(G,V, roles=2):
"""
Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary.
    Takes a precomputed feature matrix V as input instead of rebuilding it from G.
"""
basis, coef = get_factorization(V, roles)
H = basis
print("Node-role matrix is of dimensions %s by %s" % H.shape)
#print(H)
K = make_sense(G, H)
print("Role-feature matrix is of dimensions %s by %s" % K.shape)
#print(K)
return H, K
def recursive_feature(G, f, n):
"""
G: iGraph graph with annotations
    f: function computing a per-vertex feature of G
n: int, recursion level
Computes the given function recursively on each vertex
Current precondition: already have run the computation for G, func, n-1.
"""
return np.matrix(recursive_feature_array(G,f,n))
def recursive_feature_array(G, func, n):
"""
Computes recursive features of the graph G for the provided function of G, returning
the matrix representing the nth level of the recursion.
"""
attr_name = "_rolx_" + func.__name__ + "_" + str(n)
if attr_name in G.vs.attributes():
result = np.array(G.vs[attr_name])
return result
if n==0:
stats = func(G)
result = np.array([[x] for x in stats])
result = result * 1.0
G.vs[attr_name] = result
return result
prev_stats = recursive_feature_array(G, func, n-1)
all_neighbor_stats = []
for v in G.vs:
neighbors = G.neighbors(v)
degree = len(neighbors)
if degree == 0:
neighbor_avgs = neighbor_sums = np.zeros(prev_stats[0].size)
else:
prev_neighbor_stats = [prev_stats[x] for x in neighbors]
neighbor_sums_vec = sum(prev_neighbor_stats)
neighbor_avgs_vec = neighbor_sums_vec / degree
v_stats = np.concatenate((neighbor_sums_vec, neighbor_avgs_vec), axis=0)
all_neighbor_stats.append(v_stats)
G.vs[attr_name] = all_neighbor_stats
return all_neighbor_stats
def approx_linear_solution(w, A, threshold=1e-15):
'''
Checks if w is linearly dependent on the columns of A, this is done by solving the least squares problem (LSP)
min || w - Ax ||_2^2
x
and checking if || w - Ax_star || <= threshold, where x_star is the arg_minimizer of the LSP
w: column vector
A: matrix
threshold: int
'''
x0 = np.zeros(A.shape[1])
x_star, residuals, rank, s = lstsq(A, w)
norm_residual = norm(residuals)
result = True if norm_residual <= threshold else False
return (result, norm_residual, x_star)
def degree(G):
""" Auxiliary function to calculate the degree of each element of G. """
return G.degree()
def vertex_egonet(G, v):
""" Computes the number of edges in the ego network of the vertex v. """
ego_network = G.induced_subgraph(G.neighborhood(v))
ego_edges = ego_network.ecount()
return ego_edges
def egonet(G):
""" Computes the ego network for all vertices v in G. """
return [vertex_egonet(G, v) for v in G.vs]
def vertex_egonet_out(G, v):
""" Computes the outgoing edges from the ego network of the vertex v in G. """
neighbors = G.neighborhood(v)
ego_network = G.induced_subgraph(neighbors)
ego_edges = ego_network.ecount()
degree_sum = sum([G.degree(v) for v in neighbors])
out_edges = degree_sum - 2*ego_edges #Summing over degree will doublecount every edge within the ego network
return out_edges
def egonet_out(G):
""" Computes the number of outgoing ego network edges for every vertex in G. """
return [vertex_egonet_out(G, v) for v in G.vs]
def vertex_features(g):
"""
Constructs a vertex feature matrix using recursive feature generation, then uses least-squares solving
to eliminate those exhibiting approximate linear dependence.
"""
G = g.copy()
num_rows = G.vcount()
features = [degree, egonet, egonet_out]
V = np.matrix(np.zeros((num_rows, 16*len(features))))
next_feature_col = 0
for feature in features:
base = recursive_feature(G, feature, 0)
base = base/norm(base)
V = add_col(V, base, next_feature_col)
next_feature_col += 1
level = 1
accepted_features = True
while accepted_features:
accepted_features = False
feature_matrix = recursive_feature(G, feature, level)
rows, cols = feature_matrix.shape
for i in range(cols):
b = feature_matrix[:,i]
b = b/norm(b)
mat = V[:,:next_feature_col]
threshold = 10.0**(-15+level)
(is_approx_soln, _, _) = approx_linear_solution(b, mat, threshold)
if not is_approx_soln:
V = add_col(V, b, next_feature_col)
next_feature_col += 1
accepted_features = True
level += 1
return V[:, :next_feature_col]
def add_col(V, b, insert_col):
""" Add the given column b to the matrix V, enlarging the matrix if necessary. """
rows, cols = V.shape
if insert_col == cols: # need to resize V
zeros = np.matrix(np.zeros((rows, 1)))
V = np.concatenate((V, zeros), axis=1)
V[:, insert_col] = b
return V
def kmeans_quantize(M, bits):
""" Performs k-means quantization on the given matrix. Returns the encoded matrix and the number of bits needed for encoding it. """
k = 2**bits
obs = np.asarray(M).reshape(-1)
centroid, label = kmeans2(obs, k)
enc_M = [centroid[v] for v in label]
enc_M = np.matrix(enc_M).reshape(M.shape)
return enc_M, (bits * enc_M.size)
def kl_divergence(A,B):
""" Computes the Kullback-Leibler divergence of the two matrices A and B. """
    a = np.asarray(A, dtype=float)
    b = np.asarray(B, dtype=float)
return np.sum(np.where(a != 0, a * np.log(a / b), 0))
def description_length(V, fctr_res, bits=10):
""" Computes the length necessary to describe the given model with the given number of bits. """
W = fctr_res[0]
H = fctr_res[1]
enc_W, enc_W_cost = kmeans_quantize(W, bits)
enc_H, enc_H_cost = kmeans_quantize(H, bits)
enc_cost = enc_W_cost + enc_H_cost
err_cost = kl_divergence(V, enc_W*enc_H)
return enc_W, enc_H, enc_cost, err_cost
def standardize_rows(M):
""" Distribute the rows of the cost matrix normally to allow for accurate comparisons of error and description
cost. """
rv = np.matrix(M)
for i in range(rv.shape[0]):
mean = np.mean(M[i, :])
stdev = np.std(M[i, :])
rv[i, :]= (M[i, :]- mean)/stdev
return rv
# def standardize(M):
# m_flat = np.asarray(M).reshape(-1)
# mean = np.mean(m_flat)
# stdev = np.std(m_flat)
# m_flat = (m_flat - mean)/stdev
#
# return m_flat.reshape(M.shape)
def get_factorization(V, num_roles):
""" Obtains a nonnegative matrix factorization of the matrix V with num_roles intermediate roles. """
model = NMF(n_components=num_roles, init='random', random_state=0)
model.fit(V)
node_roles = model.transform(V)
role_features = model.components_
return torch.from_numpy(node_roles), torch.from_numpy(role_features)
def get_optimal_factorization(V, min_roles=2, max_roles=6, min_bits=1, max_bits=10):
""" Uses grid search to find the optimal parameter number and encoding of the given matrix factorization. """
max_roles = min(max_roles, V.shape[1]) # Can't have more possible roles than features
num_role_options = max_roles - min_roles
num_bit_options = max_bits - min_bits
mat_enc_cost = np.zeros((num_role_options, num_bit_options))
mat_err_cost = np.zeros((num_role_options, num_bit_options))
mat_fctr_res = [[0] * num_bit_options] * num_role_options
# Setup and run the factorization problem
for i in range(num_role_options):
rank = min_roles + i
fctr_res = get_factorization(V, rank)
for j in range(num_bit_options):
bits = min_bits + j
enc_W, enc_H, enc_cost, err_cost = description_length(V, fctr_res, bits)
mat_enc_cost[i,j] = enc_cost
mat_err_cost[i,j] = err_cost
mat_fctr_res[i][j] = (enc_W, enc_H)
mat_std_enc_cost = standardize_rows(mat_enc_cost)
mat_std_err_cost = standardize_rows(mat_err_cost)
mat_total_cost = mat_enc_cost + mat_err_cost
mat_total_std_cost = mat_std_enc_cost + mat_std_err_cost
    # print mat_total_cost
print("rank, bits, enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost, std_total_cost")
for i in range(num_role_options):
for j in range(num_bit_options):
rank = min_roles + i
bits = min_bits + j
enc_cost = mat_enc_cost[i,j]
err_cost = mat_err_cost[i,j]
std_enc_cost = mat_std_enc_cost[i,j]
std_err_cost = mat_std_err_cost[i,j]
total_cost = mat_total_cost[i,j]
total_std_cost = mat_total_std_cost[i,j]
print("%s, %s, (%s, %s, %s), (%s, %s, %s)" % (rank, bits,
enc_cost, err_cost, total_cost, std_enc_cost, std_err_cost, total_std_cost))
min_idx = mat_total_std_cost.argmin()
min_coord = np.unravel_index(min_idx, mat_total_std_cost.shape)
min_role_index, min_bit_index = min_coord
min_role_value = min_role_index + min_roles
min_bit_value = min_bit_index + min_bits
min_std_enc_cost = mat_std_enc_cost[min_coord]
min_std_err_cost = mat_std_err_cost[min_coord]
min_total_std_cost = mat_total_std_cost[min_coord]
print("%s, %s, (%s, %s, %s)" % (min_role_value, min_bit_value, min_std_enc_cost, min_std_err_cost, min_total_std_cost))
return mat_fctr_res[min_role_index][min_bit_index]
def make_sense(G, H):
""" Given graph G and node-role matrix H, returns a role-feature matrix K for sensemaking analyses of roles. """
features = [ 'betweenness', 'closeness', 'degree', 'diversity', 'eccentricity', 'pagerank', 'personalized_pagerank', 'strength' ]
feature_fns = [ getattr(G, f) for f in features ]
feature_matrix = [ func() for func in feature_fns ]
feature_matrix = np.matrix(feature_matrix).transpose()
#print(feature_matrix)
M = feature_matrix
for i in range(M.shape[1]):
M[:,i] = M[:,i] / norm(M[:,i])
K = complete_factor(H, M, h_on_left=True)
#print(K)
return K
def sense_residual_left_factor(W, H, M):
W = np.matrix(W).reshape((M.shape[0], H.shape[0]))
return norm(M - W*H)
def sense_residual_right_factor(K, H, M):
K = np.matrix(K).reshape((H.shape[1], M.shape[1]))
# print(M.shape,H.shape,K.shape)
return norm(M - H*K)
def complete_factor(H, M, h_on_left=True):
"""Given nonnegative matrix M and a nonnegative factor H of M, finds the other (nonnegative) factor of M.
H: known factor of matrix M.
M: product matrix.
h_on_left: boolean, true if H is the left factor of M, false if H is the right factor.
If H is left factor, find the matrix K such that HK=M. If H is the right factor, finds W such that WH=M
Result is an appropriately-sized matrix. """
if h_on_left:
shape = (H.shape[1], M.shape[1])
residual = sense_residual_right_factor
else:
shape = (M.shape[0], H.shape[0])
residual = sense_residual_left_factor
size = shape[0] * shape[1]
guess = np.random.rand(size)
bounds = [(0, None)] * size # (all elements of matrix must be nonnegative)
result = minimize(residual, guess, args=(H, M), method='L-BFGS-B', bounds=bounds)
x = result["x"]
G = np.matrix(x).reshape(shape)
return G
def main(G_path):
G = igraph.Graph.Read_GML(G_path)
return extract_rolx_roles(G)
# if len(argv) > 0:
# roles = role_id_num
#A = nx.adjacency_matrix(G).todense()
#Gi = igraph.Graph.Adjacency((A > 0).tolist())
#test = extract_rolx_roles(Gi, roles=roles)
### Define a distance based on these distribution over roles
# D_roleX = distance_nodes(test)
#return extract_rolx_roles(G, roles=roles)
# else:
# return H, K
def read_roleid(path_to_file):
role_id_fl = []
with open(path_to_file) as f:
contents = f.readlines()
for content in contents:
content = content.strip('\n')
role_id_fl.append(float(content))
role_id = []
for role in role_id_fl:
role_id.append(int(role))
return role_id
def cluster_graph(role_id, node_embeddings):
colors = role_id
nb_clust = len(np.unique(role_id))
pca = PCA(n_components=2)
trans_data = pca.fit_transform(StandardScaler().fit_transform(node_embeddings))
km = KMeans(n_clusters=nb_clust)
km.fit(trans_data)
labels_pred = km.labels_
######## Params for plotting
cmapx = plt.get_cmap('rainbow')
x = np.linspace(0, 1, nb_clust + 1)
col = [cmapx(xx) for xx in x]
markers = {0: '*', 1: '.', 2: ',', 3: 'o', 4: 'v', 5: '^', 6: '<', 7: '>', 8: 3, 9: 'd', 10: '+', 11: 'x',
12: 'D', 13: '|', 14: '_', 15: 4, 16: 0, 17: 1, 18: 2, 19: 6, 20: 7}
for c in np.unique(role_id):
indc = [i for i, x in enumerate(role_id) if x == c]
plt.scatter(trans_data[indc, 0], trans_data[indc, 1],
c=np.array(col)[list(np.array(labels_pred)[indc])],
marker=markers[c % len(markers)], s=300)
labels = role_id
for label, c, x, y in zip(labels, labels_pred, trans_data[:, 0], trans_data[:, 1]):
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
plt.show()
return labels_pred, colors, trans_data, nb_clust
def unsupervised_evaluate(colors, labels_pred, trans_data, nb_clust):
ami = sk.metrics.adjusted_mutual_info_score(colors, labels_pred)
sil = sk.metrics.silhouette_score(trans_data, labels_pred, metric='euclidean')
ch = sk.metrics.calinski_harabasz_score(trans_data, labels_pred)
hom = sk.metrics.homogeneity_score(colors, labels_pred)
comp = sk.metrics.completeness_score(colors, labels_pred)
#print('Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n')
#print(str(hom) + '\t' + str(comp) + '\t' + str(ami) + '\t' + str(nb_clust) + '\t' + str(ch) + '\t' + str(sil))
return hom, comp, ami, nb_clust, ch, sil
def draw_pca(role_id, node_embeddings):
cmap = plt.get_cmap('hot')
x_range = np.linspace(0, 0.8, len(np.unique(role_id)))
coloring = {u: cmap(x_range[i]) for i, u in enumerate(np.unique(role_id))}
node_color = [coloring[role_id[i]] for i in range(len(role_id))]
pca = PCA(n_components=2)
node_embedded = StandardScaler().fit_transform(node_embeddings)
principalComponents = pca.fit_transform(node_embedded)
principalDf = pd.DataFrame(data=principalComponents,
columns=['principal component 1', 'principal component 2'])
principalDf['target'] = role_id
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('Principal Component 1', fontsize=15)
ax.set_ylabel('Principal Component 2', fontsize=15)
ax.set_title('2 PCA Components', fontsize=20)
targets = np.unique(role_id)
    for target in targets:
        color = coloring[target]
        indicesToKeep = principalDf['target'] == target
ax.scatter(principalDf.loc[indicesToKeep, 'principal component 1'],
principalDf.loc[indicesToKeep, 'principal component 2'],
s=50,
c=color)
ax.legend(targets)
ax.grid()
plt.show()
def average(lst):
return sum(lst) / len(lst)
if __name__ == "__main__":
# homs = []
# comps = []
# amis = []
# chs = []
# sils = []
# for i in range(10):
# role_id = read_roleid("new_graphs/Varied{}.roleid".format(i))
# # role_id_num = len(set(role_id))
# embeddings, attributes = main("new_graphs/Varied{}.gml".format(i))
# labels_pred, colors, trans_data, nb_clust = cluster_graph(role_id, embeddings)
# draw_pca(role_id, embeddings)
# hom, comp, ami, nb_clust, ch, sil = unsupervised_evaluate(colors, labels_pred, trans_data, nb_clust)
# print(hom, comp, ami, nb_clust, ch, sil)
# homs.append(hom)
# comps.append(comp)
# amis.append(ami)
# chs.append(ch)
# sils.append(sil)
# print('Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n')
# print(average(homs), average(comps), average(amis), nb_clust, average(chs), average(sils))
# Real world data
import statistics
acc = []
for i in range(4):
# G, labels = read_real_datasets("cornell")
G = nx.read_edgelist("realdatasets/wisconsin.edgelist")
nx.write_gml(G, "wisconsin.gml")
embeddings, attributes = main("wisconsin.gml")
node_embeddings = embeddings
node_labels = read_roleid("realdatasets/np_wisconsin.txt")
        node_labels = torch.LongTensor(node_labels)  # CrossEntropyLoss expects integer class indices
input_dims = node_embeddings.shape
class_number = int(max(node_labels)) + 1
FNN = MLP(num_layers=5, input_dim=input_dims[1], hidden_dim=input_dims[1] // 2, output_dim=class_number)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(FNN.parameters())
dataset = NodeClassificationDataset(node_embeddings, node_labels)
split = DataSplit(dataset, shuffle=True)
train_loader, val_loader, test_loader = split.get_split(batch_size=64, num_workers=0)
# train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True)
best = -float('inf')
for epoch in range(100):
for i, data in enumerate(train_loader, 0):
# data = data.to(device)
inputs, labels = data
inputs = inputs
labels = labels
y_pred = FNN(inputs.float())
loss = criterion(y_pred, labels)
print(epoch, i, loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
correct = 0
total = 0
for data in val_loader:
inputs, labels = data
inputs = inputs
labels = labels
outputs = FNN(inputs.float())
_, predicted = torch.max(outputs.data, 1)
loss = criterion(outputs, labels)
total += labels.size(0)
correct += torch.sum(predicted == labels)
if correct / total > best:
best = correct / total
torch.save(FNN.state_dict(), 'best_mlp.pkl')
print(str(epoch), correct / total)
with torch.no_grad():
FNN.load_state_dict(torch.load('best_mlp.pkl'))
correct = 0
total = 0
for data in test_loader:
inputs, labels = data
inputs = inputs
labels = labels
outputs = FNN(inputs.float())
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += torch.sum(predicted == labels)
print((correct / total).item())
acc.append((correct / total).item())
print("mean:")
print(statistics.mean(acc))
print("std:")
print(statistics.stdev(acc))
| en | 0.687766 | # -*- coding: utf-8 -*- Created on Wed May 3 09:57:52 2017 @author: Lab41: Github: Circulo/circulo/algorithms/rolx.py #### https://github.com/Lab41/Circulo/blob/master/circulo/algorithms/rolx.py Set of functions to compute the RolX featurization Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary. #print("V is a %s by %s matrix." % V.shape) #print("Node-role matrix is of dimensions %s by %s" % H.shape) #print(H) #print("Role-feature matrix is of dimensions %s by %s" % K.shape) #print(K) Top-level function. Extracts node-role matrix and sensemaking role-feature matrix as necessary. Inputs a matrux #print(H) #print(K) G: iGraph graph with annotations func: string containing function name n: int, recursion level Computes the given function recursively on each vertex Current precondition: already have run the computation for G, func, n-1. Computes recursive features of the graph G for the provided function of G, returning the matrix representing the nth level of the recursion. Checks if w is linearly dependent on the columns of A, this is done by solving the least squares problem (LSP) min || w - Ax ||_2^2 x and checking if || w - Ax_star || <= threshold, where x_star is the arg_minimizer of the LSP w: column vector A: matrix threshold: int Auxiliary function to calculate the degree of each element of G. Computes the number of edges in the ego network of the vertex v. Computes the ego network for all vertices v in G. Computes the outgoing edges from the ego network of the vertex v in G. #Summing over degree will doublecount every edge within the ego network Computes the number of outgoing ego network edges for every vertex in G. Constructs a vertex feature matrix using recursive feature generation, then uses least-squares solving to eliminate those exhibiting approximate linear dependence. Add the given column b to the matrix V, enlarging the matrix if necessary. # need to resize V Performs k-means quantization on the given matrix. Returns the encoded matrix and the number of bits needed for encoding it. Computes the Kullback-Leibler divergence of the two matrices A and B. Computes the length necessary to describe the given model with the given number of bits. Distribute the rows of the cost matrix normally to allow for accurate comparisons of error and description cost. # def standardize(M): # m_flat = np.asarray(M).reshape(-1) # mean = np.mean(m_flat) # stdev = np.std(m_flat) # m_flat = (m_flat - mean)/stdev # # return m_flat.reshape(M.shape) Obtains a nonnegative matrix factorization of the matrix V with num_roles intermediate roles. Uses grid search to find the optimal parameter number and encoding of the given matrix factorization. # Can't have more possible roles than features # Setup and run the factorization problem # print mat_total_cost Given graph G and node-role matrix H, returns a role-feature matrix K for sensemaking analyses of roles. #print(feature_matrix) #print(K) # print(M.shape,H.shape,K.shape) Given nonnegative matrix M and a nonnegative factor H of M, finds the other (nonnegative) factor of M. H: known factor of matrix M. M: product matrix. h_on_left: boolean, true if H is the left factor of M, false if H is the right factor. If H is left factor, find the matrix K such that HK=M. If H is the right factor, finds W such that WH=M Result is an appropriately-sized matrix. 
# (all elements of matrix must be nonnegative) # if len(argv) > 0: # roles = role_id_num #A = nx.adjacency_matrix(G).todense() #Gi = igraph.Graph.Adjacency((A > 0).tolist()) #test = extract_rolx_roles(Gi, roles=roles) ### Define a distance based on these distribution over roles # D_roleX = distance_nodes(test) #return extract_rolx_roles(G, roles=roles) # else: # return H, K ######## Params for plotting #print('Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n') #print(str(hom) + '\t' + str(comp) + '\t' + str(ami) + '\t' + str(nb_clust) + '\t' + str(ch) + '\t' + str(sil)) # homs = [] # comps = [] # amis = [] # chs = [] # sils = [] # for i in range(10): # role_id = read_roleid("new_graphs/Varied{}.roleid".format(i)) # # role_id_num = len(set(role_id)) # embeddings, attributes = main("new_graphs/Varied{}.gml".format(i)) # labels_pred, colors, trans_data, nb_clust = cluster_graph(role_id, embeddings) # draw_pca(role_id, embeddings) # hom, comp, ami, nb_clust, ch, sil = unsupervised_evaluate(colors, labels_pred, trans_data, nb_clust) # print(hom, comp, ami, nb_clust, ch, sil) # homs.append(hom) # comps.append(comp) # amis.append(ami) # chs.append(ch) # sils.append(sil) # print('Homogeneity \t Completeness \t AMI \t nb clusters \t CH \t Silhouette \n') # print(average(homs), average(comps), average(amis), nb_clust, average(chs), average(sils)) # Real world data # G, labels = read_real_datasets("cornell") # train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True) # data = data.to(device) | 2.544985 | 3 |
training_api/application/data_preparation/models/column_name.py | hadikoub/BMW-TensorFlow-Training-GUI | 1 | 6631636 | from enum import Enum
# noinspection SpellCheckingInspection
class ColumnName(Enum):
"""
A class Enum used to get column names inside the csv file
"""
file_name: str = "filename"
width: str = "width"
height: str = "height"
class_name: str = "class"
xmin: str = "xmin"
xmax: str = "xmax"
ymin: str = "ymin"
ymax: str = "ymax"
| from enum import Enum
# noinspection SpellCheckingInspection
class ColumnName(Enum):
"""
A class Enum used to get column names inside the csv file
"""
file_name: str = "filename"
width: str = "width"
height: str = "height"
class_name: str = "class"
xmin: str = "xmin"
xmax: str = "xmax"
ymin: str = "ymin"
ymax: str = "ymax"
| en | 0.543541 | # noinspection SpellCheckingInspection A class Enum used to get column names inside the csv file | 3.761128 | 4 |
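The ColumnName enum above simply pins down the header strings of an object-detection annotation CSV. A minimal, hypothetical sketch of how it might be consumed with pandas follows; the file name "train_labels.csv" and the import path are assumptions for illustration, not part of the project.

import pandas as pd

from column_name import ColumnName  # assumed import path

df = pd.read_csv("train_labels.csv")  # hypothetical annotations file
box_w = df[ColumnName.xmax.value] - df[ColumnName.xmin.value]
box_h = df[ColumnName.ymax.value] - df[ColumnName.ymin.value]
print(df[ColumnName.class_name.value].value_counts())
print("mean box size:", box_w.mean(), "x", box_h.mean())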
hplip-3.20.3/ui4/devicesetupdialog_base.py | Deril-Pana/wikiBlackcoinNL | 0 | 6631637 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui4/devicesetupdialog_base.ui'
#
# Created: Mon May 4 14:30:32 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(700, 500)
self.gridlayout = QtGui.QGridLayout(Dialog)
self.gridlayout.setObjectName("gridlayout")
self.label = QtGui.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(16)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridlayout.addWidget(self.label, 0, 0, 1, 1)
self.line = QtGui.QFrame(Dialog)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.gridlayout.addWidget(self.line, 1, 0, 1, 3)
self.DeviceComboBox = DeviceUriComboBox(Dialog)
self.DeviceComboBox.setObjectName("DeviceComboBox")
self.gridlayout.addWidget(self.DeviceComboBox, 2, 0, 1, 3)
self.TabWidget = QtGui.QTabWidget(Dialog)
self.TabWidget.setObjectName("TabWidget")
self.PowerSettingsTab = QtGui.QWidget()
self.PowerSettingsTab.setObjectName("PowerSettingsTab")
self.gridlayout1 = QtGui.QGridLayout(self.PowerSettingsTab)
self.gridlayout1.setObjectName("gridlayout1")
self.groupBox = QtGui.QGroupBox(self.PowerSettingsTab)
self.groupBox.setObjectName("groupBox")
self.gridlayout2 = QtGui.QGridLayout(self.groupBox)
self.gridlayout2.setObjectName("gridlayout2")
self.OnRadioButton = QtGui.QRadioButton(self.groupBox)
self.OnRadioButton.setObjectName("OnRadioButton")
self.gridlayout2.addWidget(self.OnRadioButton, 0, 0, 1, 2)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName("hboxlayout")
self.OffRadioButton = QtGui.QRadioButton(self.groupBox)
self.OffRadioButton.setEnabled(True)
self.OffRadioButton.setObjectName("OffRadioButton")
self.hboxlayout.addWidget(self.OffRadioButton)
self.DurationComboBox = QtGui.QComboBox(self.groupBox)
self.DurationComboBox.setEnabled(False)
self.DurationComboBox.setObjectName("DurationComboBox")
self.hboxlayout.addWidget(self.DurationComboBox)
self.gridlayout2.addLayout(self.hboxlayout, 1, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridlayout2.addItem(spacerItem, 1, 1, 1, 1)
self.gridlayout1.addWidget(self.groupBox, 0, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(282, 51, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridlayout1.addItem(spacerItem1, 1, 0, 1, 1)
self.TabWidget.addTab(self.PowerSettingsTab, "")
self.gridlayout.addWidget(self.TabWidget, 3, 0, 1, 3)
spacerItem2 = QtGui.QSpacerItem(510, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
self.gridlayout.addItem(spacerItem2, 4, 0, 1, 1)
spacerItem3 = QtGui.QSpacerItem(361, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridlayout.addItem(spacerItem3, 5, 0, 1, 1)
self.CancelButton = QtGui.QPushButton(Dialog)
self.CancelButton.setObjectName("CancelButton")
self.gridlayout.addWidget(self.CancelButton, 5, 2, 1, 1)
self.retranslateUi(Dialog)
self.TabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.OffRadioButton, QtCore.SIGNAL("toggled(bool)"), self.DurationComboBox.setEnabled)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "HP Device Manager - Device Setup", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "Device Setup", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("Dialog", "Automatic Power Off", None, QtGui.QApplication.UnicodeUTF8))
self.OnRadioButton.setText(QtGui.QApplication.translate("Dialog", "Always leave printer on", None, QtGui.QApplication.UnicodeUTF8))
self.OffRadioButton.setText(QtGui.QApplication.translate("Dialog", "Automatically turn printer off after:", None, QtGui.QApplication.UnicodeUTF8))
self.TabWidget.setTabText(self.TabWidget.indexOf(self.PowerSettingsTab), QtGui.QApplication.translate("Dialog", "Power Settings", None, QtGui.QApplication.UnicodeUTF8))
self.CancelButton.setText(QtGui.QApplication.translate("Dialog", "Close", None, QtGui.QApplication.UnicodeUTF8))
from .deviceuricombobox import DeviceUriComboBox
| # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui4/devicesetupdialog_base.ui'
#
# Created: Mon May 4 14:30:32 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(700, 500)
self.gridlayout = QtGui.QGridLayout(Dialog)
self.gridlayout.setObjectName("gridlayout")
self.label = QtGui.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(16)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridlayout.addWidget(self.label, 0, 0, 1, 1)
self.line = QtGui.QFrame(Dialog)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.gridlayout.addWidget(self.line, 1, 0, 1, 3)
self.DeviceComboBox = DeviceUriComboBox(Dialog)
self.DeviceComboBox.setObjectName("DeviceComboBox")
self.gridlayout.addWidget(self.DeviceComboBox, 2, 0, 1, 3)
self.TabWidget = QtGui.QTabWidget(Dialog)
self.TabWidget.setObjectName("TabWidget")
self.PowerSettingsTab = QtGui.QWidget()
self.PowerSettingsTab.setObjectName("PowerSettingsTab")
self.gridlayout1 = QtGui.QGridLayout(self.PowerSettingsTab)
self.gridlayout1.setObjectName("gridlayout1")
self.groupBox = QtGui.QGroupBox(self.PowerSettingsTab)
self.groupBox.setObjectName("groupBox")
self.gridlayout2 = QtGui.QGridLayout(self.groupBox)
self.gridlayout2.setObjectName("gridlayout2")
self.OnRadioButton = QtGui.QRadioButton(self.groupBox)
self.OnRadioButton.setObjectName("OnRadioButton")
self.gridlayout2.addWidget(self.OnRadioButton, 0, 0, 1, 2)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName("hboxlayout")
self.OffRadioButton = QtGui.QRadioButton(self.groupBox)
self.OffRadioButton.setEnabled(True)
self.OffRadioButton.setObjectName("OffRadioButton")
self.hboxlayout.addWidget(self.OffRadioButton)
self.DurationComboBox = QtGui.QComboBox(self.groupBox)
self.DurationComboBox.setEnabled(False)
self.DurationComboBox.setObjectName("DurationComboBox")
self.hboxlayout.addWidget(self.DurationComboBox)
self.gridlayout2.addLayout(self.hboxlayout, 1, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridlayout2.addItem(spacerItem, 1, 1, 1, 1)
self.gridlayout1.addWidget(self.groupBox, 0, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(282, 51, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridlayout1.addItem(spacerItem1, 1, 0, 1, 1)
self.TabWidget.addTab(self.PowerSettingsTab, "")
self.gridlayout.addWidget(self.TabWidget, 3, 0, 1, 3)
spacerItem2 = QtGui.QSpacerItem(510, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
self.gridlayout.addItem(spacerItem2, 4, 0, 1, 1)
spacerItem3 = QtGui.QSpacerItem(361, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridlayout.addItem(spacerItem3, 5, 0, 1, 1)
self.CancelButton = QtGui.QPushButton(Dialog)
self.CancelButton.setObjectName("CancelButton")
self.gridlayout.addWidget(self.CancelButton, 5, 2, 1, 1)
self.retranslateUi(Dialog)
self.TabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.OffRadioButton, QtCore.SIGNAL("toggled(bool)"), self.DurationComboBox.setEnabled)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "HP Device Manager - Device Setup", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "Device Setup", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("Dialog", "Automatic Power Off", None, QtGui.QApplication.UnicodeUTF8))
self.OnRadioButton.setText(QtGui.QApplication.translate("Dialog", "Always leave printer on", None, QtGui.QApplication.UnicodeUTF8))
self.OffRadioButton.setText(QtGui.QApplication.translate("Dialog", "Automatically turn printer off after:", None, QtGui.QApplication.UnicodeUTF8))
self.TabWidget.setTabText(self.TabWidget.indexOf(self.PowerSettingsTab), QtGui.QApplication.translate("Dialog", "Power Settings", None, QtGui.QApplication.UnicodeUTF8))
self.CancelButton.setText(QtGui.QApplication.translate("Dialog", "Close", None, QtGui.QApplication.UnicodeUTF8))
from .deviceuricombobox import DeviceUriComboBox
| en | 0.776503 | # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ui4/devicesetupdialog_base.ui' # # Created: Mon May 4 14:30:32 2009 # by: PyQt4 UI code generator 4.4.4 # # WARNING! All changes made in this file will be lost! | 1.797499 | 2 |
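Ui_Dialog above is pyuic4-generated and only populates widgets on a dialog supplied by the caller. A hedged sketch of the usual wiring, assuming the generated module and HPLIP's DeviceUriComboBox are importable in the environment:

import sys
from PyQt4 import QtGui
from devicesetupdialog_base import Ui_Dialog  # assumed import path

app = QtGui.QApplication(sys.argv)
dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(dialog)  # builds the widgets; toggling OffRadioButton enables DurationComboBox
ui.CancelButton.clicked.connect(dialog.reject)
dialog.show()
sys.exit(app.exec_())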
cloudkitty-9.0.0/cloudkitty/common/db/models.py | scottwedge/OpenStack-Stein | 0 | 6631638 | # -*- coding: utf-8 -*-
# Copyright 2016 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>
#
from sqlalchemy.ext import declarative
NAMING_CONVENTION = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"}
def get_base():
base = declarative.declarative_base()
base.metadata.naming_convention = NAMING_CONVENTION
return base
| # -*- coding: utf-8 -*-
# Copyright 2016 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>
#
from sqlalchemy.ext import declarative
NAMING_CONVENTION = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"}
def get_base():
base = declarative.declarative_base()
base.metadata.naming_convention = NAMING_CONVENTION
return base
| en | 0.827342 | # -*- coding: utf-8 -*- # Copyright 2016 Objectif Libre # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: <NAME> # | 2.126645 | 2 |
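get_base() above attaches CloudKitty's constraint-naming convention to a fresh declarative base so that indexes and constraints get deterministic names (useful for Alembic migrations). A sketch of a consuming model; the table name and columns are invented purely for illustration:

import sqlalchemy as sa

from cloudkitty.common.db.models import get_base

Base = get_base()


class RatedDataFrame(Base):
    """Hypothetical model used only to illustrate the naming convention."""
    __tablename__ = "rated_data_frames"

    id = sa.Column(sa.Integer, primary_key=True)         # -> pk_rated_data_frames
    tenant_id = sa.Column(sa.String(255), unique=True)   # -> uq_rated_data_frames_tenant_id
    rate = sa.Column(sa.Numeric(20, 8), nullable=False)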
prody/routines/__init__.py | gokceneraslan/ProDy | 0 | 6631639 | # ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module defines ProDy routines used as command line programs."""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
import sys
import argparse
from actions import *
PRODY_COMMANDS = ['anm', 'gnm', 'pca', 'eda', 'align', 'blast', 'biomol',
'catdcd', 'fetch', 'select', ]
__all__ = ['main']
prody_parser = argparse.ArgumentParser(
description="ProDy: A Python Package for Protein Dynamics Analysis",
epilog="See 'prody <command> -h' for more information on a specific "
"command."
)
prody_parser.add_argument('-c', '--cite',
help="print citation info and exit",
action=ProDyCitation, nargs=0)
prody_parser.add_argument('-v', '--version',
help="print ProDy version and exit",
action=ProDyVersion, nargs=0)
prody_commands = prody_parser.add_subparsers(
title='subcommands')
for cmd in PRODY_COMMANDS:
pkg = __import__('prody_' + cmd, globals(), locals(), [], -1)
pkg.addCommand(prody_commands)
def prody_main():
if len(sys.argv) == 1:
prody_parser.print_help()
else:
args = prody_parser.parse_args()
args.func(args)
if __name__ == '__main__':
prody_main()
| # ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module defines ProDy routines used as command line programs."""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
import sys
import argparse
from actions import *
PRODY_COMMANDS = ['anm', 'gnm', 'pca', 'eda', 'align', 'blast', 'biomol',
'catdcd', 'fetch', 'select', ]
__all__ = ['main']
prody_parser = argparse.ArgumentParser(
description="ProDy: A Python Package for Protein Dynamics Analysis",
epilog="See 'prody <command> -h' for more information on a specific "
"command."
)
prody_parser.add_argument('-c', '--cite',
help="print citation info and exit",
action=ProDyCitation, nargs=0)
prody_parser.add_argument('-v', '--version',
help="print ProDy version and exit",
action=ProDyVersion, nargs=0)
prody_commands = prody_parser.add_subparsers(
title='subcommands')
for cmd in PRODY_COMMANDS:
pkg = __import__('prody_' + cmd, globals(), locals(), [], -1)
pkg.addCommand(prody_commands)
def prody_main():
if len(sys.argv) == 1:
prody_parser.print_help()
else:
args = prody_parser.parse_args()
args.func(args)
if __name__ == '__main__':
prody_main()
| en | 0.846578 | # ProDy: A Python Package for Protein Dynamics Analysis # # Copyright (C) 2010-2012 <NAME> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> This module defines ProDy routines used as command line programs. | 2.139619 | 2 |
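The loop above imports a prody_<command> module for every entry in PRODY_COMMANDS and calls its addCommand() hook. The hook's exact contents are internal to ProDy, so the following is only an assumed sketch of the contract that prody_main() relies on (each subparser must set args.func):

# hypothetical prody_fetch.py, illustrating the addCommand contract
def fetch_pdb(args):
    print("would fetch: " + ", ".join(args.pdb))


def addCommand(commands):
    sub = commands.add_parser("fetch", help="fetch PDB files")
    sub.add_argument("pdb", nargs="+", help="PDB identifier(s)")
    sub.set_defaults(func=fetch_pdb)  # prody_main() later calls args.func(args)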
orange3/Orange/data/table.py | rgschmitz1/BioDepot-workflow-builder | 54 | 6631640 | <filename>orange3/Orange/data/table.py
import operator
import os
import zlib
from collections import MutableSequence, Iterable, Sequence, Sized
from functools import reduce
from itertools import chain
from numbers import Real, Integral
from threading import Lock, RLock
import bottleneck as bn
import numpy as np
from scipy import sparse as sp
import Orange.data # import for io.py
from Orange.data import (
_contingency,
_valuecount,
Domain,
Variable,
Storage,
StringVariable,
Unknown,
Value,
Instance,
ContinuousVariable,
DiscreteVariable,
MISSING_VALUES,
)
from Orange.data.util import (
SharedComputeValue,
vstack,
hstack,
assure_array_dense,
assure_array_sparse,
assure_column_dense,
assure_column_sparse,
)
from Orange.statistics.util import (
bincount,
countnans,
contingency,
stats as fast_stats,
sparse_has_implicit_zeros,
sparse_count_implicit_zeros,
sparse_implicit_zero_weights,
)
from Orange.util import flatten
__all__ = ["dataset_dirs", "get_sample_datasets_dir", "RowInstance", "Table"]
def get_sample_datasets_dir():
orange_data_table = os.path.dirname(__file__)
dataset_dir = os.path.join(orange_data_table, "..", "datasets")
return os.path.realpath(dataset_dir)
dataset_dirs = ["", get_sample_datasets_dir()]
"""Domain conversion cache used in Table.from_table. It is global so that
chaining of domain conversions also works with caching even with descendants
of Table."""
_conversion_cache = None
_conversion_cache_lock = RLock()
class RowInstance(Instance):
sparse_x = None
sparse_y = None
sparse_metas = None
_weight = None
def __init__(self, table, row_index):
"""
Construct a data instance representing the given row of the table.
"""
self.table = table
self._domain = table.domain
self.row_index = row_index
self.id = table.ids[row_index]
self._x = table.X[row_index]
if sp.issparse(self._x):
self.sparse_x = sp.csr_matrix(self._x)
self._x = np.asarray(self._x.todense())[0]
self._y = table._Y[row_index]
if sp.issparse(self._y):
self.sparse_y = sp.csr_matrix(self._y)
self._y = np.asarray(self._y.todense())[0]
self._metas = table.metas[row_index]
if sp.issparse(self._metas):
self.sparse_metas = sp.csr_matrix(self._metas)
self._metas = np.asarray(self._metas.todense())[0]
@property
def weight(self):
if not self.table.has_weights():
return 1
return self.table.W[self.row_index]
@weight.setter
def weight(self, weight):
if not self.table.has_weights():
self.table.set_weights()
self.table.W[self.row_index] = weight
def set_class(self, value):
self._check_single_class()
if not isinstance(value, Real):
value = self.table.domain.class_var.to_val(value)
self._y[0] = value
if self.sparse_y:
self.table._Y[self.row_index, 0] = value
def __setitem__(self, key, value):
if not isinstance(key, Integral):
key = self._domain.index(key)
if isinstance(value, str):
var = self._domain[key]
value = var.to_val(value)
if key >= 0:
if not isinstance(value, Real):
raise TypeError(
"Expected primitive value, got '%s'" % type(value).__name__
)
if key < len(self._x):
self._x[key] = value
if self.sparse_x is not None:
self.table.X[self.row_index, key] = value
else:
self._y[key - len(self._x)] = value
if self.sparse_y is not None:
self.table._Y[self.row_index, key - len(self._x)] = value
else:
self._metas[-1 - key] = value
if self.sparse_metas:
self.table.metas[self.row_index, -1 - key] = value
def _str(self, limit):
def sp_values(matrix, variables):
if not sp.issparse(matrix):
if matrix.ndim == 1:
matrix = matrix[:, np.newaxis]
return Instance.str_values(matrix[row], variables, limit)
row_entries, idx = [], 0
while idx < len(variables):
# Make sure to stop printing variables if we limit the output
if limit and len(row_entries) >= 5:
break
var = variables[idx]
if var.is_discrete or matrix[row, idx]:
row_entries.append(
"%s=%s" % (var.name, var.str_val(matrix[row, idx]))
)
idx += 1
s = ", ".join(row_entries)
if limit and idx < len(variables):
s += ", ..."
return s
table = self.table
domain = table.domain
row = self.row_index
s = "[" + sp_values(table.X, domain.attributes)
if domain.class_vars:
s += " | " + sp_values(table.Y, domain.class_vars)
s += "]"
if self._domain.metas:
s += " {" + sp_values(table.metas, domain.metas) + "}"
return s
def __str__(self):
return self._str(False)
def __repr__(self):
return self._str(True)
class Columns:
def __init__(self, domain):
for v in chain(domain.variables, domain.metas):
setattr(self, v.name.replace(" ", "_"), v)
# noinspection PyPep8Naming
class Table(MutableSequence, Storage):
__file__ = None
name = "untitled"
@property
def columns(self):
"""
A class whose attributes contain attribute descriptors for columns.
For a table `table`, setting `c = table.columns` will allow accessing
        the table's variables with, for instance `c.gender`, `c.age`, etc.
Spaces are replaced with underscores.
"""
return Columns(self.domain)
_next_instance_id = 0
_next_instance_lock = Lock()
@property
def Y(self):
if self._Y.shape[1] == 1:
return self._Y[:, 0]
return self._Y
@Y.setter
def Y(self, value):
if len(value.shape) == 1:
value = value[:, None]
if sp.issparse(value) and len(self) != value.shape[0]:
value = value.T
self._Y = value
def __new__(cls, *args, **kwargs):
if not args and not kwargs:
return super().__new__(cls)
if "filename" in kwargs:
args = [kwargs.pop("filename")]
if not args:
raise TypeError("Table takes at least 1 positional argument (0 given))")
if isinstance(args[0], str):
if args[0].startswith("https://") or args[0].startswith("http://"):
return cls.from_url(args[0], **kwargs)
else:
return cls.from_file(args[0])
elif isinstance(args[0], Table):
return cls.from_table(args[0].domain, args[0])
elif isinstance(args[0], Domain):
domain, args = args[0], args[1:]
if not args:
return cls.from_domain(domain, **kwargs)
if isinstance(args[0], Table):
return cls.from_table(domain, *args)
elif isinstance(args[0], list):
return cls.from_list(domain, *args)
else:
domain = None
return cls.from_numpy(domain, *args, **kwargs)
def __init__(self, *args, **kwargs):
# So subclasses can expect to call super without breakage; noop
pass
@classmethod
def from_domain(cls, domain, n_rows=0, weights=False):
"""
Construct a new `Table` with the given number of rows for the given
domain. The optional vector of weights is initialized to 1's.
:param domain: domain for the `Table`
:type domain: Orange.data.Domain
:param n_rows: number of rows in the new table
:type n_rows: int
:param weights: indicates whether to construct a vector of weights
:type weights: bool
:return: a new table
:rtype: Orange.data.Table
"""
self = cls()
self.domain = domain
self.n_rows = n_rows
self.X = np.zeros((n_rows, len(domain.attributes)))
self.Y = np.zeros((n_rows, len(domain.class_vars)))
if weights:
self.W = np.ones(n_rows)
else:
self.W = np.empty((n_rows, 0))
self.metas = np.empty((n_rows, len(self.domain.metas)), object)
cls._init_ids(self)
self.attributes = {}
return self
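    # Illustrative sketch, not part of the original file: from_domain yields an
    # all-zero table of the requested size, e.g.
    #
    #     from Orange.data import ContinuousVariable, DiscreteVariable, Domain, Table
    #     domain = Domain([ContinuousVariable("x1"), ContinuousVariable("x2")],
    #                     DiscreteVariable("y", values=["no", "yes"]))
    #     table = Table.from_domain(domain, n_rows=3, weights=True)
    #     # table.X is a 3x2 zero matrix, table.Y a zero class column, table.W all ones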
@classmethod
def from_table(cls, domain, source, row_indices=...):
"""
Create a new table from selected columns and/or rows of an existing
one. The columns are chosen using a domain. The domain may also include
variables that do not appear in the source table; they are computed
from source variables if possible.
The resulting data may be a view or a copy of the existing data.
:param domain: the domain for the new table
:type domain: Orange.data.Domain
:param source: the source table
:type source: Orange.data.Table
:param row_indices: indices of the rows to include
:type row_indices: a slice or a sequence
:return: a new table
:rtype: Orange.data.Table
"""
global _conversion_cache
def get_columns(
row_indices, src_cols, n_rows, dtype=np.float64, is_sparse=False
):
if not len(src_cols):
if is_sparse:
return sp.csr_matrix((n_rows, 0), dtype=source.X.dtype)
else:
return np.zeros((n_rows, 0), dtype=source.X.dtype)
# match density for subarrays
match_density = assure_array_sparse if is_sparse else assure_array_dense
n_src_attrs = len(source.domain.attributes)
if all(isinstance(x, Integral) and 0 <= x < n_src_attrs for x in src_cols):
return match_density(_subarray(source.X, row_indices, src_cols))
if all(isinstance(x, Integral) and x < 0 for x in src_cols):
arr = match_density(
_subarray(source.metas, row_indices, [-1 - x for x in src_cols])
)
if arr.dtype != dtype:
return arr.astype(dtype)
return arr
if all(isinstance(x, Integral) and x >= n_src_attrs for x in src_cols):
return match_density(
_subarray(
source._Y, row_indices, [x - n_src_attrs for x in src_cols]
)
)
# initialize final array & set `match_density` for columns
if is_sparse:
a = sp.dok_matrix((n_rows, len(src_cols)), dtype=dtype)
match_density = assure_column_sparse
else:
a = np.empty((n_rows, len(src_cols)), dtype=dtype)
match_density = assure_column_dense
shared_cache = _conversion_cache
for i, col in enumerate(src_cols):
if col is None:
a[:, i] = Unknown
elif not isinstance(col, Integral):
if isinstance(col, SharedComputeValue):
if (id(col.compute_shared), id(source)) not in shared_cache:
shared_cache[
id(col.compute_shared), id(source)
] = col.compute_shared(source)
shared = shared_cache[id(col.compute_shared), id(source)]
if row_indices is not ...:
a[:, i] = match_density(
col(source, shared_data=shared)[row_indices]
)
else:
a[:, i] = match_density(col(source, shared_data=shared))
else:
if row_indices is not ...:
a[:, i] = match_density(col(source)[row_indices])
else:
a[:, i] = match_density(col(source))
elif col < 0:
a[:, i] = match_density(source.metas[row_indices, -1 - col])
elif col < n_src_attrs:
a[:, i] = match_density(source.X[row_indices, col])
else:
a[:, i] = match_density(source._Y[row_indices, col - n_src_attrs])
if is_sparse:
a = a.tocsr()
return a
with _conversion_cache_lock:
new_cache = _conversion_cache is None
try:
if new_cache:
_conversion_cache = {}
else:
cached = _conversion_cache.get((id(domain), id(source)))
if cached:
return cached
if domain == source.domain:
table = cls.from_table_rows(source, row_indices)
# assure resulting domain is the instance passed on input
table.domain = domain
# since sparse flags are not considered when checking for
# domain equality, fix manually.
table = assure_domain_conversion_sparsity(table, source)
return table
if isinstance(row_indices, slice):
start, stop, stride = row_indices.indices(source.X.shape[0])
n_rows = (stop - start) // stride
if n_rows < 0:
n_rows = 0
elif row_indices is ...:
n_rows = len(source)
else:
n_rows = len(row_indices)
self = cls()
self.domain = domain
conversion = domain.get_conversion(source.domain)
self.X = get_columns(
row_indices,
conversion.attributes,
n_rows,
is_sparse=conversion.sparse_X,
)
if self.X.ndim == 1:
self.X = self.X.reshape(-1, len(self.domain.attributes))
self.Y = get_columns(
row_indices,
conversion.class_vars,
n_rows,
is_sparse=conversion.sparse_Y,
)
dtype = np.float64
if any(isinstance(var, StringVariable) for var in domain.metas):
dtype = np.object
self.metas = get_columns(
row_indices,
conversion.metas,
n_rows,
dtype,
is_sparse=conversion.sparse_metas,
)
if self.metas.ndim == 1:
self.metas = self.metas.reshape(-1, len(self.domain.metas))
if source.has_weights():
self.W = source.W[row_indices]
else:
self.W = np.empty((n_rows, 0))
self.name = getattr(source, "name", "")
if hasattr(source, "ids"):
self.ids = source.ids[row_indices]
else:
cls._init_ids(self)
self.attributes = getattr(source, "attributes", {})
_conversion_cache[(id(domain), id(source))] = self
return self
finally:
if new_cache:
_conversion_cache = None
def transform(self, domain):
"""
Construct a table with a different domain.
The new table keeps the row ids and other information. If the table
is a subclass of :obj:`Table`, the resulting table will be of the same
type.
In a typical scenario, an existing table is augmented with a new
column by ::
domain = Domain(old_domain.attributes + [new_attribute],
old_domain.class_vars,
old_domain.metas)
table = data.transform(domain)
table[:, new_attribute] = new_column
Args:
domain (Domain): new domain
Returns:
A new table
"""
return type(self).from_table(domain, self)
@classmethod
def from_table_rows(cls, source, row_indices):
"""
Construct a new table by selecting rows from the source table.
:param source: an existing table
:type source: Orange.data.Table
:param row_indices: indices of the rows to include
:type row_indices: a slice or a sequence
:return: a new table
:rtype: Orange.data.Table
"""
self = cls()
self.domain = source.domain
self.X = source.X[row_indices]
if self.X.ndim == 1:
self.X = self.X.reshape(-1, len(self.domain.attributes))
self.Y = source._Y[row_indices]
self.metas = source.metas[row_indices]
if self.metas.ndim == 1:
self.metas = self.metas.reshape(-1, len(self.domain.metas))
self.W = source.W[row_indices]
self.name = getattr(source, "name", "")
self.ids = np.array(source.ids[row_indices])
self.attributes = getattr(source, "attributes", {})
return self
@classmethod
def from_numpy(cls, domain, X, Y=None, metas=None, W=None):
"""
Construct a table from numpy arrays with the given domain. The number
of variables in the domain must match the number of columns in the
corresponding arrays. All arrays must have the same number of rows.
Arrays may be of different numpy types, and may be dense or sparse.
:param domain: the domain for the new table
:type domain: Orange.data.Domain
:param X: array with attribute values
:type X: np.array
:param Y: array with class values
:type Y: np.array
:param metas: array with meta attributes
:type metas: np.array
:param W: array with weights
:type W: np.array
:return:
"""
X, Y, W = _check_arrays(X, Y, W, dtype="float64")
metas, = _check_arrays(metas, dtype=object)
if Y is not None and Y.ndim == 1:
Y = Y.reshape(Y.shape[0], 1)
if domain is None:
domain = Domain.from_numpy(X, Y, metas)
if Y is None:
if sp.issparse(X):
Y = np.empty((X.shape[0], 0), dtype=np.float64)
else:
Y = X[:, len(domain.attributes) :]
X = X[:, : len(domain.attributes)]
if metas is None:
metas = np.empty((X.shape[0], 0), object)
if W is None or W.size == 0:
W = np.empty((X.shape[0], 0))
else:
W = W.reshape(W.size)
if X.shape[1] != len(domain.attributes):
raise ValueError(
"Invalid number of variable columns ({} != {})".format(
X.shape[1], len(domain.attributes)
)
)
if Y.shape[1] != len(domain.class_vars):
raise ValueError(
"Invalid number of class columns ({} != {})".format(
Y.shape[1], len(domain.class_vars)
)
)
if metas.shape[1] != len(domain.metas):
raise ValueError(
"Invalid number of meta attribute columns ({} != {})".format(
metas.shape[1], len(domain.metas)
)
)
if not X.shape[0] == Y.shape[0] == metas.shape[0] == W.shape[0]:
raise ValueError("Parts of data contain different numbers of rows.")
self = cls()
self.domain = domain
self.X = X
self.Y = Y
self.metas = metas
self.W = W
self.n_rows = self.X.shape[0]
cls._init_ids(self)
self.attributes = {}
return self
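    # Illustrative sketch, not part of the original file: wrapping existing numpy
    # arrays; passing domain=None lets Domain.from_numpy infer the variables, e.g.
    #
    #     import numpy as np
    #     X = np.random.random((5, 3))
    #     Y = np.array([0, 1, 0, 1, 1], dtype=float)
    #     table = Table.from_numpy(None, X, Y)
    #     assert len(table) == 5 and table.X.shape == (5, 3)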
@classmethod
def from_list(cls, domain, rows, weights=None):
if weights is not None and len(rows) != len(weights):
raise ValueError("mismatching number of instances and weights")
self = cls.from_domain(domain, len(rows), weights is not None)
attrs, classes = domain.attributes, domain.class_vars
metas = domain.metas
nattrs, ncls = len(domain.attributes), len(domain.class_vars)
for i, row in enumerate(rows):
if isinstance(row, Instance):
row = row.list
for j, (var, val) in enumerate(zip(attrs, row)):
self.X[i, j] = var.to_val(val)
for j, (var, val) in enumerate(zip(classes, row[nattrs:])):
self._Y[i, j] = var.to_val(val)
for j, (var, val) in enumerate(zip(metas, row[nattrs + ncls :])):
self.metas[i, j] = var.to_val(val)
if weights is not None:
self.W = np.array(weights)
return self
@classmethod
def _init_ids(cls, obj):
with cls._next_instance_lock:
obj.ids = np.array(
range(cls._next_instance_id, cls._next_instance_id + obj.X.shape[0])
)
cls._next_instance_id += obj.X.shape[0]
@classmethod
def new_id(cls):
with cls._next_instance_lock:
id = cls._next_instance_id
cls._next_instance_id += 1
return id
def save(self, filename):
"""
Save a data table to a file. The path can be absolute or relative.
:param filename: File name
:type filename: str
"""
ext = os.path.splitext(filename)[1]
from Orange.data.io import FileFormat
writer = FileFormat.writers.get(ext)
if not writer:
desc = FileFormat.names.get(ext)
if desc:
raise IOError("Writing of {}s is not supported".format(desc.lower()))
else:
raise IOError("Unknown file name extension.")
writer.write_file(filename, self)
@classmethod
def from_file(cls, filename, sheet=None):
"""
Read a data table from a file. The path can be absolute or relative.
:param filename: File name
:type filename: str
:param sheet: Sheet in a file (optional)
:type sheet: str
:return: a new data table
:rtype: Orange.data.Table
"""
from Orange.data.io import FileFormat
absolute_filename = FileFormat.locate(filename, dataset_dirs)
reader = FileFormat.get_reader(absolute_filename)
reader.select_sheet(sheet)
data = reader.read()
# Readers return plain table. Make sure to cast it to appropriate
# (subclass) type
if cls != data.__class__:
data = cls(data)
        # no need to call _init_ids as functions from .io already
# construct a table with .ids
data.__file__ = absolute_filename
return data
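    # Illustrative sketch, not part of the original file: thanks to dataset_dirs
    # registered above, bundled sample files resolve by short name, e.g.
    #
    #     iris = Table.from_file("iris")      # equivalent to Table("iris")
    #     zoo = Table.from_file("zoo.tab")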
@classmethod
def from_url(cls, url):
from Orange.data.io import UrlReader
reader = UrlReader(url)
data = reader.read()
if cls != data.__class__:
data = cls(data)
return data
# Helper function for __setitem__ and insert:
# Set the row of table data matrices
# noinspection PyProtectedMember
def _set_row(self, example, row):
domain = self.domain
if isinstance(example, Instance):
if example.domain == domain:
if isinstance(example, RowInstance):
self.X[row] = example._x
self._Y[row] = example._y
else:
self.X[row] = example._x
self._Y[row] = example._y
self.metas[row] = example._metas
return
self.X[row], self._Y[row], self.metas[row] = self.domain.convert(example)
try:
self.ids[row] = example.id
except:
with type(self)._next_instance_lock:
self.ids[row] = type(self)._next_instance_id
type(self)._next_instance_id += 1
else:
self.X[row] = [
var.to_val(val) for var, val in zip(domain.attributes, example)
]
self._Y[row] = [
var.to_val(val)
for var, val in zip(
domain.class_vars, example[len(domain.attributes) :]
)
]
self.metas[row] = np.array(
[var.Unknown for var in domain.metas], dtype=object
)
def _check_all_dense(self):
return all(
x in (Storage.DENSE, Storage.MISSING)
for x in (self.X_density(), self.Y_density(), self.metas_density())
)
# A helper function for extend and insert
# Resize X, Y, metas and W.
def _resize_all(self, new_length):
old_length = self.X.shape[0]
if old_length == new_length:
return
if not self._check_all_dense():
raise ValueError("Tables with sparse data cannot be resized")
try:
self.X.resize(new_length, self.X.shape[1])
self._Y.resize(new_length, self._Y.shape[1])
self.metas.resize(new_length, self.metas.shape[1])
if self.W.ndim == 2:
self.W.resize((new_length, 0))
else:
self.W.resize(new_length)
self.ids.resize(new_length)
except Exception:
if self.X.shape[0] == new_length:
self.X.resize(old_length, self.X.shape[1])
if self._Y.shape[0] == new_length:
self._Y.resize(old_length, self._Y.shape[1])
if self.metas.shape[0] == new_length:
self.metas.resize(old_length, self.metas.shape[1])
if self.W.shape[0] == new_length:
if self.W.ndim == 2:
self.W.resize((old_length, 0))
else:
self.W.resize(old_length)
if self.ids.shape[0] == new_length:
self.ids.resize(old_length)
raise
def __getitem__(self, key):
if isinstance(key, Integral):
return RowInstance(self, key)
if not isinstance(key, tuple):
return self.from_table_rows(self, key)
if len(key) != 2:
raise IndexError("Table indices must be one- or two-dimensional")
row_idx, col_idx = key
if isinstance(row_idx, Integral):
if isinstance(col_idx, (str, Integral, Variable)):
col_idx = self.domain.index(col_idx)
var = self.domain[col_idx]
if 0 <= col_idx < len(self.domain.attributes):
return Value(var, self.X[row_idx, col_idx])
elif col_idx >= len(self.domain.attributes):
return Value(
var, self._Y[row_idx, col_idx - len(self.domain.attributes)]
)
elif col_idx < 0:
return Value(var, self.metas[row_idx, -1 - col_idx])
else:
row_idx = [row_idx]
# multiple rows OR single row but multiple columns:
# construct a new table
attributes, col_indices = self.domain._compute_col_indices(col_idx)
if attributes is not None:
n_attrs = len(self.domain.attributes)
r_attrs = [
attributes[i] for i, col in enumerate(col_indices) if 0 <= col < n_attrs
]
r_classes = [
attributes[i] for i, col in enumerate(col_indices) if col >= n_attrs
]
r_metas = [attributes[i] for i, col in enumerate(col_indices) if col < 0]
domain = Domain(r_attrs, r_classes, r_metas)
else:
domain = self.domain
return self.from_table(domain, self, row_idx)
def __setitem__(self, key, value):
if not isinstance(key, tuple):
if isinstance(value, Real):
self.X[key, :] = value
return
self._set_row(value, key)
return
if len(key) != 2:
raise IndexError("Table indices must be one- or two-dimensional")
row_idx, col_idx = key
# single row
if isinstance(row_idx, Integral):
if isinstance(col_idx, slice):
col_idx = range(*slice.indices(col_idx, self.X.shape[1]))
if not isinstance(col_idx, str) and isinstance(col_idx, Iterable):
col_idx = list(col_idx)
if not isinstance(col_idx, str) and isinstance(col_idx, Sized):
if isinstance(value, (Sequence, np.ndarray)):
values = value
elif isinstance(value, Iterable):
values = list(value)
else:
raise TypeError(
"Setting multiple values requires a " "sequence or numpy array"
)
if len(values) != len(col_idx):
raise ValueError("Invalid number of values")
else:
col_idx, values = [col_idx], [value]
for value, col_idx in zip(values, col_idx):
if not isinstance(value, Integral):
value = self.domain[col_idx].to_val(value)
if not isinstance(col_idx, Integral):
col_idx = self.domain.index(col_idx)
if col_idx >= 0:
if col_idx < self.X.shape[1]:
self.X[row_idx, col_idx] = value
else:
self._Y[row_idx, col_idx - self.X.shape[1]] = value
else:
self.metas[row_idx, -1 - col_idx] = value
# multiple rows, multiple columns
attributes, col_indices = self.domain._compute_col_indices(col_idx)
if col_indices is ...:
col_indices = range(len(self.domain))
n_attrs = self.X.shape[1]
if isinstance(value, str):
if not attributes:
attributes = self.domain.attributes
for var, col in zip(attributes, col_indices):
if 0 <= col < n_attrs:
self.X[row_idx, col] = var.to_val(value)
elif col >= n_attrs:
self._Y[row_idx, col - n_attrs] = var.to_val(value)
else:
self.metas[row_idx, -1 - col] = var.to_val(value)
else:
attr_cols = np.fromiter(
(col for col in col_indices if 0 <= col < n_attrs), int
)
class_cols = np.fromiter(
(col - n_attrs for col in col_indices if col >= n_attrs), int
)
meta_cols = np.fromiter((-1 - col for col in col_indices if col < 0), int)
if value is None:
value = Unknown
if not isinstance(value, (Real, np.ndarray)) and (
len(attr_cols) or len(class_cols)
):
raise TypeError("Ordinary attributes can only have primitive values")
if len(attr_cols):
self.X[row_idx, attr_cols] = value
if len(class_cols):
self._Y[row_idx, class_cols] = value
if len(meta_cols):
self.metas[row_idx, meta_cols] = value
def __delitem__(self, key):
if not self._check_all_dense():
raise ValueError("Rows of sparse data cannot be deleted")
if key is ...:
key = range(len(self))
self.X = np.delete(self.X, key, axis=0)
self.Y = np.delete(self._Y, key, axis=0)
self.metas = np.delete(self.metas, key, axis=0)
self.W = np.delete(self.W, key, axis=0)
self.ids = np.delete(self.ids, key, axis=0)
def __len__(self):
return self.X.shape[0]
def __str__(self):
return "[" + ",\n ".join(str(ex) for ex in self) + "]"
def __repr__(self):
head = 5
if self.is_sparse():
head = min(self.X.shape[0], head)
s = "[" + ",\n ".join(repr(ex) for ex in self[:head])
if len(self) > head:
s += ",\n ..."
s += "\n]"
return s
def clear(self):
"""Remove all rows from the table."""
if not self._check_all_dense():
raise ValueError("Tables with sparse data cannot be cleared")
del self[...]
def append(self, instance):
"""
Append a data instance to the table.
:param instance: a data instance
:type instance: Orange.data.Instance or a sequence of values
"""
self.insert(len(self), instance)
def insert(self, row, instance):
"""
Insert a data instance into the table.
:param row: row index
:type row: int
:param instance: a data instance
:type instance: Orange.data.Instance or a sequence of values
"""
if row < 0:
row += len(self)
if row < 0 or row > len(self):
raise IndexError("Index out of range")
self.ensure_copy() # ensure that numpy arrays are single-segment for resize
self._resize_all(len(self) + 1)
if row < len(self):
self.X[row + 1 :] = self.X[row:-1]
self._Y[row + 1 :] = self._Y[row:-1]
self.metas[row + 1 :] = self.metas[row:-1]
self.W[row + 1 :] = self.W[row:-1]
self.ids[row + 1 :] = self.ids[row:-1]
try:
self._set_row(instance, row)
if self.W.shape[-1]:
self.W[row] = 1
except Exception:
self.X[row:-1] = self.X[row + 1 :]
self._Y[row:-1] = self._Y[row + 1 :]
self.metas[row:-1] = self.metas[row + 1 :]
self.W[row:-1] = self.W[row + 1 :]
self.ids[row:-1] = self.ids[row + 1 :]
self._resize_all(len(self) - 1)
raise
def extend(self, instances):
"""
Extend the table with the given instances. The instances can be given
as a table of the same or a different domain, or a sequence. In the
        latter case, each instance can be given as
:obj:`~Orange.data.Instance` or a sequence of values (e.g. list,
tuple, numpy.array).
:param instances: additional instances
:type instances: Orange.data.Table or a sequence of instances
"""
if isinstance(instances, Table) and instances.domain == self.domain:
self.X = vstack((self.X, instances.X))
self._Y = vstack((self._Y, instances._Y))
self.metas = vstack((self.metas, instances.metas))
self.W = vstack((self.W, instances.W))
self.ids = hstack((self.ids, instances.ids))
else:
try:
old_length = len(self)
self._resize_all(old_length + len(instances))
for i, example in enumerate(instances):
self[old_length + i] = example
try:
self.ids[old_length + i] = example.id
except AttributeError:
self.ids[old_length + i] = self.new_id()
except Exception:
self._resize_all(old_length)
raise
@staticmethod
def concatenate(tables, axis=1):
"""Return concatenation of `tables` by `axis`."""
if not tables:
raise ValueError("need at least one table to concatenate")
if len(tables) == 1:
return tables[0].copy()
CONCAT_ROWS, CONCAT_COLS = 0, 1
if axis == CONCAT_ROWS:
table = tables[0].copy()
for t in tables[1:]:
table.extend(t)
return table
elif axis == CONCAT_COLS:
if reduce(
operator.iand,
(
set(
map(
operator.attrgetter("name"),
chain(t.domain.variables, t.domain.metas),
)
)
for t in tables
),
):
raise ValueError(
"Concatenating two domains with variables "
"with same name is undefined"
)
domain = Domain(
flatten(t.domain.attributes for t in tables),
flatten(t.domain.class_vars for t in tables),
flatten(t.domain.metas for t in tables),
)
def ndmin(A):
return A if A.ndim > 1 else A.reshape(A.shape[0], 1)
table = Table.from_numpy(
domain,
np.hstack(tuple(ndmin(t.X) for t in tables)),
np.hstack(tuple(ndmin(t.Y) for t in tables)),
np.hstack(tuple(ndmin(t.metas) for t in tables)),
np.hstack(tuple(ndmin(t.W) for t in tables)),
)
return table
raise ValueError("axis {} out of bounds [0, 2)".format(axis))
def is_view(self):
"""
Return `True` if all arrays represent a view referring to another table
"""
return (
(not self.X.shape[-1] or self.X.base is not None)
and (not self._Y.shape[-1] or self._Y.base is not None)
and (not self.metas.shape[-1] or self.metas.base is not None)
            and (not self.W.shape[-1] or self.W.base is not None)
)
def is_copy(self):
"""
Return `True` if the table owns its data
"""
return (
(not self.X.shape[-1] or self.X.base is None)
and (self._Y.base is None)
and (self.metas.base is None)
and (self.W.base is None)
)
def is_sparse(self):
"""
Return `True` if the table stores data in sparse format
"""
return any(sp.issparse(i) for i in [self.X, self.Y, self.metas])
def ensure_copy(self):
"""
Ensure that the table owns its data; copy arrays when necessary.
"""
def is_view(x):
# Sparse matrices don't have views like numpy arrays. Since indexing on
# them creates copies in constructor we can skip this check here.
return not sp.issparse(x) and x.base is not None
if is_view(self.X):
self.X = self.X.copy()
if is_view(self._Y):
self._Y = self._Y.copy()
if is_view(self.metas):
self.metas = self.metas.copy()
if is_view(self.W):
self.W = self.W.copy()
def copy(self):
"""
Return a copy of the table
"""
t = self.__class__(self)
t.ensure_copy()
return t
@staticmethod
def __determine_density(data):
if data is None:
            return Storage.MISSING
if data is not None and sp.issparse(data):
return Storage.SPARSE_BOOL if (data.data == 1).all() else Storage.SPARSE
else:
return Storage.DENSE
def X_density(self):
if not hasattr(self, "_X_density"):
self._X_density = self.__determine_density(self.X)
return self._X_density
def Y_density(self):
if not hasattr(self, "_Y_density"):
self._Y_density = self.__determine_density(self._Y)
return self._Y_density
def metas_density(self):
if not hasattr(self, "_metas_density"):
self._metas_density = self.__determine_density(self.metas)
return self._metas_density
def set_weights(self, weight=1):
"""
Set weights of data instances; create a vector of weights if necessary.
"""
if not self.W.shape[-1]:
self.W = np.empty(len(self))
self.W[:] = weight
def has_weights(self):
"""Return `True` if the data instances are weighed. """
return self.W.shape[-1] != 0
def total_weight(self):
"""
Return the total weight of instances in the table, or their number if
they are unweighted.
"""
if self.W.shape[-1]:
return sum(self.W)
return len(self)
def has_missing(self):
"""Return `True` if there are any missing attribute or class values."""
missing_x = not sp.issparse(self.X) and bn.anynan(
self.X
) # do not check for sparse X
return missing_x or bn.anynan(self._Y)
def has_missing_class(self):
"""Return `True` if there are any missing class values."""
return bn.anynan(self._Y)
def checksum(self, include_metas=True):
# TODO: zlib.adler32 does not work for numpy arrays with dtype object
# (after pickling and unpickling such arrays, checksum changes)
# Why, and should we fix it or remove it?
"""Return a checksum over X, Y, metas and W."""
cs = zlib.adler32(np.ascontiguousarray(self.X))
cs = zlib.adler32(np.ascontiguousarray(self._Y), cs)
if include_metas:
cs = zlib.adler32(np.ascontiguousarray(self.metas), cs)
cs = zlib.adler32(np.ascontiguousarray(self.W), cs)
return cs
def shuffle(self):
"""Randomly shuffle the rows of the table."""
if not self._check_all_dense():
raise ValueError("Rows of sparse data cannot be shuffled")
ind = np.arange(self.X.shape[0])
np.random.shuffle(ind)
self.X = self.X[ind]
self._Y = self._Y[ind]
self.metas = self.metas[ind]
self.W = self.W[ind]
def get_column_view(self, index):
"""
Return a vector - as a view, not a copy - with a column of the table,
and a bool flag telling whether this column is sparse. Note that
vertical slicing of sparse matrices is inefficient.
:param index: the index of the column
:type index: int, str or Orange.data.Variable
:return: (one-dimensional numpy array, sparse)
"""
def rx(M):
if sp.issparse(M):
return np.asarray(M.todense())[:, 0], True
else:
return M, False
if not isinstance(index, Integral):
index = self.domain.index(index)
if index >= 0:
if index < self.X.shape[1]:
return rx(self.X[:, index])
else:
return rx(self._Y[:, index - self.X.shape[1]])
else:
return rx(self.metas[:, -1 - index])
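    # Illustrative sketch, not part of the original file (the "age" variable name
    # is hypothetical):
    #
    #     col, is_sparse_col = table.get_column_view("age")
    #     col[:] = 0    # for dense storage this writes through to the table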
def _filter_is_defined(self, columns=None, negate=False):
if columns is None:
if sp.issparse(self.X):
remove = self.X.indptr[1:] != self.X.indptr[-1:] + self.X.shape[1]
else:
remove = bn.anynan(self.X, axis=1)
if sp.issparse(self._Y):
remove = np.logical_or(
remove, self._Y.indptr[1:] != self._Y.indptr[-1:] + self._Y.shape[1]
)
else:
remove = np.logical_or(remove, bn.anynan(self._Y, axis=1))
else:
remove = np.zeros(len(self), dtype=bool)
for column in columns:
col, sparse = self.get_column_view(column)
if sparse:
remove = np.logical_or(remove, col == 0)
else:
remove = np.logical_or(remove, bn.anynan([col], axis=0))
retain = remove if negate else np.logical_not(remove)
return self.from_table_rows(self, retain)
def _filter_has_class(self, negate=False):
if sp.issparse(self._Y):
if negate:
retain = self._Y.indptr[1:] != self._Y.indptr[-1:] + self._Y.shape[1]
else:
retain = self._Y.indptr[1:] == self._Y.indptr[-1:] + self._Y.shape[1]
else:
retain = bn.anynan(self._Y, axis=1)
if not negate:
retain = np.logical_not(retain)
return self.from_table_rows(self, retain)
def _filter_same_value(self, column, value, negate=False):
if not isinstance(value, Real):
value = self.domain[column].to_val(value)
sel = self.get_column_view(column)[0] == value
if negate:
sel = np.logical_not(sel)
return self.from_table_rows(self, sel)
def _filter_values(self, filter):
selection = self._values_filter_to_indicator(filter)
return self.from_table(self.domain, self, selection)
def _values_filter_to_indicator(self, filter):
"""Return selection of rows matching the filter conditions
Handles conjunction/disjunction and negate modifiers
Parameters
----------
filter: Values object containing the conditions
Returns
-------
A 1d bool array. len(result) == len(self)
"""
from Orange.data.filter import Values
if isinstance(filter, Values):
conditions = filter.conditions
conjunction = filter.conjunction
else:
conditions = [filter]
conjunction = True
if conjunction:
sel = np.ones(len(self), dtype=bool)
else:
sel = np.zeros(len(self), dtype=bool)
for f in conditions:
selection = self._filter_to_indicator(f)
if conjunction:
sel *= selection
else:
sel += selection
if filter.negate:
sel = ~sel
return sel
def _filter_to_indicator(self, filter):
"""Return selection of rows that match the condition.
Parameters
----------
filter: ValueFilter describing the condition
Returns
-------
A 1d bool array. len(result) == len(self)
"""
from Orange.data.filter import (
FilterContinuous,
FilterDiscrete,
FilterRegex,
FilterString,
FilterStringList,
Values,
)
if isinstance(filter, Values):
return self._values_filter_to_indicator(filter)
col = self.get_column_view(filter.column)[0]
if isinstance(filter, FilterDiscrete):
return self._discrete_filter_to_indicator(filter, col)
if isinstance(filter, FilterContinuous):
return self._continuous_filter_to_indicator(filter, col)
if isinstance(filter, FilterString):
return self._string_filter_to_indicator(filter, col)
if isinstance(filter, FilterStringList):
if not filter.case_sensitive:
col = np.char.lower(np.array(col, dtype=str))
vals = [val.lower() for val in filter.values]
else:
vals = filter.values
return reduce(operator.add, (col == val for val in vals))
if isinstance(filter, FilterRegex):
return np.vectorize(filter)(col)
raise TypeError("Invalid filter")
def _discrete_filter_to_indicator(self, filter, col):
"""Return selection of rows matched by the given discrete filter.
Parameters
----------
filter: FilterDiscrete
col: np.ndarray
Returns
-------
A 1d bool array. len(result) == len(self)
"""
if filter.values is None: # <- is defined filter
col = col.astype(float)
return ~np.isnan(col)
sel = np.zeros(len(self), dtype=bool)
for val in filter.values:
if not isinstance(val, Real):
val = self.domain[filter.column].to_val(val)
sel += col == val
return sel
def _continuous_filter_to_indicator(self, filter, col):
"""Return selection of rows matched by the given continuous filter.
Parameters
----------
filter: FilterContinuous
col: np.ndarray
Returns
-------
A 1d bool array. len(result) == len(self)
"""
if filter.oper == filter.IsDefined:
col = col.astype(float)
return ~np.isnan(col)
return self._range_filter_to_indicator(filter, col, filter.min, filter.max)
def _string_filter_to_indicator(self, filter, col):
"""Return selection of rows matched by the given string filter.
Parameters
----------
filter: FilterString
col: np.ndarray
Returns
-------
A 1d bool array. len(result) == len(self)
"""
if filter.oper == filter.IsDefined:
return col.astype(bool)
col = col.astype(str)
fmin = filter.min or ""
fmax = filter.max or ""
if not filter.case_sensitive:
# convert all to lower case
col = np.char.lower(col)
fmin = fmin.lower()
fmax = fmax.lower()
if filter.oper == filter.Contains:
return np.fromiter((fmin in e for e in col), dtype=bool)
if filter.oper == filter.StartsWith:
return np.fromiter((e.startswith(fmin) for e in col), dtype=bool)
if filter.oper == filter.EndsWith:
return np.fromiter((e.endswith(fmin) for e in col), dtype=bool)
return self._range_filter_to_indicator(filter, col, fmin, fmax)
@staticmethod
def _range_filter_to_indicator(filter, col, fmin, fmax):
if filter.oper == filter.Equal:
return col == fmin
if filter.oper == filter.NotEqual:
return col != fmin
if filter.oper == filter.Less:
return col < fmin
if filter.oper == filter.LessEqual:
return col <= fmin
if filter.oper == filter.Greater:
return col > fmin
if filter.oper == filter.GreaterEqual:
return col >= fmin
if filter.oper == filter.Between:
return (col >= fmin) * (col <= fmax)
if filter.oper == filter.Outside:
return (col < fmin) + (col > fmax)
raise TypeError("Invalid operator")
def _compute_basic_stats(
self, columns=None, include_metas=False, compute_variance=False
):
if compute_variance:
raise NotImplementedError(
"computation of variance is " "not implemented yet"
)
W = self.W if self.has_weights() else None
rr = []
stats = []
if not columns:
if self.domain.attributes:
rr.append(fast_stats(self.X, W))
if self.domain.class_vars:
rr.append(fast_stats(self._Y, W))
if include_metas and self.domain.metas:
rr.append(fast_stats(self.metas, W))
if len(rr):
stats = np.vstack(tuple(rr))
else:
columns = [self.domain.index(c) for c in columns]
nattrs = len(self.domain.attributes)
Xs = any(0 <= c < nattrs for c in columns) and fast_stats(self.X, W)
Ys = any(c >= nattrs for c in columns) and fast_stats(self._Y, W)
ms = any(c < 0 for c in columns) and fast_stats(self.metas, W)
for column in columns:
if 0 <= column < nattrs:
stats.append(Xs[column, :])
elif column >= nattrs:
stats.append(Ys[column - nattrs, :])
else:
stats.append(ms[-1 - column])
return stats
def _compute_distributions(self, columns=None):
if columns is None:
columns = range(len(self.domain.variables))
else:
columns = [self.domain.index(var) for var in columns]
distributions = []
if sp.issparse(self.X):
self.X = self.X.tocsc()
W = self.W.ravel() if self.has_weights() else None
for col in columns:
variable = self.domain[col]
# Select the correct data column from X, Y or metas
if 0 <= col < self.X.shape[1]:
x = self.X[:, col]
elif col < 0:
x = self.metas[:, col * (-1) - 1]
if np.issubdtype(x.dtype, np.dtype(object)):
x = x.astype(float)
else:
x = self._Y[:, col - self.X.shape[1]]
if variable.is_discrete:
dist, unknowns = bincount(
x, weights=W, max_val=len(variable.values) - 1
)
elif not x.shape[0]:
dist, unknowns = np.zeros((2, 0)), 0
else:
if W is not None:
if sp.issparse(x):
arg_sort = np.argsort(x.data)
ranks = x.indices[arg_sort]
vals = np.vstack((x.data[arg_sort], W[ranks]))
else:
ranks = np.argsort(x)
vals = np.vstack((x[ranks], W[ranks]))
else:
x_values = x.data if sp.issparse(x) else x
vals = np.ones((2, x_values.shape[0]))
vals[0, :] = x_values
vals[0, :].sort()
dist = np.array(_valuecount.valuecount(vals))
# If sparse, then 0s will not be counted with `valuecount`, so
# we have to add them to the result manually.
if sp.issparse(x) and sparse_has_implicit_zeros(x):
if W is not None:
zero_weights = sparse_implicit_zero_weights(x, W).sum()
else:
zero_weights = sparse_count_implicit_zeros(x)
zero_vec = [0, zero_weights]
dist = np.insert(
dist, np.searchsorted(dist[0], 0), zero_vec, axis=1
)
# Since `countnans` assumes vector shape to be (1, n) and `x`
# shape is (n, 1), we pass the transpose
unknowns = countnans(x.T, W)
distributions.append((dist, unknowns))
return distributions
def _compute_contingency(self, col_vars=None, row_var=None):
n_atts = self.X.shape[1]
if col_vars is None:
col_vars = range(len(self.domain.variables))
else:
col_vars = [self.domain.index(var) for var in col_vars]
if row_var is None:
row_var = self.domain.class_var
if row_var is None:
raise ValueError("No row variable")
row_desc = self.domain[row_var]
if not row_desc.is_discrete:
raise TypeError("Row variable must be discrete")
row_indi = self.domain.index(row_var)
n_rows = len(row_desc.values)
if 0 <= row_indi < n_atts:
row_data = self.X[:, row_indi]
elif row_indi < 0:
row_data = self.metas[:, -1 - row_indi]
else:
row_data = self._Y[:, row_indi - n_atts]
W = self.W if self.has_weights() else None
nan_inds = None
col_desc = [self.domain[var] for var in col_vars]
col_indi = [self.domain.index(var) for var in col_vars]
if any(not (var.is_discrete or var.is_continuous) for var in col_desc):
raise ValueError(
"contingency can be computed only for discrete " "and continuous values"
)
if row_data.dtype.kind != "f": # meta attributes can be stored as type object
row_data = row_data.astype(float)
unknown_rows = countnans(row_data)
if unknown_rows:
nan_inds = np.isnan(row_data)
row_data = row_data[~nan_inds]
if W:
W = W[~nan_inds]
unknown_rows = np.sum(W[nan_inds])
contingencies = [None] * len(col_desc)
for arr, f_cond, f_ind in (
(self.X, lambda i: 0 <= i < n_atts, lambda i: i),
(self._Y, lambda i: i >= n_atts, lambda i: i - n_atts),
(self.metas, lambda i: i < 0, lambda i: -1 - i),
):
if nan_inds is not None:
arr = arr[~nan_inds]
arr_indi = [e for e, ind in enumerate(col_indi) if f_cond(ind)]
vars = [(e, f_ind(col_indi[e]), col_desc[e]) for e in arr_indi]
disc_vars = [v for v in vars if v[2].is_discrete]
if disc_vars:
if sp.issparse(arr):
max_vals = max(len(v[2].values) for v in disc_vars)
disc_indi = {i for _, i, _ in disc_vars}
mask = [i in disc_indi for i in range(arr.shape[1])]
conts, nans = contingency(
arr, row_data, max_vals - 1, n_rows - 1, W, mask
)
for col_i, arr_i, var in disc_vars:
n_vals = len(var.values)
contingencies[col_i] = (conts[arr_i][:, :n_vals], nans[arr_i])
else:
for col_i, arr_i, var in disc_vars:
contingencies[col_i] = contingency(
arr[:, arr_i].astype(float),
row_data,
len(var.values) - 1,
n_rows - 1,
W,
)
cont_vars = [v for v in vars if v[2].is_continuous]
if cont_vars:
classes = row_data.astype(dtype=np.intp)
if W is not None:
W = W.astype(dtype=np.float64)
if sp.issparse(arr):
arr = sp.csc_matrix(arr)
for col_i, arr_i, _ in cont_vars:
if sp.issparse(arr):
col_data = arr.data[arr.indptr[arr_i] : arr.indptr[arr_i + 1]]
rows = arr.indices[arr.indptr[arr_i] : arr.indptr[arr_i + 1]]
W_ = None if W is None else W[rows]
classes_ = classes[rows]
else:
col_data, W_, classes_ = arr[:, arr_i], W, classes
col_data = col_data.astype(dtype=np.float64)
U, C, unknown = _contingency.contingency_floatarray(
col_data, classes_, n_rows, W_
)
contingencies[col_i] = ([U, C], unknown)
return contingencies, unknown_rows
@classmethod
def transpose(
cls,
table,
feature_names_column="",
meta_attr_name="Feature name",
feature_name="Feature",
):
"""
Transpose the table.
:param table: Table - table to transpose
:param feature_names_column: str - name of (String) meta attribute to
use for feature names
:param meta_attr_name: str - name of new meta attribute into which
feature names are mapped
:param feature_name: str - prefix used to generate feature names when
feature_names_column is not given
:return: Table - transposed table
"""
self = cls()
n_cols, self.n_rows = table.X.shape
old_domain = table.attributes.get("old_domain")
# attributes
# - classes and metas to attributes of attributes
# - arbitrary meta column to feature names
self.X = table.X.T
attributes = (
[ContinuousVariable(str(row[feature_names_column])) for row in table]
if feature_names_column
else [
ContinuousVariable(
feature_name
+ " "
+ str(i + 1).zfill(int(np.ceil(np.log10(n_cols))))
)
for i in range(n_cols)
]
)
if old_domain is not None and feature_names_column:
for i, _ in enumerate(attributes):
if attributes[i].name in old_domain:
var = old_domain[attributes[i].name]
attr = (
ContinuousVariable(var.name)
if var.is_continuous
else DiscreteVariable(var.name, var.values)
)
attr.attributes = var.attributes.copy()
attributes[i] = attr
def set_attributes_of_attributes(_vars, _table):
for i, variable in enumerate(_vars):
if variable.name == feature_names_column:
continue
for j, row in enumerate(_table):
value = (
variable.repr_val(row)
if np.isscalar(row)
else row[i]
if isinstance(row[i], str)
else variable.repr_val(row[i])
)
if value not in MISSING_VALUES:
attributes[j].attributes[variable.name] = value
set_attributes_of_attributes(table.domain.class_vars, table.Y)
set_attributes_of_attributes(table.domain.metas, table.metas)
# weights
self.W = np.empty((self.n_rows, 0))
def get_table_from_attributes_of_attributes(_vars, _dtype=float):
T = np.empty((self.n_rows, len(_vars)), dtype=_dtype)
for i, _attr in enumerate(table.domain.attributes):
for j, _var in enumerate(_vars):
val = str(_attr.attributes.get(_var.name, ""))
if not _var.is_string:
val = (
np.nan
if val in MISSING_VALUES
else _var.values.index(val)
if _var.is_discrete
else float(val)
)
T[i, j] = val
return T
# class_vars - attributes of attributes to class - from old domain
class_vars = []
if old_domain is not None:
class_vars = old_domain.class_vars
self.Y = get_table_from_attributes_of_attributes(class_vars)
# metas
# - feature names and attributes of attributes to metas
self.metas, metas = np.empty((self.n_rows, 0), dtype=object), []
if (
meta_attr_name not in [m.name for m in table.domain.metas]
and table.domain.attributes
):
self.metas = np.array(
[[a.name] for a in table.domain.attributes], dtype=object
)
metas.append(StringVariable(meta_attr_name))
names = chain.from_iterable(
list(attr.attributes) for attr in table.domain.attributes
)
names = sorted(set(names) - {var.name for var in class_vars})
def guessed_var(i, var_name):
orig_vals = M[:, i]
val_map, vals, var_type = Orange.data.io.guess_data_type(orig_vals)
values, variable = Orange.data.io.sanitize_variable(
val_map, vals, orig_vals, var_type, {}, _metas, None, var_name
)
M[:, i] = values
return variable
_metas = [StringVariable(n) for n in names]
if old_domain is not None:
_metas = [m for m in old_domain.metas if m.name != meta_attr_name]
M = get_table_from_attributes_of_attributes(_metas, _dtype=object)
if old_domain is None:
_metas = [guessed_var(i, m.name) for i, m in enumerate(_metas)]
if _metas:
self.metas = np.hstack((self.metas, M))
metas.extend(_metas)
self.domain = Domain(attributes, class_vars, metas)
cls._init_ids(self)
self.attributes = table.attributes.copy()
self.attributes["old_domain"] = table.domain
return self
def to_sparse(self, sparse_attributes=True, sparse_class=False, sparse_metas=False):
def sparsify(features):
for f in features:
f.sparse = True
new_domain = self.domain.copy()
if sparse_attributes:
sparsify(new_domain.attributes)
if sparse_class:
sparsify(new_domain.class_vars)
if sparse_metas:
sparsify(new_domain.metas)
return self.transform(new_domain)
def to_dense(self, dense_attributes=True, dense_class=True, dense_metas=True):
def densify(features):
for f in features:
f.sparse = False
new_domain = self.domain.copy()
if dense_attributes:
densify(new_domain.attributes)
if dense_class:
densify(new_domain.class_vars)
if dense_metas:
densify(new_domain.metas)
t = self.transform(new_domain)
t.ids = self.ids # preserve indices
return t
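# Usage sketch (illustrative, not from the original source; assumes Orange's
# bundled "iris" dataset): to_sparse() and to_dense() only flip the `sparse`
# flags on a copy of the domain and then transform the table.
#     >>> data = Table("iris")
#     >>> sparse_data = data.to_sparse()      # X becomes a scipy sparse matrix
#     >>> sparse_data.is_sparse()
#     True
#     >>> sparse_data.to_dense().is_sparse()
#     False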
def _check_arrays(*arrays, dtype=None):
checked = []
if not len(arrays):
return checked
def ninstances(array):
if hasattr(array, "shape"):
return array.shape[0]
else:
return len(array) if array is not None else 0
shape_1 = ninstances(arrays[0])
for array in arrays:
if array is None:
checked.append(array)
continue
if ninstances(array) != shape_1:
raise ValueError(
"Leading dimension mismatch (%d != %d)" % (ninstances(array), shape_1)
)
if sp.issparse(array):
array.data = np.asarray(array.data)
has_inf = _check_inf(array.data)
else:
if dtype is not None:
array = np.asarray(array, dtype=dtype)
else:
array = np.asarray(array)
has_inf = _check_inf(array)
if has_inf:
raise ValueError("Array contains infinity.")
checked.append(array)
return checked
def _check_inf(array):
return array.dtype.char in np.typecodes["AllFloat"] and np.isinf(array.data).any()
def _subarray(arr, rows, cols):
rows = _optimize_indices(rows, arr.shape[0])
cols = _optimize_indices(cols, arr.shape[1])
return arr[_rxc_ix(rows, cols)]
def _optimize_indices(indices, maxlen):
"""
Convert integer indices to a slice if possible. Only increasing integer
ranges with a constant positive step and valid start and end indices are
converted; other ranges are returned unchanged so that invalid indices
still raise an exception.
Returning a slice lets numpy reuse the underlying data array, since numpy
copies the data when it is indexed with an integer array.
Parameters
----------
indices : 1D sequence, slice or Ellipsis
"""
if isinstance(indices, slice):
return indices
if indices is ...:
return slice(None, None, 1)
if len(indices) >= 1:
indices = np.asarray(indices)
if indices.dtype != bool:
begin = indices[0]
end = indices[-1]
steps = np.diff(indices) if len(indices) > 1 else np.array([1])
step = steps[0]
# continuous ranges with constant step and valid start and stop index can be slices
if np.all(steps == step) and step > 0 and begin >= 0 and end < maxlen:
return slice(begin, end + step, step)
return indices
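# Example of the conversion performed above (editor's illustration):
#     >>> _optimize_indices([2, 3, 4], maxlen=10)
#     slice(2, 5, 1)
#     >>> _optimize_indices([4, 2, 3], maxlen=10)   # not increasing: kept as array
#     array([4, 2, 3])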
def _rxc_ix(rows, cols):
"""
Construct an index object to index the `rows` x `cols` cross product.
Rows and columns can be a 1d bool or int sequence, or a slice.
The latter is a convenience and is used as-is; for instance,
`slice(None)` selects all rows or columns.
Parameters
----------
rows : 1D sequence, slice
Row indices.
cols : 1D sequence, slice
Column indices.
See Also
--------
numpy.ix_
Examples
--------
>>> import numpy as np
>>> a = np.arange(10).reshape(2, 5)
>>> a[_rxc_ix([0, 1], [3, 4])]
array([[3, 4],
[8, 9]])
>>> a[_rxc_ix([False, True], slice(None, None, 1))]
array([[5, 6, 7, 8, 9]])
"""
isslice = (isinstance(rows, slice), isinstance(cols, slice))
if isslice == (True, True):
return rows, cols
elif isslice == (True, False):
return rows, np.asarray(np.ix_(cols), int).ravel()
elif isslice == (False, True):
return np.asarray(np.ix_(rows), int).ravel(), cols
else:
r, c = np.ix_(rows, cols)
return np.asarray(r, int), np.asarray(c, int)
def assure_domain_conversion_sparsity(target, source):
"""
Assure that the table obeys the domain conversion's suggestions about sparsity.
Args:
target (Table): the target table.
source (Table): the source table.
Returns:
Table: with fixed sparsity. The sparsity is set as it is recommended by domain conversion
for transformation from source to the target domain.
"""
conversion = target.domain.get_conversion(source.domain)
match_density = [assure_array_dense, assure_array_sparse]
target.X = match_density[conversion.sparse_X](target.X)
target.Y = match_density[conversion.sparse_Y](target.Y)
target.metas = match_density[conversion.sparse_metas](target.metas)
return target
| <filename>orange3/Orange/data/table.py
import operator
import os
import zlib
from collections.abc import MutableSequence, Iterable, Sequence, Sized
from functools import reduce
from itertools import chain
from numbers import Real, Integral
from threading import Lock, RLock
import bottleneck as bn
import numpy as np
from scipy import sparse as sp
import Orange.data # import for io.py
from Orange.data import (
_contingency,
_valuecount,
Domain,
Variable,
Storage,
StringVariable,
Unknown,
Value,
Instance,
ContinuousVariable,
DiscreteVariable,
MISSING_VALUES,
)
from Orange.data.util import (
SharedComputeValue,
vstack,
hstack,
assure_array_dense,
assure_array_sparse,
assure_column_dense,
assure_column_sparse,
)
from Orange.statistics.util import (
bincount,
countnans,
contingency,
stats as fast_stats,
sparse_has_implicit_zeros,
sparse_count_implicit_zeros,
sparse_implicit_zero_weights,
)
from Orange.util import flatten
__all__ = ["dataset_dirs", "get_sample_datasets_dir", "RowInstance", "Table"]
def get_sample_datasets_dir():
orange_data_table = os.path.dirname(__file__)
dataset_dir = os.path.join(orange_data_table, "..", "datasets")
return os.path.realpath(dataset_dir)
dataset_dirs = ["", get_sample_datasets_dir()]
"""Domain conversion cache used in Table.from_table. It is global so that
chaining of domain conversions also works with caching even with descendants
of Table."""
_conversion_cache = None
_conversion_cache_lock = RLock()
class RowInstance(Instance):
sparse_x = None
sparse_y = None
sparse_metas = None
_weight = None
def __init__(self, table, row_index):
"""
Construct a data instance representing the given row of the table.
"""
self.table = table
self._domain = table.domain
self.row_index = row_index
self.id = table.ids[row_index]
self._x = table.X[row_index]
if sp.issparse(self._x):
self.sparse_x = sp.csr_matrix(self._x)
self._x = np.asarray(self._x.todense())[0]
self._y = table._Y[row_index]
if sp.issparse(self._y):
self.sparse_y = sp.csr_matrix(self._y)
self._y = np.asarray(self._y.todense())[0]
self._metas = table.metas[row_index]
if sp.issparse(self._metas):
self.sparse_metas = sp.csr_matrix(self._metas)
self._metas = np.asarray(self._metas.todense())[0]
@property
def weight(self):
if not self.table.has_weights():
return 1
return self.table.W[self.row_index]
@weight.setter
def weight(self, weight):
if not self.table.has_weights():
self.table.set_weights()
self.table.W[self.row_index] = weight
def set_class(self, value):
self._check_single_class()
if not isinstance(value, Real):
value = self.table.domain.class_var.to_val(value)
self._y[0] = value
if self.sparse_y:
self.table._Y[self.row_index, 0] = value
def __setitem__(self, key, value):
if not isinstance(key, Integral):
key = self._domain.index(key)
if isinstance(value, str):
var = self._domain[key]
value = var.to_val(value)
if key >= 0:
if not isinstance(value, Real):
raise TypeError(
"Expected primitive value, got '%s'" % type(value).__name__
)
if key < len(self._x):
self._x[key] = value
if self.sparse_x is not None:
self.table.X[self.row_index, key] = value
else:
self._y[key - len(self._x)] = value
if self.sparse_y is not None:
self.table._Y[self.row_index, key - len(self._x)] = value
else:
self._metas[-1 - key] = value
if self.sparse_metas:
self.table.metas[self.row_index, -1 - key] = value
def _str(self, limit):
def sp_values(matrix, variables):
if not sp.issparse(matrix):
if matrix.ndim == 1:
matrix = matrix[:, np.newaxis]
return Instance.str_values(matrix[row], variables, limit)
row_entries, idx = [], 0
while idx < len(variables):
# Make sure to stop printing variables if we limit the output
if limit and len(row_entries) >= 5:
break
var = variables[idx]
if var.is_discrete or matrix[row, idx]:
row_entries.append(
"%s=%s" % (var.name, var.str_val(matrix[row, idx]))
)
idx += 1
s = ", ".join(row_entries)
if limit and idx < len(variables):
s += ", ..."
return s
table = self.table
domain = table.domain
row = self.row_index
s = "[" + sp_values(table.X, domain.attributes)
if domain.class_vars:
s += " | " + sp_values(table.Y, domain.class_vars)
s += "]"
if self._domain.metas:
s += " {" + sp_values(table.metas, domain.metas) + "}"
return s
def __str__(self):
return self._str(False)
def __repr__(self):
return self._str(True)
class Columns:
def __init__(self, domain):
for v in chain(domain.variables, domain.metas):
setattr(self, v.name.replace(" ", "_"), v)
# noinspection PyPep8Naming
class Table(MutableSequence, Storage):
__file__ = None
name = "untitled"
@property
def columns(self):
"""
A class whose attributes contain attribute descriptors for columns.
For a table `table`, setting `c = table.columns` will allow accessing
the table's variables with, for instance, `c.gender`, `c.age`, etc.
Spaces are replaced with underscores.
"""
return Columns(self.domain)
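# Usage sketch (illustrative; the attribute names come from Orange's bundled
# "iris" dataset and are an assumption, not part of this module):
#     >>> data = Table("iris")
#     >>> c = data.columns
#     >>> c.sepal_length.name
#     'sepal length'
#     >>> data[0, c.sepal_length]
#     Value('sepal length', 5.1)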
_next_instance_id = 0
_next_instance_lock = Lock()
@property
def Y(self):
if self._Y.shape[1] == 1:
return self._Y[:, 0]
return self._Y
@Y.setter
def Y(self, value):
if len(value.shape) == 1:
value = value[:, None]
if sp.issparse(value) and len(self) != value.shape[0]:
value = value.T
self._Y = value
def __new__(cls, *args, **kwargs):
if not args and not kwargs:
return super().__new__(cls)
if "filename" in kwargs:
args = [kwargs.pop("filename")]
if not args:
raise TypeError("Table takes at least 1 positional argument (0 given))")
if isinstance(args[0], str):
if args[0].startswith("https://") or args[0].startswith("http://"):
return cls.from_url(args[0], **kwargs)
else:
return cls.from_file(args[0])
elif isinstance(args[0], Table):
return cls.from_table(args[0].domain, args[0])
elif isinstance(args[0], Domain):
domain, args = args[0], args[1:]
if not args:
return cls.from_domain(domain, **kwargs)
if isinstance(args[0], Table):
return cls.from_table(domain, *args)
elif isinstance(args[0], list):
return cls.from_list(domain, *args)
else:
domain = None
return cls.from_numpy(domain, *args, **kwargs)
def __init__(self, *args, **kwargs):
# So subclasses can expect to call super without breakage; noop
pass
@classmethod
def from_domain(cls, domain, n_rows=0, weights=False):
"""
Construct a new `Table` with the given number of rows for the given
domain. The optional vector of weights is initialized to 1's.
:param domain: domain for the `Table`
:type domain: Orange.data.Domain
:param n_rows: number of rows in the new table
:type n_rows: int
:param weights: indicates whether to construct a vector of weights
:type weights: bool
:return: a new table
:rtype: Orange.data.Table
"""
self = cls()
self.domain = domain
self.n_rows = n_rows
self.X = np.zeros((n_rows, len(domain.attributes)))
self.Y = np.zeros((n_rows, len(domain.class_vars)))
if weights:
self.W = np.ones(n_rows)
else:
self.W = np.empty((n_rows, 0))
self.metas = np.empty((n_rows, len(self.domain.metas)), object)
cls._init_ids(self)
self.attributes = {}
return self
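# Usage sketch (illustrative only): building an empty table for a small,
# hypothetical two-attribute domain and inspecting the allocated arrays.
#     >>> from Orange.data import Domain, ContinuousVariable
#     >>> domain = Domain([ContinuousVariable("x"), ContinuousVariable("y")])
#     >>> data = Table.from_domain(domain, n_rows=3, weights=True)
#     >>> data.X.shape, data.W.shape
#     ((3, 2), (3,))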
@classmethod
def from_table(cls, domain, source, row_indices=...):
"""
Create a new table from selected columns and/or rows of an existing
one. The columns are chosen using a domain. The domain may also include
variables that do not appear in the source table; they are computed
from source variables if possible.
The resulting data may be a view or a copy of the existing data.
:param domain: the domain for the new table
:type domain: Orange.data.Domain
:param source: the source table
:type source: Orange.data.Table
:param row_indices: indices of the rows to include
:type row_indices: a slice or a sequence
:return: a new table
:rtype: Orange.data.Table
"""
global _conversion_cache
def get_columns(
row_indices, src_cols, n_rows, dtype=np.float64, is_sparse=False
):
if not len(src_cols):
if is_sparse:
return sp.csr_matrix((n_rows, 0), dtype=source.X.dtype)
else:
return np.zeros((n_rows, 0), dtype=source.X.dtype)
# match density for subarrays
match_density = assure_array_sparse if is_sparse else assure_array_dense
n_src_attrs = len(source.domain.attributes)
if all(isinstance(x, Integral) and 0 <= x < n_src_attrs for x in src_cols):
return match_density(_subarray(source.X, row_indices, src_cols))
if all(isinstance(x, Integral) and x < 0 for x in src_cols):
arr = match_density(
_subarray(source.metas, row_indices, [-1 - x for x in src_cols])
)
if arr.dtype != dtype:
return arr.astype(dtype)
return arr
if all(isinstance(x, Integral) and x >= n_src_attrs for x in src_cols):
return match_density(
_subarray(
source._Y, row_indices, [x - n_src_attrs for x in src_cols]
)
)
# initialize final array & set `match_density` for columns
if is_sparse:
a = sp.dok_matrix((n_rows, len(src_cols)), dtype=dtype)
match_density = assure_column_sparse
else:
a = np.empty((n_rows, len(src_cols)), dtype=dtype)
match_density = assure_column_dense
shared_cache = _conversion_cache
for i, col in enumerate(src_cols):
if col is None:
a[:, i] = Unknown
elif not isinstance(col, Integral):
if isinstance(col, SharedComputeValue):
if (id(col.compute_shared), id(source)) not in shared_cache:
shared_cache[
id(col.compute_shared), id(source)
] = col.compute_shared(source)
shared = shared_cache[id(col.compute_shared), id(source)]
if row_indices is not ...:
a[:, i] = match_density(
col(source, shared_data=shared)[row_indices]
)
else:
a[:, i] = match_density(col(source, shared_data=shared))
else:
if row_indices is not ...:
a[:, i] = match_density(col(source)[row_indices])
else:
a[:, i] = match_density(col(source))
elif col < 0:
a[:, i] = match_density(source.metas[row_indices, -1 - col])
elif col < n_src_attrs:
a[:, i] = match_density(source.X[row_indices, col])
else:
a[:, i] = match_density(source._Y[row_indices, col - n_src_attrs])
if is_sparse:
a = a.tocsr()
return a
with _conversion_cache_lock:
new_cache = _conversion_cache is None
try:
if new_cache:
_conversion_cache = {}
else:
cached = _conversion_cache.get((id(domain), id(source)))
if cached:
return cached
if domain == source.domain:
table = cls.from_table_rows(source, row_indices)
# assure resulting domain is the instance passed on input
table.domain = domain
# since sparse flags are not considered when checking for
# domain equality, fix manually.
table = assure_domain_conversion_sparsity(table, source)
return table
if isinstance(row_indices, slice):
start, stop, stride = row_indices.indices(source.X.shape[0])
n_rows = (stop - start) // stride
if n_rows < 0:
n_rows = 0
elif row_indices is ...:
n_rows = len(source)
else:
n_rows = len(row_indices)
self = cls()
self.domain = domain
conversion = domain.get_conversion(source.domain)
self.X = get_columns(
row_indices,
conversion.attributes,
n_rows,
is_sparse=conversion.sparse_X,
)
if self.X.ndim == 1:
self.X = self.X.reshape(-1, len(self.domain.attributes))
self.Y = get_columns(
row_indices,
conversion.class_vars,
n_rows,
is_sparse=conversion.sparse_Y,
)
dtype = np.float64
if any(isinstance(var, StringVariable) for var in domain.metas):
dtype = object
self.metas = get_columns(
row_indices,
conversion.metas,
n_rows,
dtype,
is_sparse=conversion.sparse_metas,
)
if self.metas.ndim == 1:
self.metas = self.metas.reshape(-1, len(self.domain.metas))
if source.has_weights():
self.W = source.W[row_indices]
else:
self.W = np.empty((n_rows, 0))
self.name = getattr(source, "name", "")
if hasattr(source, "ids"):
self.ids = source.ids[row_indices]
else:
cls._init_ids(self)
self.attributes = getattr(source, "attributes", {})
_conversion_cache[(id(domain), id(source))] = self
return self
finally:
if new_cache:
_conversion_cache = None
def transform(self, domain):
"""
Construct a table with a different domain.
The new table keeps the row ids and other information. If the table
is a subclass of :obj:`Table`, the resulting table will be of the same
type.
In a typical scenario, an existing table is augmented with a new
column by ::
domain = Domain(old_domain.attributes + [new_attribute],
old_domain.class_vars,
old_domain.metas)
table = data.transform(domain)
table[:, new_attribute] = new_column
Args:
domain (Domain): new domain
Returns:
A new table
"""
return type(self).from_table(domain, self)
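# Concrete sketch of the pattern described in the docstring (illustrative;
# "sepal area" and the computed column are made up, and the bundled "iris"
# dataset is assumed):
#     >>> from Orange.data import Domain, ContinuousVariable
#     >>> data = Table("iris")
#     >>> new_attribute = ContinuousVariable("sepal area")
#     >>> domain = Domain(data.domain.attributes + (new_attribute,),
#     ...                 data.domain.class_vars, data.domain.metas)
#     >>> new_column = (data.X[:, 0] * data.X[:, 1]).reshape(-1, 1)
#     >>> augmented = data.transform(domain)
#     >>> augmented[:, new_attribute] = new_column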
@classmethod
def from_table_rows(cls, source, row_indices):
"""
Construct a new table by selecting rows from the source table.
:param source: an existing table
:type source: Orange.data.Table
:param row_indices: indices of the rows to include
:type row_indices: a slice or a sequence
:return: a new table
:rtype: Orange.data.Table
"""
self = cls()
self.domain = source.domain
self.X = source.X[row_indices]
if self.X.ndim == 1:
self.X = self.X.reshape(-1, len(self.domain.attributes))
self.Y = source._Y[row_indices]
self.metas = source.metas[row_indices]
if self.metas.ndim == 1:
self.metas = self.metas.reshape(-1, len(self.domain.metas))
self.W = source.W[row_indices]
self.name = getattr(source, "name", "")
self.ids = np.array(source.ids[row_indices])
self.attributes = getattr(source, "attributes", {})
return self
@classmethod
def from_numpy(cls, domain, X, Y=None, metas=None, W=None):
"""
Construct a table from numpy arrays with the given domain. The number
of variables in the domain must match the number of columns in the
corresponding arrays. All arrays must have the same number of rows.
Arrays may be of different numpy types, and may be dense or sparse.
:param domain: the domain for the new table
:type domain: Orange.data.Domain
:param X: array with attribute values
:type X: np.array
:param Y: array with class values
:type Y: np.array
:param metas: array with meta attributes
:type metas: np.array
:param W: array with weights
:type W: np.array
:return:
"""
X, Y, W = _check_arrays(X, Y, W, dtype="float64")
metas, = _check_arrays(metas, dtype=object)
if Y is not None and Y.ndim == 1:
Y = Y.reshape(Y.shape[0], 1)
if domain is None:
domain = Domain.from_numpy(X, Y, metas)
if Y is None:
if sp.issparse(X):
Y = np.empty((X.shape[0], 0), dtype=np.float64)
else:
Y = X[:, len(domain.attributes) :]
X = X[:, : len(domain.attributes)]
if metas is None:
metas = np.empty((X.shape[0], 0), object)
if W is None or W.size == 0:
W = np.empty((X.shape[0], 0))
else:
W = W.reshape(W.size)
if X.shape[1] != len(domain.attributes):
raise ValueError(
"Invalid number of variable columns ({} != {})".format(
X.shape[1], len(domain.attributes)
)
)
if Y.shape[1] != len(domain.class_vars):
raise ValueError(
"Invalid number of class columns ({} != {})".format(
Y.shape[1], len(domain.class_vars)
)
)
if metas.shape[1] != len(domain.metas):
raise ValueError(
"Invalid number of meta attribute columns ({} != {})".format(
metas.shape[1], len(domain.metas)
)
)
if not X.shape[0] == Y.shape[0] == metas.shape[0] == W.shape[0]:
raise ValueError("Parts of data contain different numbers of rows.")
self = cls()
self.domain = domain
self.X = X
self.Y = Y
self.metas = metas
self.W = W
self.n_rows = self.X.shape[0]
cls._init_ids(self)
self.attributes = {}
return self
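# Usage sketch (illustrative; the domain and arrays are made up):
#     >>> import numpy as np
#     >>> from Orange.data import Domain, ContinuousVariable, DiscreteVariable
#     >>> domain = Domain([ContinuousVariable("x"), ContinuousVariable("y")],
#     ...                 DiscreteVariable("cls", values=["a", "b"]))
#     >>> data = Table.from_numpy(domain,
#     ...                         X=np.array([[1.0, 2.0], [3.0, 4.0]]),
#     ...                         Y=np.array([0, 1]))
#     >>> len(data), data.Y.shape
#     (2, (2,))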
@classmethod
def from_list(cls, domain, rows, weights=None):
if weights is not None and len(rows) != len(weights):
raise ValueError("mismatching number of instances and weights")
self = cls.from_domain(domain, len(rows), weights is not None)
attrs, classes = domain.attributes, domain.class_vars
metas = domain.metas
nattrs, ncls = len(domain.attributes), len(domain.class_vars)
for i, row in enumerate(rows):
if isinstance(row, Instance):
row = row.list
for j, (var, val) in enumerate(zip(attrs, row)):
self.X[i, j] = var.to_val(val)
for j, (var, val) in enumerate(zip(classes, row[nattrs:])):
self._Y[i, j] = var.to_val(val)
for j, (var, val) in enumerate(zip(metas, row[nattrs + ncls :])):
self.metas[i, j] = var.to_val(val)
if weights is not None:
self.W = np.array(weights)
return self
@classmethod
def _init_ids(cls, obj):
with cls._next_instance_lock:
obj.ids = np.array(
range(cls._next_instance_id, cls._next_instance_id + obj.X.shape[0])
)
cls._next_instance_id += obj.X.shape[0]
@classmethod
def new_id(cls):
with cls._next_instance_lock:
id = cls._next_instance_id
cls._next_instance_id += 1
return id
def save(self, filename):
"""
Save a data table to a file. The path can be absolute or relative.
:param filename: File name
:type filename: str
"""
ext = os.path.splitext(filename)[1]
from Orange.data.io import FileFormat
writer = FileFormat.writers.get(ext)
if not writer:
desc = FileFormat.names.get(ext)
if desc:
raise IOError("Writing of {}s is not supported".format(desc.lower()))
else:
raise IOError("Unknown file name extension.")
writer.write_file(filename, self)
@classmethod
def from_file(cls, filename, sheet=None):
"""
Read a data table from a file. The path can be absolute or relative.
:param filename: File name
:type filename: str
:param sheet: Sheet in a file (optional)
:type sheet: str
:return: a new data table
:rtype: Orange.data.Table
"""
from Orange.data.io import FileFormat
absolute_filename = FileFormat.locate(filename, dataset_dirs)
reader = FileFormat.get_reader(absolute_filename)
reader.select_sheet(sheet)
data = reader.read()
# Readers return plain table. Make sure to cast it to appropriate
# (subclass) type
if cls != data.__class__:
data = cls(data)
# no need to call _init_ids as functions from .io already
# construct a table with .ids
data.__file__ = absolute_filename
return data
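# Usage sketch (illustrative; assumes the datasets bundled with Orange):
#     >>> data = Table.from_file("iris")     # or simply Table("iris")
#     >>> len(data), len(data.domain.attributes)
#     (150, 4)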
@classmethod
def from_url(cls, url):
from Orange.data.io import UrlReader
reader = UrlReader(url)
data = reader.read()
if cls != data.__class__:
data = cls(data)
return data
# Helper function for __setitem__ and insert:
# Set the row of table data matrices
# noinspection PyProtectedMember
def _set_row(self, example, row):
domain = self.domain
if isinstance(example, Instance):
if example.domain == domain:
if isinstance(example, RowInstance):
self.X[row] = example._x
self._Y[row] = example._y
else:
self.X[row] = example._x
self._Y[row] = example._y
self.metas[row] = example._metas
return
self.X[row], self._Y[row], self.metas[row] = self.domain.convert(example)
try:
self.ids[row] = example.id
except:
with type(self)._next_instance_lock:
self.ids[row] = type(self)._next_instance_id
type(self)._next_instance_id += 1
else:
self.X[row] = [
var.to_val(val) for var, val in zip(domain.attributes, example)
]
self._Y[row] = [
var.to_val(val)
for var, val in zip(
domain.class_vars, example[len(domain.attributes) :]
)
]
self.metas[row] = np.array(
[var.Unknown for var in domain.metas], dtype=object
)
def _check_all_dense(self):
return all(
x in (Storage.DENSE, Storage.MISSING)
for x in (self.X_density(), self.Y_density(), self.metas_density())
)
# A helper function for extend and insert
# Resize X, Y, metas and W.
def _resize_all(self, new_length):
old_length = self.X.shape[0]
if old_length == new_length:
return
if not self._check_all_dense():
raise ValueError("Tables with sparse data cannot be resized")
try:
self.X.resize(new_length, self.X.shape[1])
self._Y.resize(new_length, self._Y.shape[1])
self.metas.resize(new_length, self.metas.shape[1])
if self.W.ndim == 2:
self.W.resize((new_length, 0))
else:
self.W.resize(new_length)
self.ids.resize(new_length)
except Exception:
if self.X.shape[0] == new_length:
self.X.resize(old_length, self.X.shape[1])
if self._Y.shape[0] == new_length:
self._Y.resize(old_length, self._Y.shape[1])
if self.metas.shape[0] == new_length:
self.metas.resize(old_length, self.metas.shape[1])
if self.W.shape[0] == new_length:
if self.W.ndim == 2:
self.W.resize((old_length, 0))
else:
self.W.resize(old_length)
if self.ids.shape[0] == new_length:
self.ids.resize(old_length)
raise
def __getitem__(self, key):
if isinstance(key, Integral):
return RowInstance(self, key)
if not isinstance(key, tuple):
return self.from_table_rows(self, key)
if len(key) != 2:
raise IndexError("Table indices must be one- or two-dimensional")
row_idx, col_idx = key
if isinstance(row_idx, Integral):
if isinstance(col_idx, (str, Integral, Variable)):
col_idx = self.domain.index(col_idx)
var = self.domain[col_idx]
if 0 <= col_idx < len(self.domain.attributes):
return Value(var, self.X[row_idx, col_idx])
elif col_idx >= len(self.domain.attributes):
return Value(
var, self._Y[row_idx, col_idx - len(self.domain.attributes)]
)
elif col_idx < 0:
return Value(var, self.metas[row_idx, -1 - col_idx])
else:
row_idx = [row_idx]
# multiple rows OR single row but multiple columns:
# construct a new table
attributes, col_indices = self.domain._compute_col_indices(col_idx)
if attributes is not None:
n_attrs = len(self.domain.attributes)
r_attrs = [
attributes[i] for i, col in enumerate(col_indices) if 0 <= col < n_attrs
]
r_classes = [
attributes[i] for i, col in enumerate(col_indices) if col >= n_attrs
]
r_metas = [attributes[i] for i, col in enumerate(col_indices) if col < 0]
domain = Domain(r_attrs, r_classes, r_metas)
else:
domain = self.domain
return self.from_table(domain, self, row_idx)
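# Indexing sketch (illustrative; assumes the bundled "iris" dataset, and the
# printed representations are approximate):
#     >>> data = Table("iris")
#     >>> data[0]                            # a single row -> RowInstance
#     [5.1, 3.5, 1.4, 0.2 | Iris-setosa]
#     >>> data[0, "petal length"]            # a single cell -> Value
#     Value('petal length', 1.4)
#     >>> subset = data[:10, ["sepal length", "petal length"]]  # a new Table
#     >>> len(subset.domain.attributes)
#     2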
def __setitem__(self, key, value):
if not isinstance(key, tuple):
if isinstance(value, Real):
self.X[key, :] = value
return
self._set_row(value, key)
return
if len(key) != 2:
raise IndexError("Table indices must be one- or two-dimensional")
row_idx, col_idx = key
# single row
if isinstance(row_idx, Integral):
if isinstance(col_idx, slice):
col_idx = range(*slice.indices(col_idx, self.X.shape[1]))
if not isinstance(col_idx, str) and isinstance(col_idx, Iterable):
col_idx = list(col_idx)
if not isinstance(col_idx, str) and isinstance(col_idx, Sized):
if isinstance(value, (Sequence, np.ndarray)):
values = value
elif isinstance(value, Iterable):
values = list(value)
else:
raise TypeError(
"Setting multiple values requires a " "sequence or numpy array"
)
if len(values) != len(col_idx):
raise ValueError("Invalid number of values")
else:
col_idx, values = [col_idx], [value]
for value, col_idx in zip(values, col_idx):
if not isinstance(value, Integral):
value = self.domain[col_idx].to_val(value)
if not isinstance(col_idx, Integral):
col_idx = self.domain.index(col_idx)
if col_idx >= 0:
if col_idx < self.X.shape[1]:
self.X[row_idx, col_idx] = value
else:
self._Y[row_idx, col_idx - self.X.shape[1]] = value
else:
self.metas[row_idx, -1 - col_idx] = value
# multiple rows, multiple columns
attributes, col_indices = self.domain._compute_col_indices(col_idx)
if col_indices is ...:
col_indices = range(len(self.domain))
n_attrs = self.X.shape[1]
if isinstance(value, str):
if not attributes:
attributes = self.domain.attributes
for var, col in zip(attributes, col_indices):
if 0 <= col < n_attrs:
self.X[row_idx, col] = var.to_val(value)
elif col >= n_attrs:
self._Y[row_idx, col - n_attrs] = var.to_val(value)
else:
self.metas[row_idx, -1 - col] = var.to_val(value)
else:
attr_cols = np.fromiter(
(col for col in col_indices if 0 <= col < n_attrs), int
)
class_cols = np.fromiter(
(col - n_attrs for col in col_indices if col >= n_attrs), int
)
meta_cols = np.fromiter((-1 - col for col in col_indices if col < 0), int)
if value is None:
value = Unknown
if not isinstance(value, (Real, np.ndarray)) and (
len(attr_cols) or len(class_cols)
):
raise TypeError("Ordinary attributes can only have primitive values")
if len(attr_cols):
self.X[row_idx, attr_cols] = value
if len(class_cols):
self._Y[row_idx, class_cols] = value
if len(meta_cols):
self.metas[row_idx, meta_cols] = value
def __delitem__(self, key):
if not self._check_all_dense():
raise ValueError("Rows of sparse data cannot be deleted")
if key is ...:
key = range(len(self))
self.X = np.delete(self.X, key, axis=0)
self.Y = np.delete(self._Y, key, axis=0)
self.metas = np.delete(self.metas, key, axis=0)
self.W = np.delete(self.W, key, axis=0)
self.ids = np.delete(self.ids, key, axis=0)
def __len__(self):
return self.X.shape[0]
def __str__(self):
return "[" + ",\n ".join(str(ex) for ex in self) + "]"
def __repr__(self):
head = 5
if self.is_sparse():
head = min(self.X.shape[0], head)
s = "[" + ",\n ".join(repr(ex) for ex in self[:head])
if len(self) > head:
s += ",\n ..."
s += "\n]"
return s
def clear(self):
"""Remove all rows from the table."""
if not self._check_all_dense():
raise ValueError("Tables with sparse data cannot be cleared")
del self[...]
def append(self, instance):
"""
Append a data instance to the table.
:param instance: a data instance
:type instance: Orange.data.Instance or a sequence of values
"""
self.insert(len(self), instance)
def insert(self, row, instance):
"""
Insert a data instance into the table.
:param row: row index
:type row: int
:param instance: a data instance
:type instance: Orange.data.Instance or a sequence of values
"""
if row < 0:
row += len(self)
if row < 0 or row > len(self):
raise IndexError("Index out of range")
self.ensure_copy() # ensure that numpy arrays are single-segment for resize
self._resize_all(len(self) + 1)
if row < len(self):
self.X[row + 1 :] = self.X[row:-1]
self._Y[row + 1 :] = self._Y[row:-1]
self.metas[row + 1 :] = self.metas[row:-1]
self.W[row + 1 :] = self.W[row:-1]
self.ids[row + 1 :] = self.ids[row:-1]
try:
self._set_row(instance, row)
if self.W.shape[-1]:
self.W[row] = 1
except Exception:
self.X[row:-1] = self.X[row + 1 :]
self._Y[row:-1] = self._Y[row + 1 :]
self.metas[row:-1] = self.metas[row + 1 :]
self.W[row:-1] = self.W[row + 1 :]
self.ids[row:-1] = self.ids[row + 1 :]
self._resize_all(len(self) - 1)
raise
def extend(self, instances):
"""
Extend the table with the given instances. The instances can be given
as a table of the same or a different domain, or a sequence. In the
latter case, each instance can be given as
:obj:`~Orange.data.Instance` or a sequence of values (e.g. list,
tuple, numpy.array).
:param instances: additional instances
:type instances: Orange.data.Table or a sequence of instances
"""
if isinstance(instances, Table) and instances.domain == self.domain:
self.X = vstack((self.X, instances.X))
self._Y = vstack((self._Y, instances._Y))
self.metas = vstack((self.metas, instances.metas))
self.W = vstack((self.W, instances.W))
self.ids = hstack((self.ids, instances.ids))
else:
try:
old_length = len(self)
self._resize_all(old_length + len(instances))
for i, example in enumerate(instances):
self[old_length + i] = example
try:
self.ids[old_length + i] = example.id
except AttributeError:
self.ids[old_length + i] = self.new_id()
except Exception:
self._resize_all(old_length)
raise
@staticmethod
def concatenate(tables, axis=1):
"""Return concatenation of `tables` by `axis`."""
if not tables:
raise ValueError("need at least one table to concatenate")
if len(tables) == 1:
return tables[0].copy()
CONCAT_ROWS, CONCAT_COLS = 0, 1
if axis == CONCAT_ROWS:
table = tables[0].copy()
for t in tables[1:]:
table.extend(t)
return table
elif axis == CONCAT_COLS:
if reduce(
operator.iand,
(
set(
map(
operator.attrgetter("name"),
chain(t.domain.variables, t.domain.metas),
)
)
for t in tables
),
):
raise ValueError(
"Concatenating two domains with variables "
"with same name is undefined"
)
domain = Domain(
flatten(t.domain.attributes for t in tables),
flatten(t.domain.class_vars for t in tables),
flatten(t.domain.metas for t in tables),
)
def ndmin(A):
return A if A.ndim > 1 else A.reshape(A.shape[0], 1)
table = Table.from_numpy(
domain,
np.hstack(tuple(ndmin(t.X) for t in tables)),
np.hstack(tuple(ndmin(t.Y) for t in tables)),
np.hstack(tuple(ndmin(t.metas) for t in tables)),
np.hstack(tuple(ndmin(t.W) for t in tables)),
)
return table
raise ValueError("axis {} out of bounds [0, 2)".format(axis))
def is_view(self):
"""
Return `True` if all arrays represent a view referring to another table
"""
return (
(not self.X.shape[-1] or self.X.base is not None)
and (not self._Y.shape[-1] or self._Y.base is not None)
and (not self.metas.shape[-1] or self.metas.base is not None)
and (not self.W.shape[-1] or self.W.base is not None)
)
def is_copy(self):
"""
Return `True` if the table owns its data
"""
return (
(not self.X.shape[-1] or self.X.base is None)
and (self._Y.base is None)
and (self.metas.base is None)
and (self.W.base is None)
)
def is_sparse(self):
"""
Return `True` if the table stores data in sparse format
"""
return any(sp.issparse(i) for i in [self.X, self.Y, self.metas])
def ensure_copy(self):
"""
Ensure that the table owns its data; copy arrays when necessary.
"""
def is_view(x):
# Sparse matrices don't expose views the way numpy arrays do; indexing
# them already produces copies, so this check can be skipped for them.
return not sp.issparse(x) and x.base is not None
if is_view(self.X):
self.X = self.X.copy()
if is_view(self._Y):
self._Y = self._Y.copy()
if is_view(self.metas):
self.metas = self.metas.copy()
if is_view(self.W):
self.W = self.W.copy()
def copy(self):
"""
Return a copy of the table
"""
t = self.__class__(self)
t.ensure_copy()
return t
@staticmethod
def __determine_density(data):
if data is None:
return Storage.MISSING
if data is not None and sp.issparse(data):
return Storage.SPARSE_BOOL if (data.data == 1).all() else Storage.SPARSE
else:
return Storage.DENSE
def X_density(self):
if not hasattr(self, "_X_density"):
self._X_density = self.__determine_density(self.X)
return self._X_density
def Y_density(self):
if not hasattr(self, "_Y_density"):
self._Y_density = self.__determine_density(self._Y)
return self._Y_density
def metas_density(self):
if not hasattr(self, "_metas_density"):
self._metas_density = self.__determine_density(self.metas)
return self._metas_density
def set_weights(self, weight=1):
"""
Set weights of data instances; create a vector of weights if necessary.
"""
if not self.W.shape[-1]:
self.W = np.empty(len(self))
self.W[:] = weight
def has_weights(self):
"""Return `True` if the data instances are weighed. """
return self.W.shape[-1] != 0
def total_weight(self):
"""
Return the total weight of instances in the table, or their number if
they are unweighted.
"""
if self.W.shape[-1]:
return sum(self.W)
return len(self)
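# Weights sketch (illustrative; assumes the bundled "iris" dataset): tables
# start unweighted, and set_weights() materialises the weight vector.
#     >>> data = Table("iris")
#     >>> data.has_weights(), data.total_weight()
#     (False, 150)
#     >>> data.set_weights(0.5)
#     >>> data.has_weights(), data.total_weight()
#     (True, 75.0)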
def has_missing(self):
"""Return `True` if there are any missing attribute or class values."""
missing_x = not sp.issparse(self.X) and bn.anynan(
self.X
) # do not check for sparse X
return missing_x or bn.anynan(self._Y)
def has_missing_class(self):
"""Return `True` if there are any missing class values."""
return bn.anynan(self._Y)
def checksum(self, include_metas=True):
# TODO: zlib.adler32 does not work for numpy arrays with dtype object
# (after pickling and unpickling such arrays, checksum changes)
# Why, and should we fix it or remove it?
"""Return a checksum over X, Y, metas and W."""
cs = zlib.adler32(np.ascontiguousarray(self.X))
cs = zlib.adler32(np.ascontiguousarray(self._Y), cs)
if include_metas:
cs = zlib.adler32(np.ascontiguousarray(self.metas), cs)
cs = zlib.adler32(np.ascontiguousarray(self.W), cs)
return cs
def shuffle(self):
"""Randomly shuffle the rows of the table."""
if not self._check_all_dense():
raise ValueError("Rows of sparse data cannot be shuffled")
ind = np.arange(self.X.shape[0])
np.random.shuffle(ind)
self.X = self.X[ind]
self._Y = self._Y[ind]
self.metas = self.metas[ind]
self.W = self.W[ind]
def get_column_view(self, index):
"""
Return a vector - as a view, not a copy - with a column of the table,
and a bool flag telling whether this column is sparse. Note that
vertical slicing of sparse matrices is inefficient.
:param index: the index of the column
:type index: int, str or Orange.data.Variable
:return: (one-dimensional numpy array, sparse)
"""
def rx(M):
if sp.issparse(M):
return np.asarray(M.todense())[:, 0], True
else:
return M, False
if not isinstance(index, Integral):
index = self.domain.index(index)
if index >= 0:
if index < self.X.shape[1]:
return rx(self.X[:, index])
else:
return rx(self._Y[:, index - self.X.shape[1]])
else:
return rx(self.metas[:, -1 - index])
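# Usage sketch (illustrative; assumes the bundled "iris" dataset):
#     >>> data = Table("iris")
#     >>> col, sparse = data.get_column_view("sepal length")
#     >>> col.shape, sparse
#     ((150,), False)
#     >>> col.base is data.X                 # a view into X, not a copy
#     True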
def _filter_is_defined(self, columns=None, negate=False):
if columns is None:
if sp.issparse(self.X):
remove = self.X.indptr[1:] != self.X.indptr[-1:] + self.X.shape[1]
else:
remove = bn.anynan(self.X, axis=1)
if sp.issparse(self._Y):
remove = np.logical_or(
remove, self._Y.indptr[1:] != self._Y.indptr[-1:] + self._Y.shape[1]
)
else:
remove = np.logical_or(remove, bn.anynan(self._Y, axis=1))
else:
remove = np.zeros(len(self), dtype=bool)
for column in columns:
col, sparse = self.get_column_view(column)
if sparse:
remove = np.logical_or(remove, col == 0)
else:
remove = np.logical_or(remove, bn.anynan([col], axis=0))
retain = remove if negate else np.logical_not(remove)
return self.from_table_rows(self, retain)
def _filter_has_class(self, negate=False):
if sp.issparse(self._Y):
if negate:
retain = self._Y.indptr[1:] != self._Y.indptr[-1:] + self._Y.shape[1]
else:
retain = self._Y.indptr[1:] == self._Y.indptr[-1:] + self._Y.shape[1]
else:
retain = bn.anynan(self._Y, axis=1)
if not negate:
retain = np.logical_not(retain)
return self.from_table_rows(self, retain)
def _filter_same_value(self, column, value, negate=False):
if not isinstance(value, Real):
value = self.domain[column].to_val(value)
sel = self.get_column_view(column)[0] == value
if negate:
sel = np.logical_not(sel)
return self.from_table_rows(self, sel)
def _filter_values(self, filter):
selection = self._values_filter_to_indicator(filter)
return self.from_table(self.domain, self, selection)
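# Filter sketch (illustrative; the threshold is arbitrary and the bundled
# "iris" dataset is assumed): the public way to reach these helpers is
# through Orange.data.filter objects, which are callable on a table.
#     >>> from Orange.data.filter import FilterContinuous, Values
#     >>> data = Table("iris")
#     >>> flt = Values([FilterContinuous("sepal length",
#     ...                                FilterContinuous.Greater, min=6.0)])
#     >>> selected = flt(data)               # rows with sepal length > 6.0
#     >>> bool((selected.X[:, 0] > 6.0).all())
#     True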
def _values_filter_to_indicator(self, filter):
"""Return selection of rows matching the filter conditions
Handles conjunction/disjunction and negate modifiers
Parameters
----------
filter: Values object containing the conditions
Returns
-------
A 1d bool array. len(result) == len(self)
"""
from Orange.data.filter import Values
if isinstance(filter, Values):
conditions = filter.conditions
conjunction = filter.conjunction
else:
conditions = [filter]
conjunction = True
if conjunction:
sel = np.ones(len(self), dtype=bool)
else:
sel = np.zeros(len(self), dtype=bool)
for f in conditions:
selection = self._filter_to_indicator(f)
if conjunction:
sel *= selection
else:
sel += selection
if filter.negate:
sel = ~sel
return sel
def _filter_to_indicator(self, filter):
"""Return selection of rows that match the condition.
Parameters
----------
filter: ValueFilter describing the condition
Returns
-------
A 1d bool array. len(result) == len(self)
"""
from Orange.data.filter import (
FilterContinuous,
FilterDiscrete,
FilterRegex,
FilterString,
FilterStringList,
Values,
)
if isinstance(filter, Values):
return self._values_filter_to_indicator(filter)
col = self.get_column_view(filter.column)[0]
if isinstance(filter, FilterDiscrete):
return self._discrete_filter_to_indicator(filter, col)
if isinstance(filter, FilterContinuous):
return self._continuous_filter_to_indicator(filter, col)
if isinstance(filter, FilterString):
return self._string_filter_to_indicator(filter, col)
if isinstance(filter, FilterStringList):
if not filter.case_sensitive:
col = np.char.lower(np.array(col, dtype=str))
vals = [val.lower() for val in filter.values]
else:
vals = filter.values
return reduce(operator.add, (col == val for val in vals))
if isinstance(filter, FilterRegex):
return np.vectorize(filter)(col)
raise TypeError("Invalid filter")
def _discrete_filter_to_indicator(self, filter, col):
"""Return selection of rows matched by the given discrete filter.
Parameters
----------
filter: FilterDiscrete
col: np.ndarray
Returns
-------
A 1d bool array. len(result) == len(self)
"""
if filter.values is None:  # a FilterDiscrete with no values acts as an "is defined" filter
col = col.astype(float)
return ~np.isnan(col)
sel = np.zeros(len(self), dtype=bool)
for val in filter.values:
if not isinstance(val, Real):
val = self.domain[filter.column].to_val(val)
sel += col == val
return sel
def _continuous_filter_to_indicator(self, filter, col):
"""Return selection of rows matched by the given continuous filter.
Parameters
----------
filter: FilterContinuous
col: np.ndarray
Returns
-------
A 1d bool array. len(result) == len(self)
"""
if filter.oper == filter.IsDefined:
col = col.astype(float)
return ~np.isnan(col)
return self._range_filter_to_indicator(filter, col, filter.min, filter.max)
def _string_filter_to_indicator(self, filter, col):
"""Return selection of rows matched by the given string filter.
Parameters
----------
filter: FilterString
col: np.ndarray
Returns
-------
A 1d bool array. len(result) == len(self)
"""
if filter.oper == filter.IsDefined:
return col.astype(bool)
col = col.astype(str)
fmin = filter.min or ""
fmax = filter.max or ""
if not filter.case_sensitive:
# convert all to lower case
col = np.char.lower(col)
fmin = fmin.lower()
fmax = fmax.lower()
if filter.oper == filter.Contains:
return np.fromiter((fmin in e for e in col), dtype=bool)
if filter.oper == filter.StartsWith:
return np.fromiter((e.startswith(fmin) for e in col), dtype=bool)
if filter.oper == filter.EndsWith:
return np.fromiter((e.endswith(fmin) for e in col), dtype=bool)
return self._range_filter_to_indicator(filter, col, fmin, fmax)
@staticmethod
def _range_filter_to_indicator(filter, col, fmin, fmax):
if filter.oper == filter.Equal:
return col == fmin
if filter.oper == filter.NotEqual:
return col != fmin
if filter.oper == filter.Less:
return col < fmin
if filter.oper == filter.LessEqual:
return col <= fmin
if filter.oper == filter.Greater:
return col > fmin
if filter.oper == filter.GreaterEqual:
return col >= fmin
if filter.oper == filter.Between:
return (col >= fmin) * (col <= fmax)
if filter.oper == filter.Outside:
return (col < fmin) + (col > fmax)
raise TypeError("Invalid operator")
def _compute_basic_stats(
self, columns=None, include_metas=False, compute_variance=False
):
if compute_variance:
raise NotImplementedError(
"computation of variance is " "not implemented yet"
)
W = self.W if self.has_weights() else None
rr = []
stats = []
if not columns:
if self.domain.attributes:
rr.append(fast_stats(self.X, W))
if self.domain.class_vars:
rr.append(fast_stats(self._Y, W))
if include_metas and self.domain.metas:
rr.append(fast_stats(self.metas, W))
if len(rr):
stats = np.vstack(tuple(rr))
else:
columns = [self.domain.index(c) for c in columns]
nattrs = len(self.domain.attributes)
Xs = any(0 <= c < nattrs for c in columns) and fast_stats(self.X, W)
Ys = any(c >= nattrs for c in columns) and fast_stats(self._Y, W)
ms = any(c < 0 for c in columns) and fast_stats(self.metas, W)
for column in columns:
if 0 <= column < nattrs:
stats.append(Xs[column, :])
elif column >= nattrs:
stats.append(Ys[column - nattrs, :])
else:
stats.append(ms[-1 - column])
return stats
def _compute_distributions(self, columns=None):
if columns is None:
columns = range(len(self.domain.variables))
else:
columns = [self.domain.index(var) for var in columns]
distributions = []
if sp.issparse(self.X):
self.X = self.X.tocsc()
W = self.W.ravel() if self.has_weights() else None
for col in columns:
variable = self.domain[col]
# Select the correct data column from X, Y or metas
if 0 <= col < self.X.shape[1]:
x = self.X[:, col]
elif col < 0:
x = self.metas[:, col * (-1) - 1]
if np.issubdtype(x.dtype, np.dtype(object)):
x = x.astype(float)
else:
x = self._Y[:, col - self.X.shape[1]]
if variable.is_discrete:
dist, unknowns = bincount(
x, weights=W, max_val=len(variable.values) - 1
)
elif not x.shape[0]:
dist, unknowns = np.zeros((2, 0)), 0
else:
if W is not None:
if sp.issparse(x):
arg_sort = np.argsort(x.data)
ranks = x.indices[arg_sort]
vals = np.vstack((x.data[arg_sort], W[ranks]))
else:
ranks = np.argsort(x)
vals = np.vstack((x[ranks], W[ranks]))
else:
x_values = x.data if sp.issparse(x) else x
vals = np.ones((2, x_values.shape[0]))
vals[0, :] = x_values
vals[0, :].sort()
dist = np.array(_valuecount.valuecount(vals))
# If sparse, then 0s will not be counted with `valuecount`, so
# we have to add them to the result manually.
if sp.issparse(x) and sparse_has_implicit_zeros(x):
if W is not None:
zero_weights = sparse_implicit_zero_weights(x, W).sum()
else:
zero_weights = sparse_count_implicit_zeros(x)
zero_vec = [0, zero_weights]
dist = np.insert(
dist, np.searchsorted(dist[0], 0), zero_vec, axis=1
)
# Since `countnans` assumes vector shape to be (1, n) and `x`
# shape is (n, 1), we pass the transpose
unknowns = countnans(x.T, W)
distributions.append((dist, unknowns))
return distributions
def _compute_contingency(self, col_vars=None, row_var=None):
n_atts = self.X.shape[1]
if col_vars is None:
col_vars = range(len(self.domain.variables))
else:
col_vars = [self.domain.index(var) for var in col_vars]
if row_var is None:
row_var = self.domain.class_var
if row_var is None:
raise ValueError("No row variable")
row_desc = self.domain[row_var]
if not row_desc.is_discrete:
raise TypeError("Row variable must be discrete")
row_indi = self.domain.index(row_var)
n_rows = len(row_desc.values)
if 0 <= row_indi < n_atts:
row_data = self.X[:, row_indi]
elif row_indi < 0:
row_data = self.metas[:, -1 - row_indi]
else:
row_data = self._Y[:, row_indi - n_atts]
W = self.W if self.has_weights() else None
nan_inds = None
col_desc = [self.domain[var] for var in col_vars]
col_indi = [self.domain.index(var) for var in col_vars]
if any(not (var.is_discrete or var.is_continuous) for var in col_desc):
raise ValueError(
"contingency can be computed only for discrete " "and continuous values"
)
if row_data.dtype.kind != "f": # meta attributes can be stored as type object
row_data = row_data.astype(float)
unknown_rows = countnans(row_data)
if unknown_rows:
nan_inds = np.isnan(row_data)
row_data = row_data[~nan_inds]
if W:
W = W[~nan_inds]
unknown_rows = np.sum(W[nan_inds])
contingencies = [None] * len(col_desc)
for arr, f_cond, f_ind in (
(self.X, lambda i: 0 <= i < n_atts, lambda i: i),
(self._Y, lambda i: i >= n_atts, lambda i: i - n_atts),
(self.metas, lambda i: i < 0, lambda i: -1 - i),
):
if nan_inds is not None:
arr = arr[~nan_inds]
arr_indi = [e for e, ind in enumerate(col_indi) if f_cond(ind)]
vars = [(e, f_ind(col_indi[e]), col_desc[e]) for e in arr_indi]
disc_vars = [v for v in vars if v[2].is_discrete]
if disc_vars:
if sp.issparse(arr):
max_vals = max(len(v[2].values) for v in disc_vars)
disc_indi = {i for _, i, _ in disc_vars}
mask = [i in disc_indi for i in range(arr.shape[1])]
conts, nans = contingency(
arr, row_data, max_vals - 1, n_rows - 1, W, mask
)
for col_i, arr_i, var in disc_vars:
n_vals = len(var.values)
contingencies[col_i] = (conts[arr_i][:, :n_vals], nans[arr_i])
else:
for col_i, arr_i, var in disc_vars:
contingencies[col_i] = contingency(
arr[:, arr_i].astype(float),
row_data,
len(var.values) - 1,
n_rows - 1,
W,
)
cont_vars = [v for v in vars if v[2].is_continuous]
if cont_vars:
classes = row_data.astype(dtype=np.intp)
if W is not None:
W = W.astype(dtype=np.float64)
if sp.issparse(arr):
arr = sp.csc_matrix(arr)
for col_i, arr_i, _ in cont_vars:
if sp.issparse(arr):
col_data = arr.data[arr.indptr[arr_i] : arr.indptr[arr_i + 1]]
rows = arr.indices[arr.indptr[arr_i] : arr.indptr[arr_i + 1]]
W_ = None if W is None else W[rows]
classes_ = classes[rows]
else:
col_data, W_, classes_ = arr[:, arr_i], W, classes
col_data = col_data.astype(dtype=np.float64)
U, C, unknown = _contingency.contingency_floatarray(
col_data, classes_, n_rows, W_
)
contingencies[col_i] = ([U, C], unknown)
return contingencies, unknown_rows
@classmethod
def transpose(
cls,
table,
feature_names_column="",
meta_attr_name="Feature name",
feature_name="Feature",
):
"""
Transpose the table.
:param table: Table - table to transpose
:param feature_names_column: str - name of (String) meta attribute to
use for feature names
:param meta_attr_name: str - name of new meta attribute into which
feature names are mapped
:param feature_name: str - prefix used to generate feature names when
feature_names_column is not given
:return: Table - transposed table
"""
self = cls()
n_cols, self.n_rows = table.X.shape
old_domain = table.attributes.get("old_domain")
# attributes
# - classes and metas to attributes of attributes
# - arbitrary meta column to feature names
self.X = table.X.T
attributes = (
[ContinuousVariable(str(row[feature_names_column])) for row in table]
if feature_names_column
else [
ContinuousVariable(
feature_name
+ " "
+ str(i + 1).zfill(int(np.ceil(np.log10(n_cols))))
)
for i in range(n_cols)
]
)
if old_domain is not None and feature_names_column:
for i, _ in enumerate(attributes):
if attributes[i].name in old_domain:
var = old_domain[attributes[i].name]
attr = (
ContinuousVariable(var.name)
if var.is_continuous
else DiscreteVariable(var.name, var.values)
)
attr.attributes = var.attributes.copy()
attributes[i] = attr
def set_attributes_of_attributes(_vars, _table):
for i, variable in enumerate(_vars):
if variable.name == feature_names_column:
continue
for j, row in enumerate(_table):
value = (
variable.repr_val(row)
if np.isscalar(row)
else row[i]
if isinstance(row[i], str)
else variable.repr_val(row[i])
)
if value not in MISSING_VALUES:
attributes[j].attributes[variable.name] = value
set_attributes_of_attributes(table.domain.class_vars, table.Y)
set_attributes_of_attributes(table.domain.metas, table.metas)
# weights
self.W = np.empty((self.n_rows, 0))
def get_table_from_attributes_of_attributes(_vars, _dtype=float):
T = np.empty((self.n_rows, len(_vars)), dtype=_dtype)
for i, _attr in enumerate(table.domain.attributes):
for j, _var in enumerate(_vars):
val = str(_attr.attributes.get(_var.name, ""))
if not _var.is_string:
val = (
np.nan
if val in MISSING_VALUES
else _var.values.index(val)
if _var.is_discrete
else float(val)
)
T[i, j] = val
return T
# class_vars - attributes of attributes to class - from old domain
class_vars = []
if old_domain is not None:
class_vars = old_domain.class_vars
self.Y = get_table_from_attributes_of_attributes(class_vars)
# metas
# - feature names and attributes of attributes to metas
self.metas, metas = np.empty((self.n_rows, 0), dtype=object), []
if (
meta_attr_name not in [m.name for m in table.domain.metas]
and table.domain.attributes
):
self.metas = np.array(
[[a.name] for a in table.domain.attributes], dtype=object
)
metas.append(StringVariable(meta_attr_name))
names = chain.from_iterable(
list(attr.attributes) for attr in table.domain.attributes
)
names = sorted(set(names) - {var.name for var in class_vars})
def guessed_var(i, var_name):
orig_vals = M[:, i]
val_map, vals, var_type = Orange.data.io.guess_data_type(orig_vals)
values, variable = Orange.data.io.sanitize_variable(
val_map, vals, orig_vals, var_type, {}, _metas, None, var_name
)
M[:, i] = values
return variable
_metas = [StringVariable(n) for n in names]
if old_domain is not None:
_metas = [m for m in old_domain.metas if m.name != meta_attr_name]
M = get_table_from_attributes_of_attributes(_metas, _dtype=object)
if old_domain is None:
_metas = [guessed_var(i, m.name) for i, m in enumerate(_metas)]
if _metas:
self.metas = np.hstack((self.metas, M))
metas.extend(_metas)
self.domain = Domain(attributes, class_vars, metas)
cls._init_ids(self)
self.attributes = table.attributes.copy()
self.attributes["old_domain"] = table.domain
return self
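    # Illustrative usage sketch (hypothetical names, not part of the original
    # source). Assuming `data` has a String meta column "gene" holding row
    # names:
    #   >>> t = Table.transpose(data, feature_names_column="gene")
    #   >>> t.X.shape == (len(data.domain.attributes), len(data))
    #   >>> t.attributes["old_domain"] is data.domain
    # Because the original domain is stashed in `attributes`, transposing `t`
    # again can restore the original variable names and types.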
def to_sparse(self, sparse_attributes=True, sparse_class=False, sparse_metas=False):
def sparsify(features):
for f in features:
f.sparse = True
new_domain = self.domain.copy()
if sparse_attributes:
sparsify(new_domain.attributes)
if sparse_class:
sparsify(new_domain.class_vars)
if sparse_metas:
sparsify(new_domain.metas)
return self.transform(new_domain)
def to_dense(self, dense_attributes=True, dense_class=True, dense_metas=True):
def densify(features):
for f in features:
f.sparse = False
new_domain = self.domain.copy()
if dense_attributes:
densify(new_domain.attributes)
if dense_class:
densify(new_domain.class_vars)
if dense_metas:
densify(new_domain.metas)
t = self.transform(new_domain)
t.ids = self.ids # preserve indices
return t
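    # Illustrative sketch (assumed workflow, not part of the original source):
    # to_sparse/to_dense only flip the `sparse` flag on a copy of the domain
    # and delegate the actual storage conversion to `transform`, e.g.
    #   >>> sparse_data = data.to_sparse()        # X stored as a sparse matrix
    #   >>> dense_again = sparse_data.to_dense()  # ids preserved, X dense again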
def _check_arrays(*arrays, dtype=None):
checked = []
if not len(arrays):
return checked
def ninstances(array):
if hasattr(array, "shape"):
return array.shape[0]
else:
return len(array) if array is not None else 0
shape_1 = ninstances(arrays[0])
for array in arrays:
if array is None:
checked.append(array)
continue
if ninstances(array) != shape_1:
raise ValueError(
"Leading dimension mismatch (%d != %d)" % (ninstances(array), shape_1)
)
if sp.issparse(array):
array.data = np.asarray(array.data)
has_inf = _check_inf(array.data)
else:
if dtype is not None:
array = np.asarray(array, dtype=dtype)
else:
array = np.asarray(array)
has_inf = _check_inf(array)
if has_inf:
raise ValueError("Array contains infinity.")
checked.append(array)
return checked
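# Illustrative behaviour sketch for _check_arrays (hypothetical arrays, not
# part of the original source): all non-None arguments must share the leading
# dimension and must not contain infinities, e.g.
#   >>> X, Y = _check_arrays(np.zeros((5, 3)), np.ones(5), dtype=np.float64)
#   >>> _check_arrays(np.zeros((5, 3)), np.ones(4))   # raises ValueError
# None entries are passed through unchanged and sparse inputs keep their type
# (only their .data buffer is validated).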
def _check_inf(array):
return array.dtype.char in np.typecodes["AllFloat"] and np.isinf(array.data).any()
def _subarray(arr, rows, cols):
rows = _optimize_indices(rows, arr.shape[0])
cols = _optimize_indices(cols, arr.shape[1])
return arr[_rxc_ix(rows, cols)]
def _optimize_indices(indices, maxlen):
"""
Convert integer indices to slice if possible. It only converts increasing
integer ranges with positive steps and valid starts and ends.
Only convert valid ends so that invalid ranges will still raise
an exception.
Allows numpy to reuse the data array, because it defaults to copying
if given indices.
Parameters
----------
indices : 1D sequence, slice or Ellipsis
"""
if isinstance(indices, slice):
return indices
if indices is ...:
return slice(None, None, 1)
if len(indices) >= 1:
indices = np.asarray(indices)
        if indices.dtype != bool:
begin = indices[0]
end = indices[-1]
steps = np.diff(indices) if len(indices) > 1 else np.array([1])
step = steps[0]
# continuous ranges with constant step and valid start and stop index can be slices
if np.all(steps == step) and step > 0 and begin >= 0 and end < maxlen:
return slice(begin, end + step, step)
return indices
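# Illustrative examples (hypothetical inputs, not part of the original source):
#   >>> _optimize_indices([0, 1, 2, 3], maxlen=10)   # -> slice(0, 4, 1)
#   >>> _optimize_indices([1, 3, 5], maxlen=10)      # -> slice(1, 7, 2)
#   >>> _optimize_indices([3, 1, 2], maxlen=10)      # non-constant step: array
#   >>> _optimize_indices([0, 5], maxlen=3)          # end >= maxlen: array
# Returning a slice lets numpy produce a view instead of copying the data.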
def _rxc_ix(rows, cols):
"""
Construct an index object to index the `rows` x `cols` cross product.
Rows and columns can be a 1d bool or int sequence, or a slice.
    The latter is a convenience and is interpreted the same
    as `slice(None, None, 1)`
Parameters
----------
rows : 1D sequence, slice
Row indices.
cols : 1D sequence, slice
Column indices.
See Also
--------
numpy.ix_
Examples
--------
>>> import numpy as np
>>> a = np.arange(10).reshape(2, 5)
>>> a[_rxc_ix([0, 1], [3, 4])]
array([[3, 4],
[8, 9]])
>>> a[_rxc_ix([False, True], slice(None, None, 1))]
array([[5, 6, 7, 8, 9]])
"""
isslice = (isinstance(rows, slice), isinstance(cols, slice))
if isslice == (True, True):
return rows, cols
elif isslice == (True, False):
return rows, np.asarray(np.ix_(cols), int).ravel()
elif isslice == (False, True):
return np.asarray(np.ix_(rows), int).ravel(), cols
else:
r, c = np.ix_(rows, cols)
return np.asarray(r, int), np.asarray(c, int)
def assure_domain_conversion_sparsity(target, source):
"""
Assure that the table obeys the domain conversion's suggestions about sparsity.
Args:
target (Table): the target table.
source (Table): the source table.
Returns:
Table: with fixed sparsity. The sparsity is set as it is recommended by domain conversion
for transformation from source to the target domain.
"""
conversion = target.domain.get_conversion(source.domain)
match_density = [assure_array_dense, assure_array_sparse]
target.X = match_density[conversion.sparse_X](target.X)
target.Y = match_density[conversion.sparse_Y](target.Y)
target.metas = match_density[conversion.sparse_metas](target.metas)
return target
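# Illustrative sketch (assumed call pattern, not part of the original source):
# conversion.sparse_X is used as a 0/1 flag, so indexing into
# match_density = [assure_array_dense, assure_array_sparse] selects the proper
# helper per part; a conversion that recommends sparse X but dense metas gives
#   target.X     -> assure_array_sparse(target.X)
#   target.metas -> assure_array_dense(target.metas)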
| en | 0.681363 | # import for io.py Domain conversion cache used in Table.from_table. It is global so that chaining of domain conversions also works with caching even with descendants of Table. Construct a data instance representing the given row of the table. # Make sure to stop printing variables if we limit the output # noinspection PyPep8Naming A class whose attributes contain attribute descriptors for columns. For a table `table`, setting `c = table.columns` will allow accessing the table's variables with, for instance `c.gender`, `c.age` ets. Spaces are replaced with underscores. # So subclasses can expect to call super without breakage; noop Construct a new `Table` with the given number of rows for the given domain. The optional vector of weights is initialized to 1's. :param domain: domain for the `Table` :type domain: Orange.data.Domain :param n_rows: number of rows in the new table :type n_rows: int :param weights: indicates whether to construct a vector of weights :type weights: bool :return: a new table :rtype: Orange.data.Table Create a new table from selected columns and/or rows of an existing one. The columns are chosen using a domain. The domain may also include variables that do not appear in the source table; they are computed from source variables if possible. The resulting data may be a view or a copy of the existing data. :param domain: the domain for the new table :type domain: Orange.data.Domain :param source: the source table :type source: Orange.data.Table :param row_indices: indices of the rows to include :type row_indices: a slice or a sequence :return: a new table :rtype: Orange.data.Table # match density for subarrays # initialize final array & set `match_density` for columns # assure resulting domain is the instance passed on input # since sparse flags are not considered when checking for # domain equality, fix manually. Construct a table with a different domain. The new table keeps the row ids and other information. If the table is a subclass of :obj:`Table`, the resulting table will be of the same type. In a typical scenario, an existing table is augmented with a new column by :: domain = Domain(old_domain.attributes + [new_attribute], old_domain.class_vars, old_domain.metas) table = data.transform(domain) table[:, new_attribute] = new_column Args: domain (Domain): new domain Returns: A new table Construct a new table by selecting rows from the source table. :param source: an existing table :type source: Orange.data.Table :param row_indices: indices of the rows to include :type row_indices: a slice or a sequence :return: a new table :rtype: Orange.data.Table Construct a table from numpy arrays with the given domain. The number of variables in the domain must match the number of columns in the corresponding arrays. All arrays must have the same number of rows. Arrays may be of different numpy types, and may be dense or sparse. :param domain: the domain for the new table :type domain: Orange.data.Domain :param X: array with attribute values :type X: np.array :param Y: array with class values :type Y: np.array :param metas: array with meta attributes :type metas: np.array :param W: array with weights :type W: np.array :return: Save a data table to a file. The path can be absolute or relative. :param filename: File name :type filename: str Read a data table from a file. The path can be absolute or relative. 
:param filename: File name :type filename: str :param sheet: Sheet in a file (optional) :type sheet: str :return: a new data table :rtype: Orange.data.Table # Readers return plain table. Make sure to cast it to appropriate # (subclass) type # no need to call _init_ids as fuctions from .io already # construct a table with .ids # Helper function for __setitem__ and insert: # Set the row of table data matrices # noinspection PyProtectedMember # A helper function for extend and insert # Resize X, Y, metas and W. # multiple rows OR single row but multiple columns: # construct a new table # single row # multiple rows, multiple columns Remove all rows from the table. Append a data instance to the table. :param instance: a data instance :type instance: Orange.data.Instance or a sequence of values Insert a data instance into the table. :param row: row index :type row: int :param instance: a data instance :type instance: Orange.data.Instance or a sequence of values # ensure that numpy arrays are single-segment for resize Extend the table with the given instances. The instances can be given as a table of the same or a different domain, or a sequence. In the latter case, each instances can be given as :obj:`~Orange.data.Instance` or a sequence of values (e.g. list, tuple, numpy.array). :param instances: additional instances :type instances: Orange.data.Table or a sequence of instances Return concatenation of `tables` by `axis`. Return `True` if all arrays represent a view referring to another table Return `True` if the table owns its data Return `True` if the table stores data in sparse format Ensure that the table owns its data; copy arrays when necessary. # Sparse matrices don't have views like numpy arrays. Since indexing on # them creates copies in constructor we can skip this check here. Return a copy of the table Set weights of data instances; create a vector of weights if necessary. Return `True` if the data instances are weighed. Return the total weight of instances in the table, or their number if they are unweighted. Return `True` if there are any missing attribute or class values. # do not check for sparse X Return `True` if there are any missing class values. # TODO: zlib.adler32 does not work for numpy arrays with dtype object # (after pickling and unpickling such arrays, checksum changes) # Why, and should we fix it or remove it? Return a checksum over X, Y, metas and W. Randomly shuffle the rows of the table. Return a vector - as a view, not a copy - with a column of the table, and a bool flag telling whether this column is sparse. Note that vertical slicing of sparse matrices is inefficient. :param index: the index of the column :type index: int, str or Orange.data.Variable :return: (one-dimensional numpy array, sparse) Return selection of rows matching the filter conditions Handles conjunction/disjunction and negate modifiers Parameters ---------- filter: Values object containing the conditions Returns ------- A 1d bool array. len(result) == len(self) Return selection of rows that match the condition. Parameters ---------- filter: ValueFilter describing the condition Returns ------- A 1d bool array. len(result) == len(self) Return selection of rows matched by the given discrete filter. Parameters ---------- filter: FilterDiscrete col: np.ndarray Returns ------- A 1d bool array. len(result) == len(self) # <- is defined filter Return selection of rows matched by the given continuous filter. Parameters ---------- filter: FilterContinuous col: np.ndarray Returns ------- A 1d bool array. 
len(result) == len(self) Return selection of rows matched by the given string filter. Parameters ---------- filter: FilterString col: np.ndarray Returns ------- A 1d bool array. len(result) == len(self) # convert all to lower case # Select the correct data column from X, Y or metas # If sparse, then 0s will not be counted with `valuecount`, so # we have to add them to the result manually. # Since `countnans` assumes vector shape to be (1, n) and `x` # shape is (n, 1), we pass the transpose # meta attributes can be stored as type object Transpose the table. :param table: Table - table to transpose :param feature_names_column: str - name of (String) meta attribute to use for feature names :param meta_attr_name: str - name of new meta attribute into which feature names are mapped :return: Table - transposed table # attributes # - classes and metas to attributes of attributes # - arbitrary meta column to feature names # weights # class_vars - attributes of attributes to class - from old domain # metas # - feature names and attributes of attributes to metas # preserve indices Convert integer indices to slice if possible. It only converts increasing integer ranges with positive steps and valid starts and ends. Only convert valid ends so that invalid ranges will still raise an exception. Allows numpy to reuse the data array, because it defaults to copying if given indices. Parameters ---------- indices : 1D sequence, slice or Ellipsis # continuous ranges with constant step and valid start and stop index can be slices Construct an index object to index the `rows` x `cols` cross product. Rows and columns can be a 1d bool or int sequence, or a slice. The later is a convenience and is interpreted the same as `slice(None, None, -1)` Parameters ---------- rows : 1D sequence, slice Row indices. cols : 1D sequence, slice Column indices. See Also -------- numpy.ix_ Examples -------- >>> import numpy as np >>> a = np.arange(10).reshape(2, 5) >>> a[_rxc_ix([0, 1], [3, 4])] array([[3, 4], [8, 9]]) >>> a[_rxc_ix([False, True], slice(None, None, 1))] array([[5, 6, 7, 8, 9]]) Assure that the table obeys the domain conversion's suggestions about sparsity. Args: target (Table): the target table. source (Table): the source table. Returns: Table: with fixed sparsity. The sparsity is set as it is recommended by domain conversion for transformation from source to the target domain. | 2.14162 | 2 |
examples/main_simple-vit.py | khuongnd/pagi | 0 | 6631641 | import os
from datetime import datetime
from typing import Any, Generator, Mapping, Tuple
import dataget
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from tensorboardX.writer import SummaryWriter
import typer
import optax
import einops
import elegy
class ViT(elegy.Module):
"""Standard LeNet-300-100 MLP network."""
def __init__(
self,
size: int,
num_layers: int,
num_heads: int,
dropout: float,
**kwargs,
):
super().__init__(**kwargs)
self.size = size
self.num_layers = num_layers
self.num_heads = num_heads
self.dropout = dropout
def call(self, x: jnp.ndarray):
batch_size = x.shape[0]
# normalize data
x = x.astype(jnp.float32) / 255.0
# make patch embeddings
x = einops.rearrange(
x, "batch (h1 h2) (w1 w2) -> batch (h1 w1) (h2 w2)", h2=7, w2=7
)
x = elegy.nn.Linear(self.size)(x)
# add predict token
predict_token = jnp.zeros(shape=[batch_size, 1, self.size])
x = jnp.concatenate([predict_token, x], axis=1)
# create positional embeddings
positional_embeddings = self.add_parameter(
"positional_embeddings",
lambda: elegy.initializers.TruncatedNormal()(x.shape[-2:], jnp.float32),
)
positional_embeddings = einops.repeat(
positional_embeddings,
"... -> batch ...",
batch=batch_size,
)
# add positional embeddings
x = x + positional_embeddings
# apply N transformers encoder layers
x = elegy.nn.transformers.TransformerEncoder(
lambda: elegy.nn.transformers.TransformerEncoderLayer(
head_size=self.size,
num_heads=self.num_heads,
dropout=self.dropout,
),
num_layers=self.num_layers,
)(x)
# get predict output token
x = x[:, 0]
# apply predict head
logits = elegy.nn.Linear(10)(x)
return logits
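        # Illustrative shape walk-through for MNIST-sized input (assumed sizes,
        # not part of the original source), with size=32:
        #   x: (batch, 28, 28)  -> patches: (batch, 16, 49)  # 4x4 grid of 7x7
        #   Linear(32)          -> (batch, 16, 32)
        #   + predict token     -> (batch, 17, 32)
        #   TransformerEncoder  -> (batch, 17, 32)
        #   token 0, Linear(10) -> logits: (batch, 10)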
def main(
debug: bool = False,
eager: bool = False,
logdir: str = "runs",
steps_per_epoch: int = 200,
batch_size: int = 64,
epochs: int = 100,
size: int = 32,
num_layers: int = 3,
num_heads: int = 8,
dropout: float = 0.0,
):
if debug:
import debugpy
print("Waiting for debugger...")
debugpy.listen(5678)
debugpy.wait_for_client()
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
logdir = os.path.join(logdir, current_time)
X_train, y_train, X_test, y_test = dataget.image.mnist(global_cache=True).get()
print("X_train:", X_train.shape, X_train.dtype)
print("y_train:", y_train.shape, y_train.dtype)
print("X_test:", X_test.shape, X_test.dtype)
print("y_test:", y_test.shape, y_test.dtype)
model = elegy.Model(
module=ViT(
size=size,
num_layers=num_layers,
num_heads=num_heads,
dropout=dropout,
),
loss=[
elegy.losses.SparseCategoricalCrossentropy(from_logits=True),
# elegy.regularizers.GlobalL2(l=1e-4),
],
metrics=elegy.metrics.SparseCategoricalAccuracy(),
optimizer=optax.adamw(1e-3),
run_eagerly=eager,
)
model.init(X_train, y_train)
model.summary(X_train[:64])
history = model.fit(
x=X_train,
y=y_train,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
batch_size=batch_size,
validation_data=(X_test, y_test),
shuffle=True,
callbacks=[elegy.callbacks.TensorBoard(logdir=logdir)],
)
elegy.utils.plot_history(history)
# get random samples
idxs = np.random.randint(0, 10000, size=(9,))
x_sample = X_test[idxs]
# get predictions
y_pred = model.predict(x=x_sample)
# plot and save results
with SummaryWriter(os.path.join(logdir, "val")) as tbwriter:
figure = plt.figure(figsize=(12, 12))
for i in range(3):
for j in range(3):
k = 3 * i + j
plt.subplot(3, 3, k + 1)
plt.title(f"{np.argmax(y_pred[k])}")
plt.imshow(x_sample[k], cmap="gray")
# tbwriter.add_figure("Predictions", figure, 100)
plt.show()
print(
"\n\n\nMetrics and images can be explored using tensorboard using:",
f"\n \t\t\t tensorboard --logdir {logdir}",
)
if __name__ == "__main__":
typer.run(main) | import os
from datetime import datetime
from typing import Any, Generator, Mapping, Tuple
import dataget
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
from tensorboardX.writer import SummaryWriter
import typer
import optax
import einops
import elegy
class ViT(elegy.Module):
"""Standard LeNet-300-100 MLP network."""
def __init__(
self,
size: int,
num_layers: int,
num_heads: int,
dropout: float,
**kwargs,
):
super().__init__(**kwargs)
self.size = size
self.num_layers = num_layers
self.num_heads = num_heads
self.dropout = dropout
def call(self, x: jnp.ndarray):
batch_size = x.shape[0]
# normalize data
x = x.astype(jnp.float32) / 255.0
# make patch embeddings
x = einops.rearrange(
x, "batch (h1 h2) (w1 w2) -> batch (h1 w1) (h2 w2)", h2=7, w2=7
)
x = elegy.nn.Linear(self.size)(x)
# add predict token
predict_token = jnp.zeros(shape=[batch_size, 1, self.size])
x = jnp.concatenate([predict_token, x], axis=1)
# create positional embeddings
positional_embeddings = self.add_parameter(
"positional_embeddings",
lambda: elegy.initializers.TruncatedNormal()(x.shape[-2:], jnp.float32),
)
positional_embeddings = einops.repeat(
positional_embeddings,
"... -> batch ...",
batch=batch_size,
)
# add positional embeddings
x = x + positional_embeddings
# apply N transformers encoder layers
x = elegy.nn.transformers.TransformerEncoder(
lambda: elegy.nn.transformers.TransformerEncoderLayer(
head_size=self.size,
num_heads=self.num_heads,
dropout=self.dropout,
),
num_layers=self.num_layers,
)(x)
# get predict output token
x = x[:, 0]
# apply predict head
logits = elegy.nn.Linear(10)(x)
return logits
def main(
debug: bool = False,
eager: bool = False,
logdir: str = "runs",
steps_per_epoch: int = 200,
batch_size: int = 64,
epochs: int = 100,
size: int = 32,
num_layers: int = 3,
num_heads: int = 8,
dropout: float = 0.0,
):
if debug:
import debugpy
print("Waiting for debugger...")
debugpy.listen(5678)
debugpy.wait_for_client()
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
logdir = os.path.join(logdir, current_time)
X_train, y_train, X_test, y_test = dataget.image.mnist(global_cache=True).get()
print("X_train:", X_train.shape, X_train.dtype)
print("y_train:", y_train.shape, y_train.dtype)
print("X_test:", X_test.shape, X_test.dtype)
print("y_test:", y_test.shape, y_test.dtype)
model = elegy.Model(
module=ViT(
size=size,
num_layers=num_layers,
num_heads=num_heads,
dropout=dropout,
),
loss=[
elegy.losses.SparseCategoricalCrossentropy(from_logits=True),
# elegy.regularizers.GlobalL2(l=1e-4),
],
metrics=elegy.metrics.SparseCategoricalAccuracy(),
optimizer=optax.adamw(1e-3),
run_eagerly=eager,
)
model.init(X_train, y_train)
model.summary(X_train[:64])
history = model.fit(
x=X_train,
y=y_train,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
batch_size=batch_size,
validation_data=(X_test, y_test),
shuffle=True,
callbacks=[elegy.callbacks.TensorBoard(logdir=logdir)],
)
elegy.utils.plot_history(history)
# get random samples
idxs = np.random.randint(0, 10000, size=(9,))
x_sample = X_test[idxs]
# get predictions
y_pred = model.predict(x=x_sample)
# plot and save results
with SummaryWriter(os.path.join(logdir, "val")) as tbwriter:
figure = plt.figure(figsize=(12, 12))
for i in range(3):
for j in range(3):
k = 3 * i + j
plt.subplot(3, 3, k + 1)
plt.title(f"{np.argmax(y_pred[k])}")
plt.imshow(x_sample[k], cmap="gray")
# tbwriter.add_figure("Predictions", figure, 100)
plt.show()
print(
"\n\n\nMetrics and images can be explored using tensorboard using:",
f"\n \t\t\t tensorboard --logdir {logdir}",
)
if __name__ == "__main__":
typer.run(main) | en | 0.4616 | Standard LeNet-300-100 MLP network. # normalize data # make patch embeddings # add predict token # create positional embeddings # add positional embeddings # apply N transformers encoder layers # get predict output token # apply predict head # elegy.regularizers.GlobalL2(l=1e-4), # get random samples # get predictions # plot and save results # tbwriter.add_figure("Predictions", figure, 100) | 2.028142 | 2 |
ip/models/prefix.py | xUndero/noc | 0 | 6631642 | <filename>ip/models/prefix.py
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Prefix model
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import operator
from threading import Lock
from collections import defaultdict
# Third-party modules
import six
from django.db import models, connection
import cachetools
# NOC modules
from noc.config import config
from noc.core.model.base import NOCModel
from noc.aaa.models.user import User
from noc.project.models.project import Project
from noc.peer.models.asn import AS
from noc.vc.models.vc import VC
from noc.core.model.fields import TagsField, CIDRField, DocumentReferenceField, CachedForeignKey
from noc.lib.app.site import site
from noc.core.validators import check_ipv4_prefix, check_ipv6_prefix, ValidationError
from noc.core.ip import IP, IPv4
from noc.main.models.textindex import full_text_search
from noc.core.translation import ugettext as _
from noc.core.wf.decorator import workflow
from noc.core.model.decorator import on_delete_check
from noc.wf.models.state import State
from noc.core.datastream.decorator import datastream
from .vrf import VRF
from .afi import AFI_CHOICES
from .prefixprofile import PrefixProfile
id_lock = Lock()
@full_text_search
@workflow
@datastream
@on_delete_check(
ignore=[
("ip.PrefixBookmark", "prefix"),
("ip.Prefix", "parent"),
("ip.Prefix", "ipv6_transition"),
("ip.Address", "prefix"),
]
)
@six.python_2_unicode_compatible
class Prefix(NOCModel):
"""
Allocated prefix
"""
class Meta(object):
verbose_name = _("Prefix")
verbose_name_plural = _("Prefixes")
db_table = "ip_prefix"
app_label = "ip"
unique_together = [("vrf", "afi", "prefix")]
parent = models.ForeignKey(
"self",
related_name="children_set",
verbose_name=_("Parent"),
null=True,
blank=True,
on_delete=models.CASCADE,
)
vrf = CachedForeignKey(
VRF, verbose_name=_("VRF"), default=VRF.get_global, on_delete=models.CASCADE
)
afi = models.CharField(_("Address Family"), max_length=1, choices=AFI_CHOICES)
prefix = CIDRField(_("Prefix"))
name = models.CharField(_("Name"), max_length=255, null=True, blank=True)
profile = DocumentReferenceField(PrefixProfile, null=False, blank=False)
asn = CachedForeignKey(
AS,
verbose_name=_("AS"),
help_text=_("Autonomous system granted with prefix"),
null=True,
blank=True,
on_delete=models.CASCADE,
)
project = CachedForeignKey(
Project,
verbose_name="Project",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="prefix_set",
)
vc = models.ForeignKey(
VC,
verbose_name=_("VC"),
null=True,
blank=True,
on_delete=models.SET_NULL,
help_text=_("VC bound to prefix"),
)
description = models.TextField(_("Description"), blank=True, null=True)
tags = TagsField("Tags", null=True, blank=True)
tt = models.IntegerField("TT", blank=True, null=True, help_text=_("Ticket #"))
state = DocumentReferenceField(State, null=True, blank=True)
allocated_till = models.DateField(
_("Allocated till"),
null=True,
blank=True,
help_text=_("Prefix temporary allocated till the date"),
)
ipv6_transition = models.OneToOneField(
"self",
related_name="ipv4_transition",
null=True,
blank=True,
limit_choices_to={"afi": "6"},
on_delete=models.SET_NULL,
)
prefix_discovery_policy = models.CharField(
_("Prefix Discovery Policy"),
max_length=1,
choices=[("P", "Profile"), ("E", "Enable"), ("D", "Disable")],
default="P",
blank=False,
null=False,
)
address_discovery_policy = models.CharField(
_("Address Discovery Policy"),
max_length=1,
choices=[("P", "Profile"), ("E", "Enable"), ("D", "Disable")],
default="P",
blank=False,
null=False,
)
source = models.CharField(
"Source",
max_length=1,
choices=[("M", "Manual"), ("i", "Interface"), ("w", "Whois"), ("n", "Neighbor")],
null=False,
blank=False,
default="M",
)
csv_ignored_fields = ["parent"]
_id_cache = cachetools.TTLCache(maxsize=1000, ttl=60)
def __str__(self):
return "%s(%s): %s" % (self.vrf.name, self.afi, self.prefix)
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
def get_by_id(cls, id):
mo = Prefix.objects.filter(id=id)[:1]
if mo:
return mo[0]
return None
def get_absolute_url(self):
return site.reverse("ip:ipam:vrf_index", self.vrf.id, self.afi, self.prefix)
@property
def has_transition(self):
"""
Check prefix has ipv4/ipv6 transition
:return:
"""
if self.is_ipv4:
return bool(self.ipv6_transition)
else:
try:
# pylint: disable=pointless-statement
self.ipv4_transition # noqa
return True
except Prefix.DoesNotExist:
return False
@classmethod
def get_parent(cls, vrf, afi, prefix):
"""
Get nearest closing prefix
"""
r = Prefix.objects.filter(vrf=vrf, afi=str(afi)).extra(
select={"masklen": "masklen(prefix)"},
where=["prefix >> %s"],
params=[str(prefix)],
order_by=["-masklen"],
)[:1]
if r:
return r[0]
return None
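    # Illustrative sketch (hypothetical data, not part of the original source):
    # the PostgreSQL `>>` operator tests CIDR containment, so with 10.0.0.0/8
    # and 10.1.0.0/16 present in the VRF, get_parent(vrf, "4", "10.1.1.0/24")
    # sorts candidates by masklen descending and returns the 10.1.0.0/16 row,
    # i.e. the nearest enclosing prefix.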
@property
def is_ipv4(self):
return self.afi == "4"
@property
def is_ipv6(self):
return self.afi == "6"
@property
def is_root(self):
"""
Returns true if the prefix is a root of VRF
"""
return (self.is_ipv4 and self.prefix == "0.0.0.0/0") or (
self.is_ipv6 and self.prefix == "::/0"
)
def clean(self):
"""
Field validation
"""
super(Prefix, self).clean()
# Set defaults
self.afi = "6" if ":" in self.prefix else "4"
# Check prefix is of AFI type
if self.is_ipv4:
check_ipv4_prefix(self.prefix)
elif self.is_ipv6:
check_ipv6_prefix(self.prefix)
# Set defaults
if not self.vrf:
self.vrf = VRF.get_global()
if not self.is_root:
# Set proper parent
self.parent = Prefix.get_parent(self.vrf, self.afi, self.prefix)
# Check root prefix have no parent
if self.is_root and self.parent:
raise ValidationError("Root prefix cannot have parent")
def iter_changed_datastream(self, changed_fields=None):
if config.datastream.enable_prefix:
yield "prefix", self.id
def save(self, *args, **kwargs):
"""
Save prefix
"""
self.clean()
super(Prefix, self).save(*args, **kwargs)
# Rebuild tree if necessary
# Reconnect children children prefixes
c = connection.cursor()
c.execute(
"""
UPDATE %s
SET parent_id=%%s
WHERE
vrf_id=%%s
AND afi=%%s
AND prefix << %%s
AND parent_id=%%s
"""
% Prefix._meta.db_table,
[self.id, self.vrf.id, self.afi, self.prefix, self.parent.id if self.parent else None],
)
# Reconnect children addresses
c.execute(
"""
UPDATE %s
SET prefix_id=%%s
WHERE
prefix_id=%%s
AND address << %%s
"""
% Address._meta.db_table,
[self.id, self.parent.id if self.parent else None, self.prefix],
)
def delete(self, *args, **kwargs):
"""
Delete prefix
"""
if self.is_root and not getattr(self, "_disable_delete_protection", False):
raise ValidationError("Cannot delete root prefix")
# Reconnect children prefixes
self.children_set.update(parent=self.parent)
# Reconnect children addresses
self.address_set.update(prefix=self.parent)
# Unlink dual-stack allocations
# self.clear_transition()
# Remove bookmarks
self.prefixbookmark_set.all().delete()
# Finally delete
super(Prefix, self).delete(*args, **kwargs)
def delete_recursive(self):
"""
Delete prefix and all descendancies
"""
# Unlink dual-stack allocations
# self.clear_transition()
# Recursive delete
# Get nested prefixes
ids = (
Prefix.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["prefix <<= %s"], params=[self.prefix])
.values_list("id", flat=True)
)
#
# Delete nested addresses
Address.objects.filter(prefix__in=ids).delete()
# Delete nested prefixes
Prefix.objects.filter(id__in=ids).delete()
# Delete permissions
PrefixAccess.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["prefix <<= %s"], params=[self.prefix]
        ).delete()
@property
def maintainers(self):
"""
List of persons having write access
@todo: PostgreSQL-independent implementation
"""
return User.objects.raw(
"""
SELECT id,username,first_name,last_name
FROM %s u
WHERE
is_active=TRUE
AND
(is_superuser=TRUE
OR
EXISTS(SELECT id
FROM %s a
WHERE
user_id=u.id
AND vrf_id=%%s
AND afi=%%s
AND prefix>>=%%s
AND can_change=TRUE
))
ORDER BY username
"""
% (User._meta.db_table, PrefixAccess._meta.db_table),
[self.vrf.id, self.afi, self.prefix],
)
@property
def short_description(self):
"""
Returns first line of description
:return:
"""
if self.description:
return self.description.split("\n", 1)[0].strip()
return ""
@property
def netmask(self):
"""
returns Netmask for IPv4
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).netmask.address
return None
@property
def broadcast(self):
"""
Returns Broadcast for IPv4
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).last.address
return None
@property
def wildcard(self):
"""
Returns Cisco wildcard for IPv4
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).wildcard.address
return ""
@property
def size(self):
"""
Returns IPv4 prefix size
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).size
return None
def can_view(self, user):
"""
Returns True if user has view access
:param user:
:return:
"""
return PrefixAccess.user_can_view(user, self.vrf, self.afi, self.prefix)
def can_change(self, user):
"""
Returns True if user has change access
:param user:
:return:
"""
return PrefixAccess.user_can_change(user, self.vrf, self.afi, self.prefix)
def has_bookmark(self, user):
"""
Check the user has bookmark on prefix
:param user:
:return:
"""
from .prefixbookmark import PrefixBookmark # noqa
try:
PrefixBookmark.objects.get(user=user, prefix=self)
return True
except PrefixBookmark.DoesNotExist:
return False
def toggle_bookmark(self, user):
"""
Toggle user bookmark. Returns new bookmark state
:param user:
:return:
"""
from .prefixbookmark import PrefixBookmark # noqa
b, created = PrefixBookmark.objects.get_or_create(user=user, prefix=self)
if created:
return True
b.delete()
return False
def get_index(self):
"""
Full-text search
"""
content = [self.prefix]
card = "Prefix %s" % self.prefix
if self.description:
content += [self.description]
card += " (%s)" % self.description
r = {
"id": "ip.prefix:%s" % self.id,
"title": self.prefix,
"content": "\n".join(content),
"card": card,
}
if self.tags:
r["tags"] = self.tags
return r
@classmethod
def get_search_result_url(cls, obj_id):
return "/api/card/view/prefix/%s/" % obj_id
def get_path(self):
return (
Prefix.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["prefix >>= %s"], params=[self.prefix])
.order_by("prefix")
.values_list("id", flat=True)
)
@property
def address_ranges(self):
"""
All prefix-related address ranges
:return:
"""
return list(
AddressRange.objects.raw(
"""
SELECT *
FROM ip_addressrange
WHERE
vrf_id=%s
AND afi=%s
AND is_active=TRUE
AND
(
from_address << %s
OR to_address << %s
OR %s BETWEEN from_address AND to_address
)
ORDER BY from_address, to_address
""",
[self.vrf.id, self.afi, self.prefix, self.prefix, self.prefix],
)
)
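    # The three conditions above cover, illustratively (hypothetical ranges,
    # not part of the original source):
    #   1. ranges starting inside this prefix (from_address << %s),
    #   2. ranges ending inside it (to_address << %s),
    #   3. ranges spanning it entirely (%s BETWEEN from_address AND to_address);
    # e.g. 10.0.0.50-10.0.1.10 matches prefix 10.0.1.0/24 through case 2.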
def rebase(self, vrf, new_prefix):
"""
Rebase prefix to a new location
:param vrf:
:param new_prefix:
:return:
"""
#
b = IP.prefix(self.prefix)
nb = IP.prefix(new_prefix)
# Validation
if vrf == self.vrf and self.prefix == new_prefix:
raise ValueError("Cannot rebase to self")
if b.afi != nb.afi:
raise ValueError("Cannot change address family during rebase")
if b.mask < nb.mask:
raise ValueError("Cannot rebase to prefix of lesser size")
# Rebase prefix and all nested prefixes
# Parents are left untouched
for p in Prefix.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["prefix <<= %s"], params=[self.prefix]
):
np = IP.prefix(p.prefix).rebase(b, nb).prefix
# Prefix.objects.filter(pk=p.pk).update(prefix=np, vrf=vrf)
p.prefix = np
p.vrf = vrf
p.save() # Raise events
# Rebase addresses
# Parents are left untouched
for a in Address.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["address <<= %s"], params=[self.prefix]
):
na = IP.prefix(a.address).rebase(b, nb).address
# Address.objects.filter(pk=a.pk).update(address=na, vrf=vrf)
a.address = na
a.vrf = vrf
a.save() # Raise events
# Rebase permissions
# move all permissions to the nested blocks
for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
where=["prefix <<= %s"], params=[self.prefix]
):
np = IP.prefix(pa.prefix).rebase(b, nb).prefix
PrefixAccess.objects.filter(pk=pa.pk).update(prefix=np, vrf=vrf)
# create permissions for covered blocks
for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
where=["prefix >> %s"], params=[self.prefix]
):
PrefixAccess(
user=pa.user,
vrf=vrf,
afi=pa.afi,
prefix=new_prefix,
can_view=pa.can_view,
can_change=pa.can_change,
).save()
# @todo: Rebase bookmarks
# @todo: Update caches
# Return rebased prefix
return Prefix.objects.get(pk=self.pk) # Updated object
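    # Illustrative usage sketch (hypothetical prefixes, not part of the
    # original source): moving an aggregate with everything below it, e.g.
    #   >>> p = Prefix.objects.get(vrf=vrf, afi="4", prefix="10.0.0.0/16")
    #   >>> p = p.rebase(vrf, "172.16.0.0/16")
    # rewrites nested 10.0.1.0/24 -> 172.16.1.0/24 and 10.0.1.5 -> 172.16.1.5,
    # while prefixes above the rebased one are left untouched.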
@property
def nested_prefix_set(self):
"""
Queryset returning all nested prefixes inside the prefix
"""
return Prefix.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["prefix <<= %s"], params=[self.prefix]
)
@property
def nested_address_set(self):
"""
Queryset returning all nested addresses inside the prefix
"""
return Address.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["address <<= %s"], params=[self.prefix]
)
def iter_free(self):
"""
Generator returning all available free prefixes inside
:return:
"""
for fp in IP.prefix(self.prefix).iter_free([p.prefix for p in self.children_set.all()]):
yield str(fp)
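    # Illustrative sketch (hypothetical data, not part of the original source):
    # for a 192.168.0.0/24 prefix whose only children are 192.168.0.0/26 and
    # 192.168.0.128/26, iter_free() would yield roughly "192.168.0.64/26" and
    # "192.168.0.192/26", i.e. the gaps not covered by any child prefix.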
@property
def effective_address_discovery(self):
if self.address_discovery_policy == "P":
return self.profile.address_discovery_policy
return self.address_discovery_policy
@property
def effective_prefix_discovery(self):
if self.prefix_discovery_policy == "P":
return self.profile.prefix_discovery_policy
return self.prefix_discovery_policy
@property
def effective_prefix_special_address(self):
return self.profile.prefix_special_address_policy
@property
def usage(self):
if self.is_ipv4:
usage = getattr(self, "_usage_cache", None)
if usage is not None:
# Use update_prefixes_usage results
return usage
size = IPv4(self.prefix).size
if not size:
return 100.0
n_ips = Address.objects.filter(prefix=self).count()
if n_ips and size > 2 and self.effective_prefix_special_address == "X":
# Exclude special addresses
size -= len(IPv4(self.prefix).special_addresses)
n_pfx = sum(
IPv4(p).size
for p in Prefix.objects.filter(parent=self)
.only("prefix")
.values_list("prefix", flat=True)
)
return float(n_ips + n_pfx) * 100.0 / float(size)
return None
@property
def usage_percent(self):
u = self.usage
if u is None:
return ""
return "%.2f%%" % u
@staticmethod
def update_prefixes_usage(prefixes):
"""
Bulk calculate and update prefixes usages
:param prefixes: List of Prefix instances
:return:
"""
# Filter IPv4 only
ipv4_prefixes = [p for p in prefixes if p.is_ipv4]
# Calculate nested prefixes
usage = defaultdict(int)
address_usage = defaultdict(int)
for parent, prefix in Prefix.objects.filter(parent__in=ipv4_prefixes).values_list(
"parent", "prefix"
):
ln = int(prefix.split("/")[1])
usage[parent] += 2 ** (32 - ln)
# Calculate nested addresses
has_address = set()
for parent, count in (
Address.objects.filter(prefix__in=ipv4_prefixes)
.values("prefix")
.annotate(count=models.Count("prefix"))
.values_list("prefix", "count")
):
usage[parent] += count
has_address.add(parent)
address_usage[parent] += count
# Update usage cache
for p in ipv4_prefixes:
ln = int(p.prefix.split("/")[1])
size = 2 ** (32 - ln)
if p.id in has_address and size > 2: # Not /31 or /32
if p.effective_prefix_special_address == "X":
size -= 2 # Exclude broadcast and network
p._address_usage_cache = float(address_usage[p.id]) * 100.0 / float(size)
p._usage_cache = float(usage[p.id]) * 100.0 / float(size)
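        # Worked example of the arithmetic above (hypothetical numbers, not
        # part of the original source): a /24 parent with one nested /26
        # (2 ** (32 - 26) = 64 addresses) and 10 directly attached Address
        # rows, special-address policy "X":
        #   size  = 256 - 2 = 254              # network/broadcast excluded
        #   usage = (64 + 10) * 100 / 254 ~= 29.1 %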
@property
def address_usage(self):
if self.is_ipv4:
usage = getattr(self, "_address_usage_cache", None)
if usage is not None:
# Use update_prefixes_usage results
return usage
size = IPv4(self.prefix).size
if not size:
return 100.0
n_ips = (
Address.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["address <<= %s"], params=[str(self.prefix)])
.count()
)
if self.effective_prefix_special_address == "X":
n_pfx = (
Prefix.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["prefix <<= %s"], params=[str(self.prefix)])
.count()
)
size -= len(IPv4(self.prefix).special_addresses) * n_pfx
return float(n_ips) * 100.0 / float(size) if n_ips else 0.0
else:
return None
@property
def address_usage_percent(self):
u = self.address_usage
if u is None:
return ""
return "%.2f%%" % u
def is_empty(self):
"""
Check prefix is empty and does not contain nested prefixes
and addresses
:return:
"""
if Prefix.objects.filter(parent=self).count() > 0:
return False
if Address.objects.filter(prefix=self).count() > 0:
return False
return True
def disable_delete_protection(self):
"""
Disable root delete protection
:return:
"""
self._disable_delete_protection = True
def get_effective_as(self):
"""
Return effective AS (first found upwards)
:return: AS instance or None
"""
if self.asn:
return self.asn
if not self.parent:
return None
return self.parent.get_effective_as()
# Avoid circular references
from .address import Address
from .prefixaccess import PrefixAccess
from .addressrange import AddressRange
| <filename>ip/models/prefix.py
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Prefix model
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import operator
from threading import Lock
from collections import defaultdict
# Third-party modules
import six
from django.db import models, connection
import cachetools
# NOC modules
from noc.config import config
from noc.core.model.base import NOCModel
from noc.aaa.models.user import User
from noc.project.models.project import Project
from noc.peer.models.asn import AS
from noc.vc.models.vc import VC
from noc.core.model.fields import TagsField, CIDRField, DocumentReferenceField, CachedForeignKey
from noc.lib.app.site import site
from noc.core.validators import check_ipv4_prefix, check_ipv6_prefix, ValidationError
from noc.core.ip import IP, IPv4
from noc.main.models.textindex import full_text_search
from noc.core.translation import ugettext as _
from noc.core.wf.decorator import workflow
from noc.core.model.decorator import on_delete_check
from noc.wf.models.state import State
from noc.core.datastream.decorator import datastream
from .vrf import VRF
from .afi import AFI_CHOICES
from .prefixprofile import PrefixProfile
id_lock = Lock()
@full_text_search
@workflow
@datastream
@on_delete_check(
ignore=[
("ip.PrefixBookmark", "prefix"),
("ip.Prefix", "parent"),
("ip.Prefix", "ipv6_transition"),
("ip.Address", "prefix"),
]
)
@six.python_2_unicode_compatible
class Prefix(NOCModel):
"""
Allocated prefix
"""
class Meta(object):
verbose_name = _("Prefix")
verbose_name_plural = _("Prefixes")
db_table = "ip_prefix"
app_label = "ip"
unique_together = [("vrf", "afi", "prefix")]
parent = models.ForeignKey(
"self",
related_name="children_set",
verbose_name=_("Parent"),
null=True,
blank=True,
on_delete=models.CASCADE,
)
vrf = CachedForeignKey(
VRF, verbose_name=_("VRF"), default=VRF.get_global, on_delete=models.CASCADE
)
afi = models.CharField(_("Address Family"), max_length=1, choices=AFI_CHOICES)
prefix = CIDRField(_("Prefix"))
name = models.CharField(_("Name"), max_length=255, null=True, blank=True)
profile = DocumentReferenceField(PrefixProfile, null=False, blank=False)
asn = CachedForeignKey(
AS,
verbose_name=_("AS"),
help_text=_("Autonomous system granted with prefix"),
null=True,
blank=True,
on_delete=models.CASCADE,
)
project = CachedForeignKey(
Project,
verbose_name="Project",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="prefix_set",
)
vc = models.ForeignKey(
VC,
verbose_name=_("VC"),
null=True,
blank=True,
on_delete=models.SET_NULL,
help_text=_("VC bound to prefix"),
)
description = models.TextField(_("Description"), blank=True, null=True)
tags = TagsField("Tags", null=True, blank=True)
tt = models.IntegerField("TT", blank=True, null=True, help_text=_("Ticket #"))
state = DocumentReferenceField(State, null=True, blank=True)
allocated_till = models.DateField(
_("Allocated till"),
null=True,
blank=True,
help_text=_("Prefix temporary allocated till the date"),
)
ipv6_transition = models.OneToOneField(
"self",
related_name="ipv4_transition",
null=True,
blank=True,
limit_choices_to={"afi": "6"},
on_delete=models.SET_NULL,
)
prefix_discovery_policy = models.CharField(
_("Prefix Discovery Policy"),
max_length=1,
choices=[("P", "Profile"), ("E", "Enable"), ("D", "Disable")],
default="P",
blank=False,
null=False,
)
address_discovery_policy = models.CharField(
_("Address Discovery Policy"),
max_length=1,
choices=[("P", "Profile"), ("E", "Enable"), ("D", "Disable")],
default="P",
blank=False,
null=False,
)
source = models.CharField(
"Source",
max_length=1,
choices=[("M", "Manual"), ("i", "Interface"), ("w", "Whois"), ("n", "Neighbor")],
null=False,
blank=False,
default="M",
)
csv_ignored_fields = ["parent"]
_id_cache = cachetools.TTLCache(maxsize=1000, ttl=60)
def __str__(self):
return "%s(%s): %s" % (self.vrf.name, self.afi, self.prefix)
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
def get_by_id(cls, id):
mo = Prefix.objects.filter(id=id)[:1]
if mo:
return mo[0]
return None
def get_absolute_url(self):
return site.reverse("ip:ipam:vrf_index", self.vrf.id, self.afi, self.prefix)
@property
def has_transition(self):
"""
Check prefix has ipv4/ipv6 transition
:return:
"""
if self.is_ipv4:
return bool(self.ipv6_transition)
else:
try:
# pylint: disable=pointless-statement
self.ipv4_transition # noqa
return True
except Prefix.DoesNotExist:
return False
@classmethod
def get_parent(cls, vrf, afi, prefix):
"""
Get nearest closing prefix
"""
r = Prefix.objects.filter(vrf=vrf, afi=str(afi)).extra(
select={"masklen": "masklen(prefix)"},
where=["prefix >> %s"],
params=[str(prefix)],
order_by=["-masklen"],
)[:1]
if r:
return r[0]
return None
@property
def is_ipv4(self):
return self.afi == "4"
@property
def is_ipv6(self):
return self.afi == "6"
@property
def is_root(self):
"""
Returns true if the prefix is a root of VRF
"""
return (self.is_ipv4 and self.prefix == "0.0.0.0/0") or (
self.is_ipv6 and self.prefix == "::/0"
)
def clean(self):
"""
Field validation
"""
super(Prefix, self).clean()
# Set defaults
self.afi = "6" if ":" in self.prefix else "4"
# Check prefix is of AFI type
if self.is_ipv4:
check_ipv4_prefix(self.prefix)
elif self.is_ipv6:
check_ipv6_prefix(self.prefix)
# Set defaults
if not self.vrf:
self.vrf = VRF.get_global()
if not self.is_root:
# Set proper parent
self.parent = Prefix.get_parent(self.vrf, self.afi, self.prefix)
# Check root prefix have no parent
if self.is_root and self.parent:
raise ValidationError("Root prefix cannot have parent")
def iter_changed_datastream(self, changed_fields=None):
if config.datastream.enable_prefix:
yield "prefix", self.id
def save(self, *args, **kwargs):
"""
Save prefix
"""
self.clean()
super(Prefix, self).save(*args, **kwargs)
# Rebuild tree if necessary
# Reconnect children children prefixes
c = connection.cursor()
c.execute(
"""
UPDATE %s
SET parent_id=%%s
WHERE
vrf_id=%%s
AND afi=%%s
AND prefix << %%s
AND parent_id=%%s
"""
% Prefix._meta.db_table,
[self.id, self.vrf.id, self.afi, self.prefix, self.parent.id if self.parent else None],
)
# Reconnect children addresses
c.execute(
"""
UPDATE %s
SET prefix_id=%%s
WHERE
prefix_id=%%s
AND address << %%s
"""
% Address._meta.db_table,
[self.id, self.parent.id if self.parent else None, self.prefix],
)
def delete(self, *args, **kwargs):
"""
Delete prefix
"""
if self.is_root and not getattr(self, "_disable_delete_protection", False):
raise ValidationError("Cannot delete root prefix")
# Reconnect children prefixes
self.children_set.update(parent=self.parent)
# Reconnect children addresses
self.address_set.update(prefix=self.parent)
# Unlink dual-stack allocations
# self.clear_transition()
# Remove bookmarks
self.prefixbookmark_set.all().delete()
# Finally delete
super(Prefix, self).delete(*args, **kwargs)
def delete_recursive(self):
"""
Delete prefix and all descendancies
"""
# Unlink dual-stack allocations
# self.clear_transition()
# Recursive delete
# Get nested prefixes
ids = (
Prefix.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["prefix <<= %s"], params=[self.prefix])
.values_list("id", flat=True)
)
#
# Delete nested addresses
Address.objects.filter(prefix__in=ids).delete()
# Delete nested prefixes
Prefix.objects.filter(id__in=ids).delete()
# Delete permissions
PrefixAccess.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["prefix <<= %s"], params=[self.prefix]
        ).delete()
@property
def maintainers(self):
"""
List of persons having write access
@todo: PostgreSQL-independent implementation
"""
return User.objects.raw(
"""
SELECT id,username,first_name,last_name
FROM %s u
WHERE
is_active=TRUE
AND
(is_superuser=TRUE
OR
EXISTS(SELECT id
FROM %s a
WHERE
user_id=u.id
AND vrf_id=%%s
AND afi=%%s
AND prefix>>=%%s
AND can_change=TRUE
))
ORDER BY username
"""
% (User._meta.db_table, PrefixAccess._meta.db_table),
[self.vrf.id, self.afi, self.prefix],
)
@property
def short_description(self):
"""
Returns first line of description
:return:
"""
if self.description:
return self.description.split("\n", 1)[0].strip()
return ""
@property
def netmask(self):
"""
returns Netmask for IPv4
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).netmask.address
return None
@property
def broadcast(self):
"""
Returns Broadcast for IPv4
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).last.address
return None
@property
def wildcard(self):
"""
Returns Cisco wildcard for IPv4
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).wildcard.address
return ""
@property
def size(self):
"""
Returns IPv4 prefix size
:return:
"""
if self.is_ipv4:
return IPv4(self.prefix).size
return None
def can_view(self, user):
"""
Returns True if user has view access
:param user:
:return:
"""
return PrefixAccess.user_can_view(user, self.vrf, self.afi, self.prefix)
def can_change(self, user):
"""
Returns True if user has change access
:param user:
:return:
"""
return PrefixAccess.user_can_change(user, self.vrf, self.afi, self.prefix)
def has_bookmark(self, user):
"""
Check the user has bookmark on prefix
:param user:
:return:
"""
from .prefixbookmark import PrefixBookmark # noqa
try:
PrefixBookmark.objects.get(user=user, prefix=self)
return True
except PrefixBookmark.DoesNotExist:
return False
def toggle_bookmark(self, user):
"""
Toggle user bookmark. Returns new bookmark state
:param user:
:return:
"""
from .prefixbookmark import PrefixBookmark # noqa
b, created = PrefixBookmark.objects.get_or_create(user=user, prefix=self)
if created:
return True
b.delete()
return False
def get_index(self):
"""
Full-text search
"""
content = [self.prefix]
card = "Prefix %s" % self.prefix
if self.description:
content += [self.description]
card += " (%s)" % self.description
r = {
"id": "ip.prefix:%s" % self.id,
"title": self.prefix,
"content": "\n".join(content),
"card": card,
}
if self.tags:
r["tags"] = self.tags
return r
@classmethod
def get_search_result_url(cls, obj_id):
return "/api/card/view/prefix/%s/" % obj_id
def get_path(self):
return (
Prefix.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["prefix >>= %s"], params=[self.prefix])
.order_by("prefix")
.values_list("id", flat=True)
)
@property
def address_ranges(self):
"""
All prefix-related address ranges
:return:
"""
return list(
AddressRange.objects.raw(
"""
SELECT *
FROM ip_addressrange
WHERE
vrf_id=%s
AND afi=%s
AND is_active=TRUE
AND
(
from_address << %s
OR to_address << %s
OR %s BETWEEN from_address AND to_address
)
ORDER BY from_address, to_address
""",
[self.vrf.id, self.afi, self.prefix, self.prefix, self.prefix],
)
)
def rebase(self, vrf, new_prefix):
"""
Rebase prefix to a new location
:param vrf:
:param new_prefix:
:return:
"""
#
b = IP.prefix(self.prefix)
nb = IP.prefix(new_prefix)
# Validation
if vrf == self.vrf and self.prefix == new_prefix:
raise ValueError("Cannot rebase to self")
if b.afi != nb.afi:
raise ValueError("Cannot change address family during rebase")
if b.mask < nb.mask:
raise ValueError("Cannot rebase to prefix of lesser size")
# Rebase prefix and all nested prefixes
# Parents are left untouched
for p in Prefix.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["prefix <<= %s"], params=[self.prefix]
):
np = IP.prefix(p.prefix).rebase(b, nb).prefix
# Prefix.objects.filter(pk=p.pk).update(prefix=np, vrf=vrf)
p.prefix = np
p.vrf = vrf
p.save() # Raise events
# Rebase addresses
# Parents are left untouched
for a in Address.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["address <<= %s"], params=[self.prefix]
):
na = IP.prefix(a.address).rebase(b, nb).address
# Address.objects.filter(pk=a.pk).update(address=na, vrf=vrf)
a.address = na
a.vrf = vrf
a.save() # Raise events
# Rebase permissions
# move all permissions to the nested blocks
for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
where=["prefix <<= %s"], params=[self.prefix]
):
np = IP.prefix(pa.prefix).rebase(b, nb).prefix
PrefixAccess.objects.filter(pk=pa.pk).update(prefix=np, vrf=vrf)
# create permissions for covered blocks
for pa in PrefixAccess.objects.filter(vrf=self.vrf).extra(
where=["prefix >> %s"], params=[self.prefix]
):
PrefixAccess(
user=pa.user,
vrf=vrf,
afi=pa.afi,
prefix=new_prefix,
can_view=pa.can_view,
can_change=pa.can_change,
).save()
# @todo: Rebase bookmarks
# @todo: Update caches
# Return rebased prefix
return Prefix.objects.get(pk=self.pk) # Updated object
@property
def nested_prefix_set(self):
"""
Queryset returning all nested prefixes inside the prefix
"""
return Prefix.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["prefix <<= %s"], params=[self.prefix]
)
@property
def nested_address_set(self):
"""
Queryset returning all nested addresses inside the prefix
"""
return Address.objects.filter(vrf=self.vrf, afi=self.afi).extra(
where=["address <<= %s"], params=[self.prefix]
)
def iter_free(self):
"""
Generator returning all available free prefixes inside
:return:
"""
for fp in IP.prefix(self.prefix).iter_free([p.prefix for p in self.children_set.all()]):
yield str(fp)
@property
def effective_address_discovery(self):
if self.address_discovery_policy == "P":
return self.profile.address_discovery_policy
return self.address_discovery_policy
@property
def effective_prefix_discovery(self):
if self.prefix_discovery_policy == "P":
return self.profile.prefix_discovery_policy
return self.prefix_discovery_policy
@property
def effective_prefix_special_address(self):
return self.profile.prefix_special_address_policy
@property
def usage(self):
if self.is_ipv4:
usage = getattr(self, "_usage_cache", None)
if usage is not None:
# Use update_prefixes_usage results
return usage
size = IPv4(self.prefix).size
if not size:
return 100.0
n_ips = Address.objects.filter(prefix=self).count()
if n_ips and size > 2 and self.effective_prefix_special_address == "X":
# Exclude special addresses
size -= len(IPv4(self.prefix).special_addresses)
n_pfx = sum(
IPv4(p).size
for p in Prefix.objects.filter(parent=self)
.only("prefix")
.values_list("prefix", flat=True)
)
return float(n_ips + n_pfx) * 100.0 / float(size)
return None
@property
def usage_percent(self):
u = self.usage
if u is None:
return ""
return "%.2f%%" % u
@staticmethod
def update_prefixes_usage(prefixes):
"""
Bulk calculate and update prefixes usages
:param prefixes: List of Prefix instances
:return:
"""
# Filter IPv4 only
ipv4_prefixes = [p for p in prefixes if p.is_ipv4]
# Calculate nested prefixes
usage = defaultdict(int)
address_usage = defaultdict(int)
for parent, prefix in Prefix.objects.filter(parent__in=ipv4_prefixes).values_list(
"parent", "prefix"
):
ln = int(prefix.split("/")[1])
usage[parent] += 2 ** (32 - ln)
# Calculate nested addresses
has_address = set()
for parent, count in (
Address.objects.filter(prefix__in=ipv4_prefixes)
.values("prefix")
.annotate(count=models.Count("prefix"))
.values_list("prefix", "count")
):
usage[parent] += count
has_address.add(parent)
address_usage[parent] += count
# Update usage cache
for p in ipv4_prefixes:
ln = int(p.prefix.split("/")[1])
size = 2 ** (32 - ln)
if p.id in has_address and size > 2: # Not /31 or /32
if p.effective_prefix_special_address == "X":
size -= 2 # Exclude broadcast and network
p._address_usage_cache = float(address_usage[p.id]) * 100.0 / float(size)
p._usage_cache = float(usage[p.id]) * 100.0 / float(size)
@property
def address_usage(self):
if self.is_ipv4:
usage = getattr(self, "_address_usage_cache", None)
if usage is not None:
# Use update_prefixes_usage results
return usage
size = IPv4(self.prefix).size
if not size:
return 100.0
n_ips = (
Address.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["address <<= %s"], params=[str(self.prefix)])
.count()
)
if self.effective_prefix_special_address == "X":
n_pfx = (
Prefix.objects.filter(vrf=self.vrf, afi=self.afi)
.extra(where=["prefix <<= %s"], params=[str(self.prefix)])
.count()
)
size -= len(IPv4(self.prefix).special_addresses) * n_pfx
return float(n_ips) * 100.0 / float(size) if n_ips else 0.0
else:
return None
@property
def address_usage_percent(self):
u = self.address_usage
if u is None:
return ""
return "%.2f%%" % u
def is_empty(self):
"""
Check prefix is empty and does not contain nested prefixes
and addresses
:return:
"""
if Prefix.objects.filter(parent=self).count() > 0:
return False
if Address.objects.filter(prefix=self).count() > 0:
return False
return True
def disable_delete_protection(self):
"""
Disable root delete protection
:return:
"""
self._disable_delete_protection = True
def get_effective_as(self):
"""
Return effective AS (first found upwards)
:return: AS instance or None
"""
if self.asn:
return self.asn
if not self.parent:
return None
return self.parent.get_effective_as()
# Avoid circular references
from .address import Address
from .prefixaccess import PrefixAccess
from .addressrange import AddressRange
| en | 0.55731 | # -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Prefix model # --------------------------------------------------------------------- # Copyright (C) 2007-2019 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # Python modules # Third-party modules # NOC modules Allocated prefix #")) Check prefix has ipv4/ipv6 transition :return: # pylint: disable=pointless-statement # noqa Get nearest closing prefix Returns true if the prefix is a root of VRF Field validation # Set defaults # Check prefix is of AFI type # Set defaults # Set proper parent # Check root prefix have no parent Save prefix # Rebuild tree if necessary # Reconnect children children prefixes UPDATE %s SET parent_id=%%s WHERE vrf_id=%%s AND afi=%%s AND prefix << %%s AND parent_id=%%s # Reconnect children addresses UPDATE %s SET prefix_id=%%s WHERE prefix_id=%%s AND address << %%s Delete prefix # Reconnect children prefixes # Reconnect children addresses # Unlink dual-stack allocations # self.clear_transition() # Remove bookmarks # Finally delete Delete prefix and all descendancies # Unlink dual-stack allocations # self.clear_transition() # Recursive delete # Get nested prefixes # # Delete nested addresses # Delete nested prefixes # Delete permissions List of persons having write access @todo: PostgreSQL-independent implementation SELECT id,username,first_name,last_name FROM %s u WHERE is_active=TRUE AND (is_superuser=TRUE OR EXISTS(SELECT id FROM %s a WHERE user_id=u.id AND vrf_id=%%s AND afi=%%s AND prefix>>=%%s AND can_change=TRUE )) ORDER BY username Returns first line of description :return: returns Netmask for IPv4 :return: Returns Broadcast for IPv4 :return: Returns Cisco wildcard for IPv4 :return: Returns IPv4 prefix size :return: Returns True if user has view access :param user: :return: Returns True if user has change access :param user: :return: Check the user has bookmark on prefix :param user: :return: # noqa Toggle user bookmark. 
Returns new bookmark state :param user: :return: # noqa Full-text search All prefix-related address ranges :return: SELECT * FROM ip_addressrange WHERE vrf_id=%s AND afi=%s AND is_active=TRUE AND ( from_address << %s OR to_address << %s OR %s BETWEEN from_address AND to_address ) ORDER BY from_address, to_address Rebase prefix to a new location :param vrf: :param new_prefix: :return: # # Validation # Rebase prefix and all nested prefixes # Parents are left untouched # Prefix.objects.filter(pk=p.pk).update(prefix=np, vrf=vrf) # Raise events # Rebase addresses # Parents are left untouched # Address.objects.filter(pk=a.pk).update(address=na, vrf=vrf) # Raise events # Rebase permissions # move all permissions to the nested blocks # create permissions for covered blocks # @todo: Rebase bookmarks # @todo: Update caches # Return rebased prefix # Updated object Queryset returning all nested prefixes inside the prefix Queryset returning all nested addresses inside the prefix Generator returning all available free prefixes inside :return: # Use update_prefixes_usage results # Exclude special addresses Bulk calculate and update prefixes usages :param prefixes: List of Prefix instances :return: # Filter IPv4 only # Calculate nested prefixes # Calculate nested addresses # Update usage cache # Not /31 or /32 # Exclude broadcast and network # Use update_prefixes_usage results Check prefix is empty and does not contain nested prefixes and addresses :return: Disable root delete protection :return: Return effective AS (first found upwards) :return: AS instance or None # Avoid circular references | 1.752978 | 2 |
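# ----------------------------------------------------------------------
# Editor's note: the record above ends here. What follows is a hedged,
# dependency-free sketch (not NOC code) of the usage arithmetic that
# Prefix.usage and Prefix.update_prefixes_usage implement for IPv4:
# size = 2 ** (32 - masklen), optionally minus network/broadcast, and
# utilisation = (addresses + nested prefix sizes) * 100 / size.
# Function and argument names below are illustrative assumptions.
def ipv4_usage_percent(prefix, address_count, nested_prefixes=(), exclude_special=True):
    """Return utilisation of an IPv4 prefix as a percentage."""
    masklen = int(prefix.split("/")[1])
    size = 2 ** (32 - masklen)
    if exclude_special and size > 2:  # not a /31 or /32
        size -= 2  # drop the network and broadcast addresses
    nested = sum(2 ** (32 - int(p.split("/")[1])) for p in nested_prefixes)
    return float(address_count + nested) * 100.0 / float(size)
# Example: a /24 with 60 addresses and one nested /26 -> (60 + 64) / 254, about 48.82%
print("%.2f%%" % ipv4_usage_percent("192.168.0.0/24", 60, ["192.168.0.0/26"]))
# ----------------------------------------------------------------------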
i18n_l18n.py | britodfbr/curso-i18n-1602670068 | 0 | 6631643 | #!/bin/env python
# -*- coding: utf-8 -*-
__author__ = '@britodfbr'
"""https://under-linux.org/entry.php?b=1273"""
import time
import locale
numero = 154623.56
valor = 1123.5
sdate = "%A, %d %B %Y"
# default
print(f'{numero}; {valor}; {time.strftime(sdate)}')
# locale local
locale.setlocale(locale.LC_ALL, '')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale en
locale.setlocale(locale.LC_ALL, 'en_US')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale de-DE
locale.setlocale(locale.LC_ALL, 'de_DE')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale es-Es
locale.setlocale(locale.LC_ALL, 'es_ES')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale fr_FR
locale.setlocale(locale.LC_ALL, 'fr_FR')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale ar_LB
locale.setlocale(locale.LC_ALL, 'ar_LB')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale hi-IN
locale.setlocale(locale.LC_ALL, 'hi_IN')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale ja-JP
locale.setlocale(locale.LC_ALL, 'ja_JP')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
| #!/bin/env python
# -*- coding: utf-8 -*-
__author__ = '@britodfbr'
"""https://under-linux.org/entry.php?b=1273"""
import time
import locale
numero = 154623.56
valor = 1123.5
sdate = "%A, %d %B %Y"
# default
print(f'{numero}; {valor}; {time.strftime(sdate)}')
# locale local
locale.setlocale(locale.LC_ALL, '')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale en
locale.setlocale(locale.LC_ALL, 'en_US')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale de-DE
locale.setlocale(locale.LC_ALL, 'de_DE')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale es-Es
locale.setlocale(locale.LC_ALL, 'es_ES')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale fr_FR
locale.setlocale(locale.LC_ALL, 'fr_FR')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale ar_LB
locale.setlocale(locale.LC_ALL, 'ar_LB')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale hi-IN
locale.setlocale(locale.LC_ALL, 'hi_IN')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
# locale ja-JP
locale.setlocale(locale.LC_ALL, 'ja_JP')
print(f'{locale.format_string("%.2f", numero, True)}; {locale.currency(valor)}; {time.strftime(sdate)}')
| en | 0.540748 | # /bin/env python # -*- encode: utf-8 -*- https://under-linux.org/entry.php?b=1273 # default # locale local # locale en # locale de-DE # locale es-Es # locale fr_FR # locale ar_LB # locale hi-IN # locale ja-JP | 3.080256 | 3 |
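# ----------------------------------------------------------------------
# Editor's note: hedged sketch, not part of the record above. That script
# hard-codes locale names, and locale.setlocale() raises locale.Error on
# hosts where a locale is not installed; a defensive variant can skip
# missing locales instead of crashing. The locale names in the loop are
# assumptions and may differ per system.
import locale
import time

def show_formats(loc, number=154623.56, amount=1123.5, fmt="%A, %d %B %Y"):
    try:
        locale.setlocale(locale.LC_ALL, loc)
    except locale.Error:
        print(f"locale {loc!r} is not available on this system")
        return
    print(f"{locale.format_string('%.2f', number, grouping=True)}; "
          f"{locale.currency(amount)}; {time.strftime(fmt)}")

for loc in ("en_US.UTF-8", "de_DE.UTF-8", "ja_JP.UTF-8"):
    show_formats(loc)
# ----------------------------------------------------------------------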
src/game.py | adoth/Blackjack | 0 | 6631644 | from Blackjack.src.deck import Deck
from Blackjack.src.player import Player
from Blackjack.src.dealer import Dealer
class Game:
def __init__(self):
self.player = Player('player')
self.dealer = Dealer('dealer')
self.deck = Deck()
self.player_left = self.player_right = None
def play(self):
self.first_turn(self.player)
self.first_turn(self.dealer)
self.player_turn(self.player)
self.dealer_turn()
self.result()
def first_turn(self, player):
player.hand.set_hand(self.deck.draw_card())
if type(player) == Dealer:
player.hand.set_hand(self.deck.draw_card(), display=False)
else:
player.hand.set_hand(self.deck.draw_card())
def split_func(self, player):
self.player_left = Player('player left')
self.player_right = Player('player right')
print('\n---left')
self.player_left.hand.set_hand(player.hand.hand[0])
self.player_left.hand.set_hand(self.deck.draw_card())
self.player_left.hand.done_split = True
self.player_turn(self.player_left)
print('\n--right')
self.player_right.hand.set_hand(player.hand.hand[1])
self.player_right.hand.set_hand(self.deck.draw_card())
self.player_right.hand.done_split = True
self.player_turn(self.player_right)
def player_turn(self, player):
while True:
player.display_score()
if player.hand.is_busted():
break
print()
player_intention = player.input_player_intention()
if player_intention == 'hit':
player.hand.set_hand(self.deck.draw_card())
elif player_intention in ['double down', 'doubledown', 'double']:
player.hand.set_hand(self.deck.draw_card())
player.display_score()
break
elif player_intention == 'split':
player.done_split = True
self.split_func(player)
break
elif player_intention == 'stand':
player.display_score()
break
def dealer_turn(self):
self.dealer.display_hole_card()
while self.dealer.is_continue():
self.dealer.display_score()
self.dealer.hand.set_hand(self.deck.draw_card())
self.dealer.display_score()
def display_result(self, player1):
player1.display_score()
self.dealer.display_score()
print('-' * 100)
if player1.hand.is_natural_blackjack() and self.dealer.hand.is_natural_blackjack():
print('引き分け')
elif player1.hand.is_natural_blackjack():
print('{} is natural black jack'.format(player1.name))
print('あなたの勝ちです')
elif self.dealer.hand.is_natural_blackjack():
print('{} is natural black jack'.format(self.dealer.name))
print('あなたの負けです')
elif player1.hand.is_busted():
print('あなたの負けです')
elif self.dealer.hand.is_busted():
print('あなたの勝ちです')
elif player1.hand.calculate_total_score() > self.dealer.hand.calculate_total_score():
print('あなたの勝ちです')
elif self.dealer.hand.calculate_total_score() > player1.hand.calculate_total_score():
print('あなたの負けです')
elif player1.hand.calculate_total_score() == self.dealer.hand.calculate_total_score():
print('引き分け')
def result(self):
if self.player.done_split:
self.display_result(self.player_left)
print('-' * 100)
print()
self.display_result(self.player_right)
else:
self.display_result(self.player)
if __name__ == '__main__':
Game().play()
| from Blackjack.src.deck import Deck
from Blackjack.src.player import Player
from Blackjack.src.dealer import Dealer
class Game:
def __init__(self):
self.player = Player('player')
self.dealer = Dealer('dealer')
self.deck = Deck()
self.player_left = self.player_right = None
def play(self):
self.first_turn(self.player)
self.first_turn(self.dealer)
self.player_turn(self.player)
self.dealer_turn()
self.result()
def first_turn(self, player):
player.hand.set_hand(self.deck.draw_card())
if type(player) == Dealer:
player.hand.set_hand(self.deck.draw_card(), display=False)
else:
player.hand.set_hand(self.deck.draw_card())
def split_func(self, player):
self.player_left = Player('player left')
self.player_right = Player('player right')
print('\n---left')
self.player_left.hand.set_hand(player.hand.hand[0])
self.player_left.hand.set_hand(self.deck.draw_card())
self.player_left.hand.done_split = True
self.player_turn(self.player_left)
print('\n--right')
self.player_right.hand.set_hand(player.hand.hand[1])
self.player_right.hand.set_hand(self.deck.draw_card())
self.player_right.hand.done_split = True
self.player_turn(self.player_right)
def player_turn(self, player):
while True:
player.display_score()
if player.hand.is_busted():
break
print()
player_intention = player.input_player_intention()
if player_intention == 'hit':
player.hand.set_hand(self.deck.draw_card())
elif player_intention in ['double down', 'doubledown', 'double']:
player.hand.set_hand(self.deck.draw_card())
player.display_score()
break
elif player_intention == 'split':
player.done_split = True
self.split_func(player)
break
elif player_intention == 'stand':
player.display_score()
break
def dealer_turn(self):
self.dealer.display_hole_card()
while self.dealer.is_continue():
self.dealer.display_score()
self.dealer.hand.set_hand(self.deck.draw_card())
self.dealer.display_score()
def display_result(self, player1):
player1.display_score()
self.dealer.display_score()
print('-' * 100)
if player1.hand.is_natural_blackjack() and self.dealer.hand.is_natural_blackjack():
print('引き分け')
elif player1.hand.is_natural_blackjack():
print('{} is natural black jack'.format(player1.name))
print('あなたの勝ちです')
elif self.dealer.hand.is_natural_blackjack():
print('{} is natural black jack'.format(self.dealer.name))
print('あなたの負けです')
elif player1.hand.is_busted():
print('あなたの負けです')
elif self.dealer.hand.is_busted():
print('あなたの勝ちです')
elif player1.hand.calculate_total_score() > self.dealer.hand.calculate_total_score():
print('あなたの勝ちです')
elif self.dealer.hand.calculate_total_score() > player1.hand.calculate_total_score():
print('あなたの負けです')
elif player1.hand.calculate_total_score() == self.dealer.hand.calculate_total_score():
print('引き分け')
def result(self):
if self.player.done_split:
self.display_result(self.player_left)
print('-' * 100)
print()
self.display_result(self.player_right)
else:
self.display_result(self.player)
if __name__ == '__main__':
Game().play() | none | 1 | 3.089242 | 3 |
|
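# ----------------------------------------------------------------------
# Editor's note: hedged sketch, not taken from the Blackjack repository
# above. For readers, the Japanese result strings in the record mean:
# 引き分け = "draw", あなたの勝ちです = "you win", あなたの負けです = "you lose".
# The record relies on Hand.calculate_total_score(), which is defined
# elsewhere in that repo; a typical blackjack scoring rule (aces worth 11
# while that does not bust the hand, otherwise 1) can be sketched like this:
def blackjack_score(ranks):
    """ranks: sequence such as ['A', 'K', '7']; returns the best legal total."""
    value = {**{str(n): n for n in range(2, 11)}, 'J': 10, 'Q': 10, 'K': 10, 'A': 11}
    total = sum(value[r] for r in ranks)
    aces = sum(1 for r in ranks if r == 'A')
    while total > 21 and aces:  # demote aces from 11 to 1 while busted
        total -= 10
        aces -= 1
    return total

assert blackjack_score(['A', 'K']) == 21       # natural blackjack
assert blackjack_score(['A', '9', 'A']) == 21
assert blackjack_score(['K', 'Q', '5']) == 25  # busted
# ----------------------------------------------------------------------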
elegantrl_helloworld/env.py | SaadSaeed150/local_elegant | 752 | 6631645 | import gym
import numpy as np  # used below (np.ndarray, np.float32) but missing from the original imports
gym.logger.set_level(40) # Block warning
class PreprocessEnv(gym.Wrapper): # environment wrapper
def __init__(self, env, if_print=True):
self.env = gym.make(env) if isinstance(env, str) else env
super().__init__(self.env)
(self.env_name, self.state_dim, self.action_dim, self.action_max, self.max_step,
self.if_discrete, self.target_return) = get_gym_env_info(self.env, if_print)
def reset(self) -> np.ndarray:
state = self.env.reset()
return state.astype(np.float32)
def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):
state, reward, done, info_dict = self.env.step(action * self.action_max)
return state.astype(np.float32), reward, done, info_dict
def get_gym_env_info(env, if_print) -> (str, int, int, int, int, bool, float):
assert isinstance(env, gym.Env)
env_name = getattr(env, 'env_name', None)
env_name = env.unwrapped.spec.id if env_name is None else env_name
if isinstance(env.observation_space, gym.spaces.discrete.Discrete):
raise RuntimeError("| <class 'gym.spaces.discrete.Discrete'> does not support environment with discrete observation (state) space.")
state_shape = env.observation_space.shape
state_dim = state_shape[0] if len(state_shape) == 1 else state_shape # sometimes state_dim is a list
target_return = getattr(env.spec, 'reward_threshold', 2 ** 16)
max_step = getattr(env, 'max_step', None)
max_step_default = getattr(env, '_max_episode_steps', None)
if max_step is None:
max_step = max_step_default
if max_step is None:
max_step = 2 ** 10
if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
if if_discrete: # for discrete action space
action_dim = env.action_space.n
action_max = int(1)
elif isinstance(env.action_space, gym.spaces.Box): # for continuous action space
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
assert not any(env.action_space.high + env.action_space.low)
else:
raise RuntimeError('| Please set these value manually: if_discrete=bool, action_dim=int, action_max=1.0')
print(f"\n| env_name: {env_name}, action if_discrete: {if_discrete}"
f"\n| state_dim: {state_dim}, action_dim: {action_dim}, action_max: {action_max}"
f"\n| max_step: {max_step:4}, target_return: {target_return}") if if_print else None
return env_name, state_dim, action_dim, action_max, max_step, if_discrete, target_return
| import gym
import numpy as np  # used below (np.ndarray, np.float32) but missing from the original imports
gym.logger.set_level(40) # Block warning
class PreprocessEnv(gym.Wrapper): # environment wrapper
def __init__(self, env, if_print=True):
self.env = gym.make(env) if isinstance(env, str) else env
super().__init__(self.env)
(self.env_name, self.state_dim, self.action_dim, self.action_max, self.max_step,
self.if_discrete, self.target_return) = get_gym_env_info(self.env, if_print)
def reset(self) -> np.ndarray:
state = self.env.reset()
return state.astype(np.float32)
def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):
state, reward, done, info_dict = self.env.step(action * self.action_max)
return state.astype(np.float32), reward, done, info_dict
def get_gym_env_info(env, if_print) -> (str, int, int, int, int, bool, float):
assert isinstance(env, gym.Env)
env_name = getattr(env, 'env_name', None)
env_name = env.unwrapped.spec.id if env_name is None else env_name
if isinstance(env.observation_space, gym.spaces.discrete.Discrete):
raise RuntimeError("| <class 'gym.spaces.discrete.Discrete'> does not support environment with discrete observation (state) space.")
state_shape = env.observation_space.shape
state_dim = state_shape[0] if len(state_shape) == 1 else state_shape # sometimes state_dim is a list
target_return = getattr(env.spec, 'reward_threshold', 2 ** 16)
max_step = getattr(env, 'max_step', None)
max_step_default = getattr(env, '_max_episode_steps', None)
if max_step is None:
max_step = max_step_default
if max_step is None:
max_step = 2 ** 10
if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
if if_discrete: # for discrete action space
action_dim = env.action_space.n
action_max = int(1)
elif isinstance(env.action_space, gym.spaces.Box): # for continuous action space
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
assert not any(env.action_space.high + env.action_space.low)
else:
raise RuntimeError('| Please set these value manually: if_discrete=bool, action_dim=int, action_max=1.0')
print(f"\n| env_name: {env_name}, action if_discrete: {if_discrete}"
f"\n| state_dim: {state_dim}, action_dim: {action_dim}, action_max: {action_max}"
f"\n| max_step: {max_step:4}, target_return: {target_return}") if if_print else None
return env_name, state_dim, action_dim, action_max, max_step, if_discrete, target_return
| en | 0.77642 | # Block warning # environment wrapper # sometimes state_dim is a list # for discrete action space # for continuous action space | 2.625421 | 3 |
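# ----------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the ElegantRL record
# above. It drives the PreprocessEnv wrapper from that record for one
# random-action episode; the environment name and the classic gym API
# (reset() -> state, step() -> state, reward, done, info) are assumptions
# that depend on the installed gym version.
import numpy as np

def random_rollout(env_name="Pendulum-v1", max_steps=200):
    env = PreprocessEnv(env=env_name, if_print=True)  # wrapper defined in the record above
    state = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        # the wrapper rescales actions by action_max, so sample in [-1, 1]
        action = np.random.uniform(-1.0, 1.0, size=env.action_dim)
        state, reward, done, _ = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward
# ----------------------------------------------------------------------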
figures/tmb/pcawg/VICC_01_R2/analysis.py | OmnesRes/ATGC | 0 | 6631646 | import numpy as np
import pickle
import pandas as pd
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import r2_score
disable_eager_execution()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[-1], True)
tf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC')]
import sys
sys.path.append(str(cwd))
from model.CustomKerasModels import InputFeatures, ATGC
from model.CustomKerasTools import BatchGenerator, Losses
D, samples, maf, sample_df = pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'data' / 'data.pkl', 'rb'))
panels = pickle.load(open(cwd / 'files' / 'tcga_panel_table.pkl', 'rb'))
strand_emb_mat = np.concatenate([np.zeros(2)[np.newaxis, :], np.diag(np.ones(2))], axis=0)
D['strand_emb'] = strand_emb_mat[D['strand'].astype(int)]
chr_emb_mat = np.concatenate([np.zeros(24)[np.newaxis, :], np.diag(np.ones(24))], axis=0)
D['chr_emb'] = chr_emb_mat[D['chr']]
frame_emb_mat = np.concatenate([np.zeros(3)[np.newaxis, :], np.diag(np.ones(3))], axis=0)
D['cds_emb'] = frame_emb_mat[D['cds'].astype(int)]
hist_emb_mat = np.concatenate([np.zeros(samples['histology'].shape[1])[np.newaxis, :], np.diag(np.ones(samples['histology'].shape[1]))], axis=0)
samples['hist_emb'] = hist_emb_mat[np.argmax(samples['histology'], axis=-1)]
##bin position
def pos_one_hot(pos):
one_pos = int(pos * 100)
return one_pos, (pos * 100) - one_pos
result = np.apply_along_axis(pos_one_hot, -1, D['pos_float'][:, np.newaxis])
D['pos_bin'] = np.stack(result[:, 0]) + 1
D['pos_loc'] = np.stack(result[:, 1])
sample_features = ()
# set y label
y_label = np.log(sample_df['non_syn_counts'].values/(panels.loc[panels['Panel'] == 'Agilent_kit']['cds'].values[0]/1e6) + 1)[:, np.newaxis]
y_strat = np.argmax(samples['histology'], axis=-1)
y_label = np.repeat(y_label, 3, axis=-1)
metrics = [Losses.Weighted.QuantileLoss.quantile_loss]
all_features = [[InputFeatures.OnesLike({'position': D['pos_float'][:, np.newaxis]})],
[InputFeatures.VariantPositionBin(
24, 100, {'position_loc': D['pos_loc'], 'position_bin': D['pos_bin'], 'chromosome': D['chr']})],
[InputFeatures.VariantSequence(6, 4, 2, [16, 16, 8, 8],
{'5p': D['seq_5p'], '3p': D['seq_3p'], 'ref': D['seq_ref'], 'alt': D['seq_alt'], 'strand': D['strand_emb'], 'cds': D['cds_emb']},
fusion_dimension=32,
use_frame=False)]
]
all_weights = [pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'run_naive.pkl', 'rb')),
pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'run_position.pkl', 'rb')),
pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'run_sequence.pkl', 'rb'))
]
results = {}
for features, weights, name in zip(all_features, all_weights, ['naive', 'position', 'sequence']):
atgc = ATGC(features, aggregation_dimension=64, fusion_dimension=32, sample_features=sample_features)
atgc.build_instance_encoder_model(return_latent=False)
atgc.build_sample_encoder_model()
atgc.build_mil_model(output_dim=8, output_extra=1, output_type='quantiles', aggregation='recursion', mil_hidden=(16,))
atgc.mil_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=Losses.Weighted.QuantileLoss.quantile_loss_weighted, metrics=metrics)
##test eval
test_idx = []
predictions = []
genes = []
evaluations = []
for index, (idx_train, idx_test) in enumerate(StratifiedKFold(n_splits=8, random_state=0, shuffle=True).split(y_strat, y_strat)):
atgc.mil_model.set_weights(weights[index])
data_test = next(BatchGenerator(x_instance_sample_idx=D['sample_idx'], x_instance_features=features, x_sample=sample_features,
y_label=y_label, y_stratification=y_strat, sampling_approach=None, idx_sample=idx_test).data_generator())
evaluations.append(atgc.mil_model.evaluate(data_test[0], data_test[1])[1])
predictions.append(atgc.mil_model.predict(data_test[0])[0, :, :-1])
test_idx.append(idx_test)
#mse
print(round(np.mean((y_label[:, 0][np.concatenate(test_idx)] - np.concatenate(predictions)[:, 1])**2), 4))
#mae
print(round(np.mean(np.absolute(y_label[:, 0][np.concatenate(test_idx)] - np.concatenate(predictions)[:, 1])), 4))
#r2
print(round(r2_score(np.concatenate(predictions)[:, 1], y_label[:, 0][np.concatenate(test_idx)]), 4))
results[name] = np.concatenate(predictions)
results['y_true'] = y_label[np.concatenate(test_idx)]
##counting has to be nonsyn to nonsyn
non_syn = ['Missense_Mutation', 'Nonsense_Mutation', 'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Nonstop_Mutation']
panel_counts = maf[['Variant_Classification', 'Tumor_Sample_Barcode']].groupby('Tumor_Sample_Barcode').apply(lambda x: pd.Series([len(x), (x['Variant_Classification'].isin(non_syn)).sum()], index=['panel_all_counts', 'panel_non_syn_counts']))
sample_df = pd.merge(sample_df, panel_counts, how='left', on='Tumor_Sample_Barcode')
sample_df.fillna({'panel_non_syn_counts': 0}, inplace=True)
results['counting'] = np.log(sample_df['panel_non_syn_counts'].values[np.concatenate(test_idx)] / (panels.loc[panels['Panel'] == 'VICC-01-R2']['cds'].values[0]/1e6) + 1)
with open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'predictions.pkl', 'wb') as f:
pickle.dump(results, f)
##counting stats
#mse
print(round(np.mean((y_label[:, 0][np.concatenate(test_idx)] - results['counting'])**2), 4))
#mae
print(round(np.mean(np.absolute((y_label[:, 0][np.concatenate(test_idx)] - results['counting']))), 4))
#r2
print(round(r2_score(results['counting'], y_label[np.concatenate(test_idx)][:, 0]), 4))
| import numpy as np
import pickle
import pandas as pd
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import r2_score
disable_eager_execution()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[-1], True)
tf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC')]
import sys
sys.path.append(str(cwd))
from model.CustomKerasModels import InputFeatures, ATGC
from model.CustomKerasTools import BatchGenerator, Losses
D, samples, maf, sample_df = pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'data' / 'data.pkl', 'rb'))
panels = pickle.load(open(cwd / 'files' / 'tcga_panel_table.pkl', 'rb'))
strand_emb_mat = np.concatenate([np.zeros(2)[np.newaxis, :], np.diag(np.ones(2))], axis=0)
D['strand_emb'] = strand_emb_mat[D['strand'].astype(int)]
chr_emb_mat = np.concatenate([np.zeros(24)[np.newaxis, :], np.diag(np.ones(24))], axis=0)
D['chr_emb'] = chr_emb_mat[D['chr']]
frame_emb_mat = np.concatenate([np.zeros(3)[np.newaxis, :], np.diag(np.ones(3))], axis=0)
D['cds_emb'] = frame_emb_mat[D['cds'].astype(int)]
hist_emb_mat = np.concatenate([np.zeros(samples['histology'].shape[1])[np.newaxis, :], np.diag(np.ones(samples['histology'].shape[1]))], axis=0)
samples['hist_emb'] = hist_emb_mat[np.argmax(samples['histology'], axis=-1)]
##bin position
def pos_one_hot(pos):
one_pos = int(pos * 100)
return one_pos, (pos * 100) - one_pos
result = np.apply_along_axis(pos_one_hot, -1, D['pos_float'][:, np.newaxis])
D['pos_bin'] = np.stack(result[:, 0]) + 1
D['pos_loc'] = np.stack(result[:, 1])
sample_features = ()
# set y label
y_label = np.log(sample_df['non_syn_counts'].values/(panels.loc[panels['Panel'] == 'Agilent_kit']['cds'].values[0]/1e6) + 1)[:, np.newaxis]
y_strat = np.argmax(samples['histology'], axis=-1)
y_label = np.repeat(y_label, 3, axis=-1)
metrics = [Losses.Weighted.QuantileLoss.quantile_loss]
all_features = [[InputFeatures.OnesLike({'position': D['pos_float'][:, np.newaxis]})],
[InputFeatures.VariantPositionBin(
24, 100, {'position_loc': D['pos_loc'], 'position_bin': D['pos_bin'], 'chromosome': D['chr']})],
[InputFeatures.VariantSequence(6, 4, 2, [16, 16, 8, 8],
{'5p': D['seq_5p'], '3p': D['seq_3p'], 'ref': D['seq_ref'], 'alt': D['seq_alt'], 'strand': D['strand_emb'], 'cds': D['cds_emb']},
fusion_dimension=32,
use_frame=False)]
]
all_weights = [pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'run_naive.pkl', 'rb')),
pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'run_position.pkl', 'rb')),
pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'run_sequence.pkl', 'rb'))
]
results = {}
for features, weights, name in zip(all_features, all_weights, ['naive', 'position', 'sequence']):
atgc = ATGC(features, aggregation_dimension=64, fusion_dimension=32, sample_features=sample_features)
atgc.build_instance_encoder_model(return_latent=False)
atgc.build_sample_encoder_model()
atgc.build_mil_model(output_dim=8, output_extra=1, output_type='quantiles', aggregation='recursion', mil_hidden=(16,))
atgc.mil_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=Losses.Weighted.QuantileLoss.quantile_loss_weighted, metrics=metrics)
##test eval
test_idx = []
predictions = []
genes = []
evaluations = []
for index, (idx_train, idx_test) in enumerate(StratifiedKFold(n_splits=8, random_state=0, shuffle=True).split(y_strat, y_strat)):
atgc.mil_model.set_weights(weights[index])
data_test = next(BatchGenerator(x_instance_sample_idx=D['sample_idx'], x_instance_features=features, x_sample=sample_features,
y_label=y_label, y_stratification=y_strat, sampling_approach=None, idx_sample=idx_test).data_generator())
evaluations.append(atgc.mil_model.evaluate(data_test[0], data_test[1])[1])
predictions.append(atgc.mil_model.predict(data_test[0])[0, :, :-1])
test_idx.append(idx_test)
#mse
print(round(np.mean((y_label[:, 0][np.concatenate(test_idx)] - np.concatenate(predictions)[:, 1])**2), 4))
#mae
print(round(np.mean(np.absolute(y_label[:, 0][np.concatenate(test_idx)] - np.concatenate(predictions)[:, 1])), 4))
#r2
print(round(r2_score(np.concatenate(predictions)[:, 1], y_label[:, 0][np.concatenate(test_idx)]), 4))
results[name] = np.concatenate(predictions)
results['y_true'] = y_label[np.concatenate(test_idx)]
##counting has to be nonsyn to nonsyn
non_syn = ['Missense_Mutation', 'Nonsense_Mutation', 'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Nonstop_Mutation']
panel_counts = maf[['Variant_Classification', 'Tumor_Sample_Barcode']].groupby('Tumor_Sample_Barcode').apply(lambda x: pd.Series([len(x), (x['Variant_Classification'].isin(non_syn)).sum()], index=['panel_all_counts', 'panel_non_syn_counts']))
sample_df = pd.merge(sample_df, panel_counts, how='left', on='Tumor_Sample_Barcode')
sample_df.fillna({'panel_non_syn_counts': 0}, inplace=True)
results['counting'] = np.log(sample_df['panel_non_syn_counts'].values[np.concatenate(test_idx)] / (panels.loc[panels['Panel'] == 'VICC-01-R2']['cds'].values[0]/1e6) + 1)
with open(cwd / 'figures' / 'tmb' / 'pcawg' / 'VICC_01_R2' / 'results' / 'predictions.pkl', 'wb') as f:
pickle.dump(results, f)
##counting stats
#mse
print(round(np.mean((y_label[:, 0][np.concatenate(test_idx)] - results['counting'])**2), 4))
#mae
print(round(np.mean(np.absolute((y_label[:, 0][np.concatenate(test_idx)] - results['counting']))), 4))
#r2
print(round(r2_score(results['counting'], y_label[np.concatenate(test_idx)][:, 0]), 4))
| en | 0.455479 | ##bin position # set y label ##test eval #mse #mae #r2 ##counting has to be nonsyn to nonsyn ##counting stats #mse #mae #r2 | 1.740971 | 2 |
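# ----------------------------------------------------------------------
# Editor's note: hedged sketch, not part of the ATGC analysis above. The
# record collects out-of-fold predictions over a StratifiedKFold split and
# reports MSE / MAE / R^2 on the concatenated hold-out sets; the same
# pattern with a stand-in sklearn model looks like this. (Note that
# sklearn's r2_score is documented as r2_score(y_true, y_pred), in that
# order.) Names below are illustrative assumptions; each stratum must have
# at least n_splits members for the split to succeed.
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LinearRegression  # stand-in for the MIL model
from sklearn.metrics import r2_score

def out_of_fold_metrics(X, y, strata, n_splits=8, seed=0):
    test_idx, preds = [], []
    skf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)
    for idx_train, idx_test in skf.split(X, strata):
        model = LinearRegression().fit(X[idx_train], y[idx_train])
        preds.append(model.predict(X[idx_test]))
        test_idx.append(idx_test)
    y_true = y[np.concatenate(test_idx)]
    y_pred = np.concatenate(preds)
    mse = np.mean((y_true - y_pred) ** 2)
    mae = np.mean(np.abs(y_true - y_pred))
    return mse, mae, r2_score(y_true, y_pred)
# ----------------------------------------------------------------------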
applications/incompressible_fluid_application/python_scripts/monolithic_solver_lagrangian_compressible_two_fluids_splited.py | lcirrott/Kratos | 2 | 6631647 | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.PFEMApplication import *
from KratosMultiphysics.MeshingApplication import *
from KratosMultiphysics.ExternalSolversApplication import *
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(VELOCITY)
model_part.AddNodalSolutionStepVariable(ACCELERATION)
model_part.AddNodalSolutionStepVariable(MESH_VELOCITY)
model_part.AddNodalSolutionStepVariable(PRESSURE)
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE)
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE)
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_DT)
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_DT)
model_part.AddNodalSolutionStepVariable(IS_FLUID)
model_part.AddNodalSolutionStepVariable(IS_WATER)
model_part.AddNodalSolutionStepVariable(IS_VISITED)
model_part.AddNodalSolutionStepVariable(IS_POROUS)
model_part.AddNodalSolutionStepVariable(IS_STRUCTURE)
model_part.AddNodalSolutionStepVariable(IS_FREE_SURFACE)
model_part.AddNodalSolutionStepVariable(IS_INTERFACE)
model_part.AddNodalSolutionStepVariable(IS_BOUNDARY)
model_part.AddNodalSolutionStepVariable(ERASE_FLAG)
model_part.AddNodalSolutionStepVariable(DISPLACEMENT)
model_part.AddNodalSolutionStepVariable(VISCOSITY)
model_part.AddNodalSolutionStepVariable(VISCOSITY_AIR)
model_part.AddNodalSolutionStepVariable(VISCOSITY_WATER)
model_part.AddNodalSolutionStepVariable(DENSITY)
model_part.AddNodalSolutionStepVariable(DENSITY_AIR)
model_part.AddNodalSolutionStepVariable(DENSITY_WATER)
model_part.AddNodalSolutionStepVariable(AIR_SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(WATER_SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(BODY_FORCE)
model_part.AddNodalSolutionStepVariable(NODAL_AREA)
model_part.AddNodalSolutionStepVariable(NODAL_H)
model_part.AddNodalSolutionStepVariable(ADVPROJ)
model_part.AddNodalSolutionStepVariable(DIVPROJ)
model_part.AddNodalSolutionStepVariable(THAWONE)
model_part.AddNodalSolutionStepVariable(THAWTWO)
model_part.AddNodalSolutionStepVariable(REACTION)
model_part.AddNodalSolutionStepVariable(REACTION_WATER_PRESSURE)
model_part.AddNodalSolutionStepVariable(EXTERNAL_PRESSURE)
model_part.AddNodalSolutionStepVariable(ARRHENIUS)
model_part.AddNodalSolutionStepVariable(DISTANCE)
model_part.AddNodalSolutionStepVariable(AUX_INDEX)
print("variables for monolithic solver lagrangian compressible solution added correctly")
def AddDofs(model_part):
for node in model_part.Nodes:
# adding dofs
node.AddDof(VELOCITY_X, REACTION_X)
node.AddDof(VELOCITY_Y, REACTION_Y)
node.AddDof(VELOCITY_Z, REACTION_Z)
node.AddDof(WATER_PRESSURE, REACTION_WATER_PRESSURE)
node.AddDof(AIR_PRESSURE, REACTION_AIR_PRESSURE)
print("dofs for the monolithic solver lagrangian compressible added correctly")
class MonolithicSolver:
#
def __init__(self, model_part, domain_size, box_corner1, box_corner2):
self.model_part = model_part
self.alpha = -0.1
self.move_mesh_strategy = 2
self.time_scheme = ResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible(
self.alpha, self.move_mesh_strategy)
# definition of the solvers
# self.linear_solver = SkylineLUFactorizationSolver()
# self.linear_solver =SuperLUSolver()
pPrecond = DiagonalPreconditioner()
# pPrecond = ILU0Preconditioner()
self.linear_solver = BICGSTABSolver(1e-6, 5000, pPrecond)
# definition of the convergence criteria
# self.conv_criteria = UPCriteria(1e-7,1e-9,1e-7,1e-9)
self.conv_criteria = UPCriteria(1e-5, 1e-6, 1e-5, 1e-6)
self.max_iter = 2
self.SetDivided = ElemBasedBCUtilities(model_part)
self.ChooseElement = ChooseElementProcess(model_part, 2)
# default settings
self.echo_level = 1
self.CalculateReactionFlag = False
self.ReformDofSetAtEachStep = True
self.CalculateNormDxFlag = True
self.MoveMeshFlag = True
self.remeshing_flag = True
# MESH CHANGES
self.PfemUtils = PfemUtils()
self.MeshMover = MoveMeshProcess(self.model_part)
self.node_erase_process = NodeEraseProcess(model_part)
# self.Mesher = TriGenPFEMModeler()
# self.Mesher = MSuitePFEMModeler()
self.Mesher = TriGenPFEMSegment()
self.neigh_finder = FindNodalNeighboursProcess(model_part, 9, 18)
self.elem_neighbor_finder = FindElementalNeighboursProcess(
model_part, 2, 10)
self.alpha_shape = 10000.0
self.h_factor = 0.5
# assign IS_FLUID to all nodes
# for node in self.model_part.Nodes:
# node.SetSolutionStepValue(IS_FLUID,0,1.0)
# detecting free_surface to all nodes
for node in self.model_part.Nodes:
if (node.GetSolutionStepValue(IS_BOUNDARY) == 1 and node.GetSolutionStepValue(IS_STRUCTURE) != 1):
node.SetSolutionStepValue(IS_FREE_SURFACE, 0, 1.0)
# U NEED IT FOR ALPHA-shape
(self.neigh_finder).Execute()
self.Hfinder = FindNodalHProcess(model_part)
(self.Hfinder).Execute()
# runtime box
self.box_corner1 = box_corner1
self.box_corner2 = box_corner2
#
def Initialize(self, output_time_increment):
# creating the solution strategy
self.solver = NewtonRaphsonStrategy(
self.model_part,
self.time_scheme,
self.linear_solver,
self.conv_criteria,
self.max_iter,
self.CalculateReactionFlag,
self.ReformDofSetAtEachStep,
self.MoveMeshFlag)
(self.solver).SetEchoLevel(self.echo_level)
# time increment for output
self.output_time_increment = output_time_increment
self.next_output_time = self.output_time_increment
# self.CalculateDistanceAndDiviedSet(2);
# (self.neigh_finder).Execute();
# FIND NEIGHBOUR ELEMENTS AND COLORing
# (self.elem_neighbor_finder).ClearNeighbours()
# (self.elem_neighbor_finder).Execute()
# (self.PfemUtils).ColourAirWaterElement(self.model_part,2)
#
def Solve(self, time, gid_io):
# (self.neigh_finder).Execute();
# (self.solver).Solve()
# print"After solve before clear"
# (self.solver).Clear()
# print"After clear"
# (self.PfemUtils).MarkOuterNodes(self.box_corner1,self.box_corner2,(self.model_part).Nodes );
# (self.PfemUtils).MarkExcessivelyCloseNodes((self.model_part).Nodes, .05)
# (self.node_erase_process).Execute();
# self.Remesh()
# self.OutputStep(time,gid_io)
self.CalculateDistanceAndDiviedSet(2)
# self.AssignH()
# self.ImplosionDistToH()
# (FindElementalNeighboursProcess(self.model_part, 2, 10)).Execute()
(self.solver).Predict()
print("AFTER PREDICT")
self.Remesh()
print("AFTER REMESH")
self.DistToH()
(self.solver).Solve()
print("AFTER SOLVE")
(self.PfemUtils).MoveNodes(self.model_part)
print("AFTER Move")
(self.solver).Clear()
self.OutputStep(time, gid_io)
#
def EstimateDeltaTime(self, min_dt, max_dt):
print("Estimating delta time")
calc_dt = (
self.PfemUtils).EstimateDeltaTime(
min_dt,
max_dt,
self.model_part)
print("calculated dt")
return calc_dt
# def EstimateDeltaTime(self,min_dt,max_dt):
# print "Estimating delta time"
# return (self.UlfUtils).EstimateDeltaTime(max_dt,domain_size)
#
def SetEchoLevel(self, level):
(self.solver).SetEchoLevel(level)
#
# def Remesh(self):
#
# if (self.remeshing_flag==True):
# print "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
# (self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, True, self.alpha_shape, self.h_factor)
# (self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
# print "AAAAAAAAAAFFFFFFFFFFFFFTTTTTTTTTTTTTERRRRRRRRRRRRRR"
# calculating fluid neighbours before applying boundary conditions
# (self.neigh_finder).Execute();
#
def Remesh(self):
if (self.remeshing_flag):
(self.PfemUtils).MoveLonelyNodes(self.model_part)
#(self.MeshMover).Execute();
print(self.box_corner1)
(self.PfemUtils).MarkOuterNodes(
self.box_corner1, self.box_corner2, (self.model_part).Nodes)
(self.PfemUtils).MarkNodesTouchingWall(self.model_part, 2, .05)
(self.PfemUtils).MarkExcessivelyCloseNodes(
(self.model_part).Nodes, 0.5)
(self.PfemUtils).MarkNodesTouchingInterface(self.model_part, 2, .1)
# FIND NEIGHBOUR ELEMENTS AND COLORing
(self.elem_neighbor_finder).ClearNeighbours()
(self.elem_neighbor_finder).Execute()
(self.PfemUtils).ColourAirWaterElement(self.model_part, 2)
#
# (self.PfemUtils).InterfaceDetecting(self.model_part,2, .9)
# (self.PfemUtils).ChangeWallWaterFlag(self.model_part,2)
# (self.PfemUtils).ChangeInterfaceWaterFlag(self.model_part,2)
# for node in (self.model_part).Nodes:
# if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
# print node.GetValue(ERASE_FLAG)
            #(self.node_erase_process).Execute(); to be able to compute neighbors, the erase process is done inside the mesher
(self.neigh_finder).ClearNeighbours()
(self.neigh_finder).Execute()
# ((self.model_part).Elements).clear();
# ((self.model_part).Conditions).clear();
(self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",
self.model_part, self.node_erase_process, True, True, self.alpha_shape, self.h_factor)
# (self.Mesher).ReGenerateMesh("ASGSCOMPPRDC2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
(self.elem_neighbor_finder).ClearNeighbours()
(self.elem_neighbor_finder).Execute()
# (self.neigh_finder).Execute();
(self.PfemUtils).ColourAirWaterElement(self.model_part, 2)
(self.PfemUtils).InterfaceDetecting(self.model_part, 2, .9)
(self.ChooseElement).Execute()
# calculating fluid neighbours before applying boundary conditions
(self.neigh_finder).ClearNeighbours()
(self.neigh_finder).Execute()
(self.PfemUtils).ApplyBoundaryConditions(self.model_part, 2)
(self.PfemUtils).IdentifyFluidNodes(self.model_part)
# (self.PfemUtils).ApplyMinimalPressureConditions(self.model_part);
# (self.PfemUtils).InterfaceDetecting(self.model_part,2, .9)
# (self.PfemUtils).ChangeWallWaterFlag(self.model_part,2)
# (self.PfemUtils).ChangeInterfaceWaterFlag(self.model_part,2)
# (self.PfemUtils).ColourAirWaterElement(self.model_part,2)
# for node in self.model_part.Nodes:
# node.SetSolutionStepValue(IS_FREE_SURFACE,0,0.0)
#
# for node in self.model_part.Nodes:
# if (node.GetSolutionStepValue(IS_BOUNDARY)==1 and node.GetSolutionStepValue(IS_STRUCTURE)!=1):
# node.SetSolutionStepValue(IS_FREE_SURFACE,0,1.0)
#
def FindNeighbours(self):
(self.neigh_finder).Execute()
#
def OutputStep(self, time, gid_io):
if(time >= self.next_output_time):
self.next_output_time = self.next_output_time + \
self.output_time_increment
# writing mesh
gid_io.InitializeMesh(time)
gid_io.WriteNodeMesh((self.model_part).GetMesh())
gid_io.WriteMesh((self.model_part).GetMesh())
gid_io.FinalizeMesh()
gid_io.InitializeResults(time, (self.model_part).GetMesh())
gid_io.WriteNodalResults(
PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
EXTERNAL_PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_FREE_SURFACE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_BOUNDARY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_STRUCTURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_INTERFACE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
MESH_VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DENSITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
AIR_PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
WATER_PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DENSITY_AIR,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DENSITY_WATER,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
AIR_SOUND_VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
WATER_SOUND_VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_FLUID,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_WATER,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
NODAL_H,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DISTANCE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DISPLACEMENT,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_VISITED,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
AUX_INDEX,
(self.model_part).Nodes,
time,
0)
gid_io.PrintOnGaussPoints(IS_WATER_ELEMENT, self.model_part, time)
gid_io.Flush()
gid_io.FinalizeResults()
#
def CalculateDistanceAndDiviedSet(self, domain_size):
(self.neigh_finder).Execute()
distance_tools = ElemBasedDistanceUtilities(self.model_part)
distance_calculator = BodyDistanceCalculationUtils()
        # assign IS_VISITED1 to elem with DISTANCE>=0 and change DISTANCE to positive for external ones
# Assign Zero distance to interface nodes
for node in (self.model_part).Nodes:
if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
node.SetSolutionStepValue(DISTANCE, 0, 0.0)
distance_tools.MarkExternalAndMixedNodes()
distance_tools.ChangeSignToDistance()
# calculate distances towards the interior of the domain
if(domain_size == 2):
distance_calculator.CalculateDistances2D(
(self.model_part).Elements,
DISTANCE,
True)
else:
distance_calculator.CalculateDistances3D(
(self.model_part).Elements,
DISTANCE,
True)
# change sign
distance_tools.ChangeSignToDistance()
# mark as visited all of the nodes inside the fluid domain
distance_tools.MarkInternalAndMixedNodes()
print(((self.model_part).Elements).Size())
# calculate distances towards the outside
if(domain_size == 2):
distance_calculator.CalculateDistances2D(
(self.model_part).Elements,
DISTANCE,
True)
else:
distance_calculator.CalculateDistances3D(
(self.model_part).Elements,
DISTANCE,
True)
# Decide IS_WATER flag due to DISTANCE
# for node in (self.model_part).Nodes:
# if(node.GetSolutionStepValue(DISTANCE)<= 0.0):
# node.SetSolutionStepValue(IS_WATER,0,0.0)
# else:
# node.SetSolutionStepValue(IS_WATER,0,1.0)
# if(node.GetSolutionStepValue(DISTANCE)== 0.0):
# print"This node has distance zero, is_interface is assigned"
# node.SetSolutionStepValue(IS_INTERFACE,0,1.0)
# node.SetSolutionStepValue(IS_VISITED,0,1.0)
# save as distance of the old time step
distance_tools.SaveScalarVariableToOldStep(DISTANCE)
print("finished RecalculateDistanceFunction")
# (self.SetDivided).SetDividedElem_2D()
print(">>>>>ELEMENTS ARE DIVIDED<<<<<<<<<<<<")
#
def DistToH(self):
possible_h = self.CalculateRadius()
print(possible_h)
min_H = possible_h * 3.14 / 200
# min_H = .0007#0.001
sec_min_H = 10 * min_H # .004
max_H = .02
ref_dist = 4 * min_H
sec_ref_dist = 20 * min_H
third_ref_dist = 200 * min_H
slope = (sec_min_H - min_H) / (sec_ref_dist - ref_dist)
second_slope = (max_H - sec_min_H) / (third_ref_dist - sec_ref_dist)
# search for min an max of H
# for node in (self.model_part).Nodes:
# node_H = node.GetSolutionStepValue(NODAL_H,0)
# if(node_H<self.min_H):
# self.min_H = node_H
# else:
# if(node_H > self.max_H):
# self.max_H = node_H
# H = H + dist * dist
# print ">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<"
for node in (self.model_part).Nodes:
current_dist = node.GetSolutionStepValue(DISTANCE, 0)
if(abs(current_dist) <= ref_dist):
node_H = min_H # + slope*abs(current_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(ref_dist < abs(current_dist) and abs(current_dist) <= sec_ref_dist):
node_H = min_H + slope * (abs(current_dist) - ref_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(sec_ref_dist < abs(current_dist) and abs(current_dist) <= third_ref_dist):
node_H = sec_min_H + second_slope * \
(abs(current_dist) - sec_ref_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(abs(current_dist) > third_ref_dist):
node_H = max_H
node.SetSolutionStepValue(NODAL_H, 0, node_H)
# assign new value
# node.SetSolutionStepValue(NODAL_H,0,node_H)
# NearboundaryH
(self.PfemUtils).AssignNearBoundaryH(self.model_part, 5.0)
#
def CalculateRadius(self):
max_radi = 0.0
for node in (self.model_part).Nodes:
if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
X_ref = node.X
Y_ref = node.Y
for node in (self.model_part).Nodes:
if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
radi = pow(node.X - X_ref, 2) + pow(node.Y - Y_ref, 2)
if(radi > max_radi):
max_radi = radi
max_radi = pow(max_radi, 0.5)
return max_radi
#
def AssignH(self):
for node in (self.model_part).Nodes:
if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
node.SetSolutionStepValue(NODAL_H, 0, .03)
else:
node.SetSolutionStepValue(NODAL_H, 0, .1)
print(">>>>>HHHHHH ASSIGNMENT<<<<<<<<<<<<")
#
#
def ImplosionDistToH(self):
min_H = .0005
max_H = .05
ref_dist = .0025
tol = .001
slope = (max_H - min_H) / ref_dist
# search for min an max of H
# for node in (self.model_part).Nodes:
# node_H = node.GetSolutionStepValue(NODAL_H,0)
# if(node_H<self.min_H):
# self.min_H = node_H
# else:
# if(node_H > self.max_H):
# self.max_H = node_H
# H = H + dist * dist
print(">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<")
for node in (self.model_part).Nodes:
current_dist = node.GetSolutionStepValue(DISTANCE, 0)
if(current_dist > tol):
if(abs(current_dist) <= ref_dist):
node_H = min_H + slope * abs(current_dist)
else:
node_H = max_H
if(current_dist < -tol):
node_H = min_H
# assign new value
node.SetSolutionStepValue(NODAL_H, 0, node_H)
print(">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<")
#
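# ----------------------------------------------------------------------
# Editor's note: hedged sketch, not part of the Kratos script above (a
# duplicate copy of that script continues below). DistToH() assigns NODAL_H
# with a piecewise-linear map of the distance to the interface: a minimum h
# near the interface, two linear ramps, and a maximum h far away. The
# stand-alone function below mirrors that shape with illustrative names.
def nodal_h_from_distance(dist, min_h, sec_min_h, max_h, ref, sec_ref, third_ref):
    """Piecewise-linear mesh size as a function of |distance to interface|."""
    d = abs(dist)
    if d <= ref:
        return min_h
    if d <= sec_ref:
        return min_h + (sec_min_h - min_h) / (sec_ref - ref) * (d - ref)
    if d <= third_ref:
        return sec_min_h + (max_h - sec_min_h) / (third_ref - sec_ref) * (d - sec_ref)
    return max_h

# Example with proportions similar to the record (min_H, 10*min_H, 0.02, 4/20/200*min_H):
min_h = 0.001
print(nodal_h_from_distance(0.0005, min_h, 10 * min_h, 0.02, 4 * min_h, 20 * min_h, 200 * min_h))
# ----------------------------------------------------------------------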
| from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.PFEMApplication import *
from KratosMultiphysics.MeshingApplication import *
from KratosMultiphysics.ExternalSolversApplication import *
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(VELOCITY)
model_part.AddNodalSolutionStepVariable(ACCELERATION)
model_part.AddNodalSolutionStepVariable(MESH_VELOCITY)
model_part.AddNodalSolutionStepVariable(PRESSURE)
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE)
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE)
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_DT)
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_DT)
model_part.AddNodalSolutionStepVariable(IS_FLUID)
model_part.AddNodalSolutionStepVariable(IS_WATER)
model_part.AddNodalSolutionStepVariable(IS_VISITED)
model_part.AddNodalSolutionStepVariable(IS_POROUS)
model_part.AddNodalSolutionStepVariable(IS_STRUCTURE)
model_part.AddNodalSolutionStepVariable(IS_FREE_SURFACE)
model_part.AddNodalSolutionStepVariable(IS_INTERFACE)
model_part.AddNodalSolutionStepVariable(IS_BOUNDARY)
model_part.AddNodalSolutionStepVariable(ERASE_FLAG)
model_part.AddNodalSolutionStepVariable(DISPLACEMENT)
model_part.AddNodalSolutionStepVariable(VISCOSITY)
model_part.AddNodalSolutionStepVariable(VISCOSITY_AIR)
model_part.AddNodalSolutionStepVariable(VISCOSITY_WATER)
model_part.AddNodalSolutionStepVariable(DENSITY)
model_part.AddNodalSolutionStepVariable(DENSITY_AIR)
model_part.AddNodalSolutionStepVariable(DENSITY_WATER)
model_part.AddNodalSolutionStepVariable(AIR_SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(WATER_SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(BODY_FORCE)
model_part.AddNodalSolutionStepVariable(NODAL_AREA)
model_part.AddNodalSolutionStepVariable(NODAL_H)
model_part.AddNodalSolutionStepVariable(ADVPROJ)
model_part.AddNodalSolutionStepVariable(DIVPROJ)
model_part.AddNodalSolutionStepVariable(THAWONE)
model_part.AddNodalSolutionStepVariable(THAWTWO)
model_part.AddNodalSolutionStepVariable(REACTION)
model_part.AddNodalSolutionStepVariable(REACTION_WATER_PRESSURE)
model_part.AddNodalSolutionStepVariable(EXTERNAL_PRESSURE)
model_part.AddNodalSolutionStepVariable(ARRHENIUS)
model_part.AddNodalSolutionStepVariable(DISTANCE)
model_part.AddNodalSolutionStepVariable(AUX_INDEX)
print("variables for monolithic solver lagrangian compressible solution added correctly")
def AddDofs(model_part):
for node in model_part.Nodes:
# adding dofs
node.AddDof(VELOCITY_X, REACTION_X)
node.AddDof(VELOCITY_Y, REACTION_Y)
node.AddDof(VELOCITY_Z, REACTION_Z)
node.AddDof(WATER_PRESSURE, REACTION_WATER_PRESSURE)
node.AddDof(AIR_PRESSURE, REACTION_AIR_PRESSURE)
print("dofs for the monolithic solver lagrangian compressible added correctly")
class MonolithicSolver:
#
def __init__(self, model_part, domain_size, box_corner1, box_corner2):
self.model_part = model_part
self.alpha = -0.1
self.move_mesh_strategy = 2
self.time_scheme = ResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible(
self.alpha, self.move_mesh_strategy)
# definition of the solvers
# self.linear_solver = SkylineLUFactorizationSolver()
# self.linear_solver =SuperLUSolver()
pPrecond = DiagonalPreconditioner()
# pPrecond = ILU0Preconditioner()
self.linear_solver = BICGSTABSolver(1e-6, 5000, pPrecond)
# definition of the convergence criteria
# self.conv_criteria = UPCriteria(1e-7,1e-9,1e-7,1e-9)
self.conv_criteria = UPCriteria(1e-5, 1e-6, 1e-5, 1e-6)
self.max_iter = 2
self.SetDivided = ElemBasedBCUtilities(model_part)
self.ChooseElement = ChooseElementProcess(model_part, 2)
# default settings
self.echo_level = 1
self.CalculateReactionFlag = False
self.ReformDofSetAtEachStep = True
self.CalculateNormDxFlag = True
self.MoveMeshFlag = True
self.remeshing_flag = True
# MESH CHANGES
self.PfemUtils = PfemUtils()
self.MeshMover = MoveMeshProcess(self.model_part)
self.node_erase_process = NodeEraseProcess(model_part)
# self.Mesher = TriGenPFEMModeler()
# self.Mesher = MSuitePFEMModeler()
self.Mesher = TriGenPFEMSegment()
self.neigh_finder = FindNodalNeighboursProcess(model_part, 9, 18)
self.elem_neighbor_finder = FindElementalNeighboursProcess(
model_part, 2, 10)
self.alpha_shape = 10000.0
self.h_factor = 0.5
# assign IS_FLUID to all nodes
# for node in self.model_part.Nodes:
# node.SetSolutionStepValue(IS_FLUID,0,1.0)
# detecting free_surface to all nodes
for node in self.model_part.Nodes:
if (node.GetSolutionStepValue(IS_BOUNDARY) == 1 and node.GetSolutionStepValue(IS_STRUCTURE) != 1):
node.SetSolutionStepValue(IS_FREE_SURFACE, 0, 1.0)
# U NEED IT FOR ALPHA-shape
(self.neigh_finder).Execute()
self.Hfinder = FindNodalHProcess(model_part)
(self.Hfinder).Execute()
# runtime box
self.box_corner1 = box_corner1
self.box_corner2 = box_corner2
#
def Initialize(self, output_time_increment):
# creating the solution strategy
self.solver = NewtonRaphsonStrategy(
self.model_part,
self.time_scheme,
self.linear_solver,
self.conv_criteria,
self.max_iter,
self.CalculateReactionFlag,
self.ReformDofSetAtEachStep,
self.MoveMeshFlag)
(self.solver).SetEchoLevel(self.echo_level)
# time increment for output
self.output_time_increment = output_time_increment
self.next_output_time = self.output_time_increment
# self.CalculateDistanceAndDiviedSet(2);
# (self.neigh_finder).Execute();
# FIND NEIGHBOUR ELEMENTS AND COLORing
# (self.elem_neighbor_finder).ClearNeighbours()
# (self.elem_neighbor_finder).Execute()
# (self.PfemUtils).ColourAirWaterElement(self.model_part,2)
#
def Solve(self, time, gid_io):
# (self.neigh_finder).Execute();
# (self.solver).Solve()
# print"After solve before clear"
# (self.solver).Clear()
# print"After clear"
# (self.PfemUtils).MarkOuterNodes(self.box_corner1,self.box_corner2,(self.model_part).Nodes );
# (self.PfemUtils).MarkExcessivelyCloseNodes((self.model_part).Nodes, .05)
# (self.node_erase_process).Execute();
# self.Remesh()
# self.OutputStep(time,gid_io)
self.CalculateDistanceAndDiviedSet(2)
# self.AssignH()
# self.ImplosionDistToH()
# (FindElementalNeighboursProcess(self.model_part, 2, 10)).Execute()
(self.solver).Predict()
print("AFTER PREDICT")
self.Remesh()
print("AFTER REMESH")
self.DistToH()
(self.solver).Solve()
print("AFTER SOLVE")
(self.PfemUtils).MoveNodes(self.model_part)
print("AFTER Move")
(self.solver).Clear()
self.OutputStep(time, gid_io)
#
def EstimateDeltaTime(self, min_dt, max_dt):
print("Estimating delta time")
calc_dt = (
self.PfemUtils).EstimateDeltaTime(
min_dt,
max_dt,
self.model_part)
print("calculated dt")
return calc_dt
# def EstimateDeltaTime(self,min_dt,max_dt):
# print "Estimating delta time"
# return (self.UlfUtils).EstimateDeltaTime(max_dt,domain_size)
#
def SetEchoLevel(self, level):
(self.solver).SetEchoLevel(level)
#
# def Remesh(self):
#
# if (self.remeshing_flag==True):
# print "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
# (self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, True, self.alpha_shape, self.h_factor)
# (self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
# print "AAAAAAAAAAFFFFFFFFFFFFFTTTTTTTTTTTTTERRRRRRRRRRRRRR"
# calculating fluid neighbours before applying boundary conditions
# (self.neigh_finder).Execute();
#
def Remesh(self):
if (self.remeshing_flag):
(self.PfemUtils).MoveLonelyNodes(self.model_part)
#(self.MeshMover).Execute();
print(self.box_corner1)
(self.PfemUtils).MarkOuterNodes(
self.box_corner1, self.box_corner2, (self.model_part).Nodes)
(self.PfemUtils).MarkNodesTouchingWall(self.model_part, 2, .05)
(self.PfemUtils).MarkExcessivelyCloseNodes(
(self.model_part).Nodes, 0.5)
(self.PfemUtils).MarkNodesTouchingInterface(self.model_part, 2, .1)
# FIND NEIGHBOUR ELEMENTS AND COLORing
(self.elem_neighbor_finder).ClearNeighbours()
(self.elem_neighbor_finder).Execute()
(self.PfemUtils).ColourAirWaterElement(self.model_part, 2)
#
# (self.PfemUtils).InterfaceDetecting(self.model_part,2, .9)
# (self.PfemUtils).ChangeWallWaterFlag(self.model_part,2)
# (self.PfemUtils).ChangeInterfaceWaterFlag(self.model_part,2)
# for node in (self.model_part).Nodes:
# if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
# print node.GetValue(ERASE_FLAG)
            #(self.node_erase_process).Execute(); to be able to compute neighbors, the erase process is done inside the mesher
(self.neigh_finder).ClearNeighbours()
(self.neigh_finder).Execute()
# ((self.model_part).Elements).clear();
# ((self.model_part).Conditions).clear();
(self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",
self.model_part, self.node_erase_process, True, True, self.alpha_shape, self.h_factor)
# (self.Mesher).ReGenerateMesh("ASGSCOMPPRDC2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
(self.elem_neighbor_finder).ClearNeighbours()
(self.elem_neighbor_finder).Execute()
# (self.neigh_finder).Execute();
(self.PfemUtils).ColourAirWaterElement(self.model_part, 2)
(self.PfemUtils).InterfaceDetecting(self.model_part, 2, .9)
(self.ChooseElement).Execute()
# calculating fluid neighbours before applying boundary conditions
(self.neigh_finder).ClearNeighbours()
(self.neigh_finder).Execute()
(self.PfemUtils).ApplyBoundaryConditions(self.model_part, 2)
(self.PfemUtils).IdentifyFluidNodes(self.model_part)
# (self.PfemUtils).ApplyMinimalPressureConditions(self.model_part);
# (self.PfemUtils).InterfaceDetecting(self.model_part,2, .9)
# (self.PfemUtils).ChangeWallWaterFlag(self.model_part,2)
# (self.PfemUtils).ChangeInterfaceWaterFlag(self.model_part,2)
# (self.PfemUtils).ColourAirWaterElement(self.model_part,2)
# for node in self.model_part.Nodes:
# node.SetSolutionStepValue(IS_FREE_SURFACE,0,0.0)
#
# for node in self.model_part.Nodes:
# if (node.GetSolutionStepValue(IS_BOUNDARY)==1 and node.GetSolutionStepValue(IS_STRUCTURE)!=1):
# node.SetSolutionStepValue(IS_FREE_SURFACE,0,1.0)
#
def FindNeighbours(self):
(self.neigh_finder).Execute()
#
def OutputStep(self, time, gid_io):
if(time >= self.next_output_time):
self.next_output_time = self.next_output_time + \
self.output_time_increment
# writing mesh
gid_io.InitializeMesh(time)
gid_io.WriteNodeMesh((self.model_part).GetMesh())
gid_io.WriteMesh((self.model_part).GetMesh())
gid_io.FinalizeMesh()
gid_io.InitializeResults(time, (self.model_part).GetMesh())
            nodal_result_variables = [
                PRESSURE, EXTERNAL_PRESSURE, IS_FREE_SURFACE, IS_BOUNDARY,
                IS_STRUCTURE, IS_INTERFACE, VELOCITY, MESH_VELOCITY, DENSITY,
                AIR_PRESSURE, WATER_PRESSURE, DENSITY_AIR, DENSITY_WATER,
                AIR_SOUND_VELOCITY, WATER_SOUND_VELOCITY, IS_FLUID, IS_WATER,
                NODAL_H, DISTANCE, DISPLACEMENT, IS_VISITED, AUX_INDEX,
            ]
            # write all nodal results with the same call signature
            for variable in nodal_result_variables:
                gid_io.WriteNodalResults(
                    variable,
                    (self.model_part).Nodes,
                    time,
                    0)
gid_io.PrintOnGaussPoints(IS_WATER_ELEMENT, self.model_part, time)
gid_io.Flush()
gid_io.FinalizeResults()
#
def CalculateDistanceAndDiviedSet(self, domain_size):
(self.neigh_finder).Execute()
distance_tools = ElemBasedDistanceUtilities(self.model_part)
distance_calculator = BodyDistanceCalculationUtils()
        # assign IS_VISITED = 1 to elements with DISTANCE >= 0 and change DISTANCE to positive for external ones
# Assign Zero distance to interface nodes
for node in (self.model_part).Nodes:
if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
node.SetSolutionStepValue(DISTANCE, 0, 0.0)
distance_tools.MarkExternalAndMixedNodes()
distance_tools.ChangeSignToDistance()
# calculate distances towards the interior of the domain
if(domain_size == 2):
distance_calculator.CalculateDistances2D(
(self.model_part).Elements,
DISTANCE,
True)
else:
distance_calculator.CalculateDistances3D(
(self.model_part).Elements,
DISTANCE,
True)
# change sign
distance_tools.ChangeSignToDistance()
# mark as visited all of the nodes inside the fluid domain
distance_tools.MarkInternalAndMixedNodes()
print(((self.model_part).Elements).Size())
# calculate distances towards the outside
if(domain_size == 2):
distance_calculator.CalculateDistances2D(
(self.model_part).Elements,
DISTANCE,
True)
else:
distance_calculator.CalculateDistances3D(
(self.model_part).Elements,
DISTANCE,
True)
# Decide IS_WATER flag due to DISTANCE
# for node in (self.model_part).Nodes:
# if(node.GetSolutionStepValue(DISTANCE)<= 0.0):
# node.SetSolutionStepValue(IS_WATER,0,0.0)
# else:
# node.SetSolutionStepValue(IS_WATER,0,1.0)
# if(node.GetSolutionStepValue(DISTANCE)== 0.0):
# print"This node has distance zero, is_interface is assigned"
# node.SetSolutionStepValue(IS_INTERFACE,0,1.0)
# node.SetSolutionStepValue(IS_VISITED,0,1.0)
# save as distance of the old time step
distance_tools.SaveScalarVariableToOldStep(DISTANCE)
print("finished RecalculateDistanceFunction")
# (self.SetDivided).SetDividedElem_2D()
print(">>>>>ELEMENTS ARE DIVIDED<<<<<<<<<<<<")
#
def DistToH(self):
possible_h = self.CalculateRadius()
print(possible_h)
min_H = possible_h * 3.14 / 200
# min_H = .0007#0.001
sec_min_H = 10 * min_H # .004
max_H = .02
ref_dist = 4 * min_H
sec_ref_dist = 20 * min_H
third_ref_dist = 200 * min_H
slope = (sec_min_H - min_H) / (sec_ref_dist - ref_dist)
second_slope = (max_H - sec_min_H) / (third_ref_dist - sec_ref_dist)
        # search for min and max of H
# for node in (self.model_part).Nodes:
# node_H = node.GetSolutionStepValue(NODAL_H,0)
# if(node_H<self.min_H):
# self.min_H = node_H
# else:
# if(node_H > self.max_H):
# self.max_H = node_H
# H = H + dist * dist
# print ">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<"
for node in (self.model_part).Nodes:
current_dist = node.GetSolutionStepValue(DISTANCE, 0)
if(abs(current_dist) <= ref_dist):
node_H = min_H # + slope*abs(current_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(ref_dist < abs(current_dist) and abs(current_dist) <= sec_ref_dist):
node_H = min_H + slope * (abs(current_dist) - ref_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(sec_ref_dist < abs(current_dist) and abs(current_dist) <= third_ref_dist):
node_H = sec_min_H + second_slope * \
(abs(current_dist) - sec_ref_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(abs(current_dist) > third_ref_dist):
node_H = max_H
node.SetSolutionStepValue(NODAL_H, 0, node_H)
# assign new value
# node.SetSolutionStepValue(NODAL_H,0,node_H)
# NearboundaryH
(self.PfemUtils).AssignNearBoundaryH(self.model_part, 5.0)
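    # Minimal sketch of the distance-to-size map applied above (illustrative;
    # the helper name is made up, the constants mirror the locals of DistToH):
    #
    #     def dist_to_h(d, min_H, sec_min_H, max_H,
    #                   ref_dist, sec_ref_dist, third_ref_dist):
    #         slope = (sec_min_H - min_H) / (sec_ref_dist - ref_dist)
    #         second_slope = (max_H - sec_min_H) / (third_ref_dist - sec_ref_dist)
    #         d = abs(d)
    #         if d <= ref_dist:
    #             return min_H
    #         if d <= sec_ref_dist:
    #             return min_H + slope * (d - ref_dist)
    #         if d <= third_ref_dist:
    #             return sec_min_H + second_slope * (d - sec_ref_dist)
    #         return max_H
    #
    # so NODAL_H grows piecewise-linearly from min_H at the interface to max_H
    # far away from it.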
#
    def CalculateRadius(self):
        # Approximate the interface radius: take the last interface node found
        # as a reference point and return the largest distance from it to any
        # other interface node.
max_radi = 0.0
for node in (self.model_part).Nodes:
if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
X_ref = node.X
Y_ref = node.Y
for node in (self.model_part).Nodes:
if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
radi = pow(node.X - X_ref, 2) + pow(node.Y - Y_ref, 2)
if(radi > max_radi):
max_radi = radi
max_radi = pow(max_radi, 0.5)
return max_radi
#
def AssignH(self):
for node in (self.model_part).Nodes:
if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
node.SetSolutionStepValue(NODAL_H, 0, .03)
else:
node.SetSolutionStepValue(NODAL_H, 0, .1)
print(">>>>>HHHHHH ASSIGNMENT<<<<<<<<<<<<")
#
#
def ImplosionDistToH(self):
min_H = .0005
max_H = .05
ref_dist = .0025
tol = .001
slope = (max_H - min_H) / ref_dist
        # search for min and max of H
# for node in (self.model_part).Nodes:
# node_H = node.GetSolutionStepValue(NODAL_H,0)
# if(node_H<self.min_H):
# self.min_H = node_H
# else:
# if(node_H > self.max_H):
# self.max_H = node_H
# H = H + dist * dist
print(">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<")
        for node in (self.model_part).Nodes:
            current_dist = node.GetSolutionStepValue(DISTANCE, 0)
            if(current_dist > tol):
                if(abs(current_dist) <= ref_dist):
                    node_H = min_H + slope * abs(current_dist)
                else:
                    node_H = max_H
            elif(current_dist < -tol):
                node_H = min_H
            else:
                # nodes within +/- tol of the interface were left without a
                # value in the original code; fall back to min_H (assumption)
                # so that node_H is always defined before the assignment below
                node_H = min_H
            # assign new value
            node.SetSolutionStepValue(NODAL_H, 0, node_H)
print(">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<")
#
quex/engine/analyzer/mega_state/path_walker/state.py | Liby99/quex | 0 | 6631648 | # Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 <NAME>;
#_______________________________________________________________________________
# (C) 2010-2014 <NAME>
from quex.engine.operations.operation_list import Op
from quex.engine.analyzer.mega_state.core import MegaState, \
StateKeyIndexDB
from quex.engine.analyzer.mega_state.path_walker.find import DropOutConsideration_cmp, \
DropOutConsideration_relate
import quex.engine.state_machine.index as index
from quex.engine.misc.tools import UniformObject
from quex.constants import E_Compression, E_Op
class PathWalkerState(MegaState):
"""________________________________________________________________________
A path walker state is a state that can walk along one or more paths
with the same remaining transition map. Objects of this class are the basis
for code generation.
path ['w', 'h', 'i', 'l', 'e', PTC]
path ['f', 'o', 'r', PTC]
path ['f', 'u', 'n', 'c', 't', 'i', 'o', 'n', PTC]
path ['p', 'r', 'i', 'n', 't', PTC]
path_iterator ---->--'
path ['s', 't', 'r', 'u', 'c', 't', PTC]
.-------------------------.
| path_iterator = path[0] |
'-------------------------'
|
|<-----------------------------------.
| |
.-----------'''--------------. true .-----------------.
/ *input_p == *path_iterator ? \----------| ++path_iterator |
\______________________________/ | ++input_p |
| '-----------------'
|
.------------------------.
| |----- [a-h] ----> state 21
| |----- [j] ----> state 185
| transition_map(*input) |----- 'o' ----> state 312
| |----- [p-z] ----> state 21
| |----- [a-h] ----> state 21
'------------------------'
The 'group()' function in 'path_walker.core.py' develops a set of path
walkers for a set of given CharacterPath list.
___________________________________________________________________________
"""
def __init__(self, FirstPath, TheAnalyzer):
my_index = index.get()
ski_db = StateKeyIndexDB([x.state_index for x in FirstPath.step_list],
IgnoredListIndex=len(FirstPath.step_list)-1)
MegaState.__init__(self, my_index, FirstPath.transition_map, ski_db, TheAnalyzer.dial_db)
# Uniform OpList along entries on the path (optional)
self.uniform_entry_OpList = FirstPath.uniform_entry_OpList.clone()
self.__path_list = [ FirstPath.step_list ]
# Following is set by 'finalize()'.
self.__finalized = None # <-- ._finalize_content()
@property
def door_id_sequence_list(self):
return self.__finalized.door_id_sequence_list
@property
def uniform_door_id(self):
"""At any step along the path commands may be executed upon entry
into the target state. If those commands are uniform, then this
function returns a OpList object of those uniform commands.
RETURNS: None, if the commands at entry of the states on the path
are not uniform.
"""
return self.__finalized.uniform_door_id.content
@property
def uniform_terminal_door_id(self):
"""RETURNS: DoorID -- if all paths which are involved enter the same
terminal state through the same entry door.
None -- if not.
"""
return self.__finalized.uniform_terminal_door_id.content
@property
def path_list(self):
assert type(self.__path_list) == list
return self.__path_list
def accept(self, Path, TheAnalyzer, CompressionType):
"""Checks whether conditions of absorbing the Path are met, and if
so then the Path is absorbed.
RETURNS: False -- Path does not fit the PathWalkerState.
True -- Path can be walked by PathWalkerState and has been
accepted.
"""
if not self.__can_absorb_path(Path, TheAnalyzer, CompressionType):
return False
self.__absorb_path(Path, TheAnalyzer)
return True
def __can_absorb_path(self, Path, TheAnalyzer, CompressionType):
"""Check whether a path can be walked along with the given PathWalkerState.
For this, the following has to hold:
-- The transition_maps must match.
-- If uniformity is required, the entries and drop-outs must
               be uniform with the existing ones.
"""
if not self.transition_map.is_equal(Path.transition_map, DropOutConsideration_cmp):
return False
if CompressionType == E_Compression.PATH_UNIFORM:
if not self.uniform_entry_OpList.fit(Path.uniform_entry_OpList):
return False
return True
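    # Illustrative sketch (hypothetical objects, not from the code base):
    #
    #     walker = PathWalkerState(first_path, analyzer)
    #     if walker.accept(other_path, analyzer, E_Compression.PATH_UNIFORM):
    #         ...  # other_path now shares this walker's implementation
    #     else:
    #         ...  # transition maps differ, or entry OpLists are not uniform
    #
    # `first_path`, `other_path` and `analyzer` stand for CharacterPath objects
    # and the analyzer produced elsewhere in Quex.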
def __absorb_path(self, Path, TheAnalyzer):
"""-- Absorb the state sequence of the path.
-- Absorb the Entry/DropOut information.
"""
# (Meaningful paths consist of more than one state and a terminal.)
assert len(Path.step_list) > 2
self.__path_list.append(Path.step_list)
# (1) Absorb the state sequence of the path.
#
new_state_index_list = [x.state_index for x in Path.step_list]
terminal_index = len(new_state_index_list) - 1
# Assert: A state cannot be implemented on two different paths.
assert self.ski_db.not_implemented_yet(new_state_index_list[:terminal_index])
self.ski_db.extend(new_state_index_list, IgnoredListIndex=terminal_index)
# (2) Absorb Entry/DropOut Information
#
self.uniform_entry_OpList <<= Path.uniform_entry_OpList
return True
def _finalize_entry_OpLists(self):
"""If a state is entered from outside the path walker, then the 'state_key',
respectively, the 'path_iterator' needs to be set. During the walk along
a path, the 'path_iterator' is simply incremented--and this happens in the
code generated for the path walker (later on).
NOTE: Here, it must be ensured that the DoorID-s for entries from
outside remain the same! This way, any external transition map
may remain the same.
"""
# Entries along the path: PathIterator Increment
# ... but this is handled better by the code generator.
# Entries from outside: PathIteratorSet
for path_id, step_list in enumerate(self.__path_list):
prev_state_index = None
# Terminal is not element of path => consider only 'step_list[:-1]'
for offset, step in enumerate(step_list[:-1]):
# Inside transition: 'prev_state.index --> step.state_index'
# All other transitions: ' * --> step.state_index'
# are transitions from states outside the path.
state_key = offset
# Update sets inside transition's 'door_id = None' and adds
# the transition to 'transition_reassignment_candidate_list'.
self.entry.action_db_update(From = prev_state_index,
To = step.state_index,
FromOutsideOp = Op.PathIteratorSet(self.index, path_id, state_key),
FromInsideOp = None)
prev_state_index = step.state_index
# Make sure, that the OpList-s on the paths are organized and
# assigned with new DoorID-s.
assert len(self.entry.transition_reassignment_candidate_list) > 0
def _finalize_transition_map(self, TheAnalyzer):
"""All drop-outs of this path walker enter a common door in the drop-out
catcher. There, they are routed to the drop-outs for the current state
        which the path walker represents. The current state is given by the
state key.
"""
        # Any drop-out in the transition map must become a 'goto path walker's
        # drop-out'. In the path walker's drop-out it is routed to the drop-out of
        # the state which it currently represents.
drop_out_door_id = TheAnalyzer.drop_out_DoorID(self.index)
self.transition_map.adapt_targets(drop_out_door_id, DropOutConsideration_relate)
self.transition_map.combine_adjacents()
def _finalize_content(self, TheAnalyzer):
self.__finalized = FinalizedContent(self, TheAnalyzer)
def get_Trigger_DoorID_by_state_key(self, StateKey):
# Find (transition char, DoorID) for given StateKey
i = 0
offset = 0
for i in range(len(self.path_list)):
end = offset + len(self.path_list[i]) - 1
if StateKey < end:
break
offset = end + 1
step_i = StateKey - offset
return self.path_list[i][step_i].trigger, self.door_id_sequence_list[i][step_i]
def _get_target_by_state_key(self, Begin, End, TargetScheme, StateKey):
"""In a PathWalkerState's transition map, the targets are DoorID-s. They
        do not depend on a StateKey. The ones which depend on a state key
        are the ones on a path.
"""
        # First, look whether the character lies on the path. If not, rely on
        # the transition map's target DoorID as it is.
if End - Begin == 1:
trigger, door_id = self.get_Trigger_DoorID_by_state_key(StateKey)
if Begin == trigger:
return door_id
return TargetScheme
def _assert_consistency(self, CompressionType, RemainingStateIndexSet, TheAnalyzer):
# If uniform_entry_OpList is claimed, then the DoorID must be
# the same along all paths--and vice versa.
assert (self.uniform_door_id is not None) \
== self.uniform_entry_OpList.is_uniform()
# If uniformity was required, then it must have been maintained.
if CompressionType == E_Compression.PATH_UNIFORM:
assert self.uniform_door_id is not None
assert self.uniform_entry_OpList.is_uniform()
# The door_id_sequence_list corresponds to the path_list.
assert len(self.door_id_sequence_list) == len(self.path_list)
for door_id_sequence, step_list in zip(self.door_id_sequence_list, self.path_list):
# Path entry is not element of door_id_sequence => '-1'
assert len(door_id_sequence) == len(step_list) - 1
# A OpList at a door can at maximum contain 1 path iterator command!
for action in self.entry.values():
path_iterator_cmd_n = 0
for cmd in action.command_list:
if cmd.id != E_Op.PathIteratorSet: continue
path_iterator_cmd_n += 1
assert path_iterator_cmd_n < 2
class FinalizedContent(object):
__slots__ = ("uniform_door_id",
"uniform_terminal_door_id",
"door_id_sequence_list")
def __init__(self, PWState, TheAnalyzer):
self.uniform_door_id = UniformObject()
self.uniform_terminal_door_id = UniformObject()
self.door_id_sequence_list = []
for step_list in PWState.path_list:
# Meaningful paths consist of more than one state and a terminal.
assert len(step_list) > 2
door_id_sequence = self.__determine_door_id_sequence(step_list, TheAnalyzer, PWState)
self.door_id_sequence_list.append(door_id_sequence)
return
def __determine_door_id_sequence(self, step_list, TheAnalyzer, PWState):
"""Determines the sequence of DoorID-s for a given 'step_list' and
adapts 'uniform_door_id', and 'uniform_terminal_door_id'.
"""
# -- States on path
# (entries are considered from the second state on path on)
door_id_sequence = []
prev_step = step_list[0]
action_db = PWState.entry
for step in step_list[1:-1]:
# (Recall: there is only one transition (from, to) => TriggerId == 0)
door_id = action_db.get_door_id(step.state_index, prev_step.state_index, TriggerId=0)
# Every DoorID on the path must be a newly-assigned one to this PathWalkerState.
assert door_id.state_index == PWState.index
door_id_sequence.append(door_id)
self.uniform_door_id <<= door_id
prev_step = step
# -- Terminal
step = step_list[-1]
#! A terminal of one path cannot be element of another path of the
#! same PathWalkerState. This might cause huge trouble!
#! (Ensured by the function '.accept(Path)')
# assert step.state_index not in PWState.implemented_state_index_set()
action_db = TheAnalyzer.state_db[step.state_index].entry
door_id = action_db.get_door_id(step.state_index, prev_step.state_index, TriggerId=0)
door_id_sequence.append(door_id)
self.uniform_terminal_door_id <<= door_id
return door_id_sequence
great_expectations/render/renderer/page_renderer.py | louispotok/great_expectations | 0 | 6631649 | import logging
import pypandoc
from great_expectations.data_context.util import instantiate_class_from_config
from .renderer import Renderer
from ..types import (
RenderedDocumentContent,
RenderedSectionContent,
RenderedComponentContent,
)
from collections import OrderedDict
logger = logging.getLogger(__name__)
class ValidationResultsPageRenderer(Renderer):
def __init__(self, column_section_renderer=None):
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ValidationResultsColumnSectionRenderer"
}
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_config={},
config_defaults={
"module_name": column_section_renderer.get(
"module_name", "great_expectations.render.renderer.column_section_renderer")
}
)
def render(self, validation_results={}):
run_id = validation_results['meta']['run_id']
full_data_asset_identifier = validation_results['meta']['data_asset_name'] or ""
expectation_suite_name = validation_results['meta']['expectation_suite_name']
short_data_asset_name = full_data_asset_identifier.split('/')[-1]
# Group EVRs by column
columns = {}
for evr in validation_results["results"]:
if "column" in evr["expectation_config"]["kwargs"]:
column = evr["expectation_config"]["kwargs"]["column"]
else:
column = "Table-Level Expectations"
if column not in columns:
columns[column] = []
columns[column].append(evr)
ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
overview_content_blocks = [
self._render_validation_header(),
self._render_validation_info(validation_results=validation_results),
self._render_validation_statistics(validation_results=validation_results)
]
if "data_asset_name" in validation_results["meta"] and validation_results["meta"]["data_asset_name"]:
data_asset_name = short_data_asset_name
else:
data_asset_name = None
sections = [
RenderedSectionContent(**{
"section_name": "Overview",
"content_blocks": overview_content_blocks
})
]
if "Table-Level Expectations" in columns:
sections += [
self._column_section_renderer.render(
validation_results=columns["Table-Level Expectations"]
)
]
sections += [
self._column_section_renderer.render(
validation_results=columns[column],
) for column in ordered_columns
]
return RenderedDocumentContent(**{
"renderer_type": "ValidationResultsColumnSectionRenderer",
"data_asset_name": data_asset_name,
"full_data_asset_identifier": full_data_asset_identifier,
"page_title": run_id + "-" + expectation_suite_name + "-ValidationResults",
"sections": sections,
"utm_medium": "validation-results-page",
})
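    # Illustrative usage sketch (assumption: `validation_results` is a dict of
    # the shape consumed above, with "meta", "results", "statistics" and
    # "success" keys):
    #
    #     renderer = ValidationResultsPageRenderer()
    #     document = renderer.render(validation_results)
    #     # `document` is a RenderedDocumentContent; its sections start with
    #     # the Overview section, then optional table-level expectations,
    #     # then one section per column in `ordered_columns`.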
@classmethod
def _render_validation_header(cls):
return RenderedComponentContent(**{
"content_block_type": "header",
"header": "Validation Overview",
"styling": {
"classes": ["col-12"],
"header": {
"classes": ["alert", "alert-secondary"]
}
}
})
@classmethod
def _render_validation_info(cls, validation_results):
run_id = validation_results['meta']['run_id']
full_data_asset_identifier = validation_results['meta']['data_asset_name'] or ""
expectation_suite_name = validation_results['meta']['expectation_suite_name']
ge_version = validation_results["meta"]["great_expectations.__version__"]
success = validation_results["success"]
return RenderedComponentContent(**{
"content_block_type": "table",
"header": "Info",
"table": [
["Full Data Asset Identifier", full_data_asset_identifier],
["Expectation Suite Name", expectation_suite_name],
["Great Expectations Version", ge_version],
["Run ID", run_id],
["Validation Succeeded", success]
],
"styling": {
"classes": ["col-12", "table-responsive"],
"styles": {
"margin-top": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
@classmethod
def _render_validation_statistics(cls, validation_results):
statistics = validation_results["statistics"]
statistics_dict = OrderedDict([
("evaluated_expectations", "Evaluated Expectations"),
("successful_expectations", "Successful Expectations"),
("unsuccessful_expectations", "Unsuccessful Expectations"),
("success_percent", "Success Percent")
])
table_rows = []
for key, value in statistics_dict.items():
if statistics.get(key) is not None:
if key == "success_percent":
table_rows.append([value, "{0:.2f}%".format(statistics[key])])
else:
table_rows.append([value, statistics[key]])
return RenderedComponentContent(**{
"content_block_type": "table",
"header": "Statistics",
"table": table_rows,
"styling": {
"classes": ["col-6", "table-responsive"],
"styles": {
"margin-top": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
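    # Worked example (illustrative input): statistics of the form
    #
    #     {"evaluated_expectations": 10, "successful_expectations": 9,
    #      "unsuccessful_expectations": 1, "success_percent": 90.0}
    #
    # produce the table rows
    #
    #     [["Evaluated Expectations", 10], ["Successful Expectations", 9],
    #      ["Unsuccessful Expectations", 1], ["Success Percent", "90.00%"]]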
class ExpectationSuitePageRenderer(Renderer):
def __init__(self, column_section_renderer=None):
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ExpectationSuiteColumnSectionRenderer"
}
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_config={},
config_defaults={
"module_name": column_section_renderer.get(
"module_name", "great_expectations.render.renderer.column_section_renderer")
}
)
def render(self, expectations):
columns, ordered_columns = self._group_and_order_expectations_by_column(expectations)
full_data_asset_identifier = expectations.get("data_asset_name") or ""
expectation_suite_name = self._get_expectation_suite_name(expectations)
overview_content_blocks = [
self._render_asset_header(expectations),
self._render_asset_info(expectations)
]
table_level_expectations_content_block = self._render_table_level_expectations(columns)
if table_level_expectations_content_block is not None:
overview_content_blocks.append(table_level_expectations_content_block)
asset_notes_content_block = self._render_asset_notes(expectations)
if asset_notes_content_block is not None:
overview_content_blocks.append(asset_notes_content_block)
sections = [
RenderedSectionContent(**{
"section_name": "Overview",
"content_blocks": overview_content_blocks,
})
]
sections += [
self._column_section_renderer.render(expectations=columns[column]) for column in ordered_columns if column != "_nocolumn"
]
return RenderedDocumentContent(**{
# "data_asset_name": short_data_asset_name,
"full_data_asset_identifier": full_data_asset_identifier,
"page_title": expectation_suite_name,
"utm_medium": "expectation-suite-page",
"sections": sections
})
def _render_table_level_expectations(self, columns):
table_level_expectations = columns.get("_nocolumn")
if not table_level_expectations:
return None
else:
expectation_bullet_list = self._column_section_renderer.render(
expectations=table_level_expectations).content_blocks[1]
expectation_bullet_list["header"] = "Table-Level Expectations"
return expectation_bullet_list
@classmethod
def _render_asset_header(cls, expectations):
return RenderedComponentContent(**{
"content_block_type": "header",
"header": "Expectation Suite Overview",
"styling": {
"classes": ["col-12"],
"header": {
"classes": ["alert", "alert-secondary"]
}
}
})
@classmethod
def _render_asset_info(cls, expectations):
full_data_asset_identifier = expectations.get("data_asset_name") or ""
data_asset_type = expectations.get("data_asset_type")
expectation_suite_name = expectations.get("expectation_suite_name")
ge_version = expectations["meta"]["great_expectations.__version__"]
return RenderedComponentContent(**{
"content_block_type": "table",
"header": "Info",
"table": [
["Full Data Asset Identifier", full_data_asset_identifier],
["Data Asset Type", data_asset_type],
["Expectation Suite Name", expectation_suite_name],
["Great Expectations Version", ge_version]
],
"styling": {
"classes": ["col-12", "table-responsive"],
"styles": {
"margin-top": "20px",
"margin-bottom": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
@classmethod
def _render_asset_notes(cls, expectations):
content = []
if "expectations" in expectations:
# This if statement is a precaution in case the expectation suite doesn't contain expectations.
# Once we have more strongly typed classes for suites, this shouldn't be necessary.
total_expectations = len(expectations["expectations"])
columns = []
for exp in expectations["expectations"]:
if "column" in exp["kwargs"]:
columns.append(exp["kwargs"]["column"])
total_columns = len(set(columns))
content = content + [
# TODO: Leaving these two paragraphs as placeholders for later development.
# "This Expectation suite was first generated by {BasicDatasetProfiler} on {date}, using version {xxx} of Great Expectations.",
# "{name}, {name}, and {name} have also contributed additions and revisions.",
"This Expectation suite currently contains %d total Expectations across %d columns." % (
total_expectations,
total_columns,
),
]
if "notes" in expectations["meta"]:
notes = expectations["meta"]["notes"]
note_content = None
if type(notes) == str:
note_content = [notes]
elif type(notes) == list:
note_content = notes
elif type(notes) == dict:
if "format" in notes:
if notes["format"] == "string":
if type(notes["content"]) == str:
note_content = [notes["content"]]
elif type(notes["content"]) == list:
note_content = notes["content"]
else:
logger.warning("Unrecognized Expectation suite notes format. Skipping rendering.")
elif notes["format"] == "markdown":
# ???: Should converting to markdown be the renderer's job, or the view's job?
# Renderer is easier, but will end up mixing HTML strings with content_block info.
if type(notes["content"]) == str:
try:
note_content = [pypandoc.convert_text(notes["content"], format='md', to="html")]
except OSError:
note_content = [notes["content"]]
elif type(notes["content"]) == list:
try:
note_content = [pypandoc.convert_text(note, format='md', to="html") for note in
notes["content"]]
except OSError:
note_content = [note for note in notes["content"]]
else:
logger.warning("Unrecognized Expectation suite notes format. Skipping rendering.")
else:
logger.warning("Unrecognized Expectation suite notes format. Skipping rendering.")
            if note_content is not None:
content = content + note_content
return RenderedComponentContent(**{
"content_block_type": "text",
"header": "Notes",
"content": content,
"styling": {
"classes": ["col-12", "table-responsive"],
"styles": {
"margin-top": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
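    # Illustrative examples of the `meta.notes` shapes handled above (assumed
    # inputs, not taken from a real suite):
    #
    #     expectations["meta"]["notes"] = "a plain string note"
    #     expectations["meta"]["notes"] = ["first note", "second note"]
    #     expectations["meta"]["notes"] = {
    #         "format": "markdown",
    #         "content": ["**bold** note, converted with pypandoc when available"],
    #     }
    #
    # Unrecognized shapes are skipped with a warning; markdown falls back to
    # the raw strings when the pandoc converter is unavailable (OSError).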
class ProfilingResultsPageRenderer(Renderer):
def __init__(self, overview_section_renderer=None, column_section_renderer=None):
if overview_section_renderer is None:
overview_section_renderer = {
"class_name": "ProfilingResultsOverviewSectionRenderer"
}
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ProfilingResultsColumnSectionRenderer"
}
self._overview_section_renderer = instantiate_class_from_config(
config=overview_section_renderer,
runtime_config={},
config_defaults={
"module_name": overview_section_renderer.get(
"module_name", "great_expectations.render.renderer.other_section_renderer")
}
)
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_config={},
config_defaults={
"module_name": column_section_renderer.get(
"module_name", "great_expectations.render.renderer.column_section_renderer")
}
)
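        # Illustrative configuration sketch (hypothetical class/module names):
        # both section renderers can be overridden with config dicts of the
        # same shape as the defaults above, e.g.
        #
        #     ProfilingResultsPageRenderer(
        #         column_section_renderer={
        #             "class_name": "MyColumnSectionRenderer",
        #             "module_name": "my_package.renderers",
        #         }
        #     )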
def render(self, validation_results):
run_id = validation_results['meta']['run_id']
full_data_asset_identifier = validation_results['meta']['data_asset_name'] or ""
expectation_suite_name = validation_results['meta']['expectation_suite_name']
short_data_asset_name = full_data_asset_identifier.split('/')[-1]
# Group EVRs by column
#TODO: When we implement a ValidationResultSuite class, this method will move there.
columns = self._group_evrs_by_column(validation_results)
ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
column_types = self._overview_section_renderer._get_column_types(validation_results)
if "data_asset_name" in validation_results["meta"] and validation_results["meta"]["data_asset_name"]:
data_asset_name = short_data_asset_name
else:
data_asset_name = None
return RenderedDocumentContent(**{
"renderer_type": "ProfilingResultsPageRenderer",
"data_asset_name": data_asset_name,
"full_data_asset_identifier": full_data_asset_identifier,
"page_title": run_id + "-" + expectation_suite_name + "-ProfilingResults",
"utm_medium": "profiling-results-page",
"sections":
[
self._overview_section_renderer.render(
validation_results,
section_name="Overview"
)
] +
[
self._column_section_renderer.render(
columns[column],
section_name=column,
column_type=column_types.get(column),
) for column in ordered_columns
]
})
| import logging
import pypandoc
from great_expectations.data_context.util import instantiate_class_from_config
from .renderer import Renderer
from ..types import (
RenderedDocumentContent,
RenderedSectionContent,
RenderedComponentContent,
)
from collections import OrderedDict
logger = logging.getLogger(__name__)
class ValidationResultsPageRenderer(Renderer):
def __init__(self, column_section_renderer=None):
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ValidationResultsColumnSectionRenderer"
}
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_config={},
config_defaults={
"module_name": column_section_renderer.get(
"module_name", "great_expectations.render.renderer.column_section_renderer")
}
)
def render(self, validation_results={}):
run_id = validation_results['meta']['run_id']
full_data_asset_identifier = validation_results['meta']['data_asset_name'] or ""
expectation_suite_name = validation_results['meta']['expectation_suite_name']
short_data_asset_name = full_data_asset_identifier.split('/')[-1]
# Group EVRs by column
columns = {}
for evr in validation_results["results"]:
if "column" in evr["expectation_config"]["kwargs"]:
column = evr["expectation_config"]["kwargs"]["column"]
else:
column = "Table-Level Expectations"
if column not in columns:
columns[column] = []
columns[column].append(evr)
ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
overview_content_blocks = [
self._render_validation_header(),
self._render_validation_info(validation_results=validation_results),
self._render_validation_statistics(validation_results=validation_results)
]
if "data_asset_name" in validation_results["meta"] and validation_results["meta"]["data_asset_name"]:
data_asset_name = short_data_asset_name
else:
data_asset_name = None
sections = [
RenderedSectionContent(**{
"section_name": "Overview",
"content_blocks": overview_content_blocks
})
]
if "Table-Level Expectations" in columns:
sections += [
self._column_section_renderer.render(
validation_results=columns["Table-Level Expectations"]
)
]
sections += [
self._column_section_renderer.render(
validation_results=columns[column],
) for column in ordered_columns
]
return RenderedDocumentContent(**{
"renderer_type": "ValidationResultsColumnSectionRenderer",
"data_asset_name": data_asset_name,
"full_data_asset_identifier": full_data_asset_identifier,
"page_title": run_id + "-" + expectation_suite_name + "-ValidationResults",
"sections": sections,
"utm_medium": "validation-results-page",
})
@classmethod
def _render_validation_header(cls):
return RenderedComponentContent(**{
"content_block_type": "header",
"header": "Validation Overview",
"styling": {
"classes": ["col-12"],
"header": {
"classes": ["alert", "alert-secondary"]
}
}
})
@classmethod
def _render_validation_info(cls, validation_results):
run_id = validation_results['meta']['run_id']
full_data_asset_identifier = validation_results['meta']['data_asset_name'] or ""
expectation_suite_name = validation_results['meta']['expectation_suite_name']
ge_version = validation_results["meta"]["great_expectations.__version__"]
success = validation_results["success"]
return RenderedComponentContent(**{
"content_block_type": "table",
"header": "Info",
"table": [
["Full Data Asset Identifier", full_data_asset_identifier],
["Expectation Suite Name", expectation_suite_name],
["Great Expectations Version", ge_version],
["Run ID", run_id],
["Validation Succeeded", success]
],
"styling": {
"classes": ["col-12", "table-responsive"],
"styles": {
"margin-top": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
@classmethod
def _render_validation_statistics(cls, validation_results):
statistics = validation_results["statistics"]
statistics_dict = OrderedDict([
("evaluated_expectations", "Evaluated Expectations"),
("successful_expectations", "Successful Expectations"),
("unsuccessful_expectations", "Unsuccessful Expectations"),
("success_percent", "Success Percent")
])
table_rows = []
for key, value in statistics_dict.items():
if statistics.get(key) is not None:
if key == "success_percent":
table_rows.append([value, "{0:.2f}%".format(statistics[key])])
else:
table_rows.append([value, statistics[key]])
return RenderedComponentContent(**{
"content_block_type": "table",
"header": "Statistics",
"table": table_rows,
"styling": {
"classes": ["col-6", "table-responsive"],
"styles": {
"margin-top": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
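    # Illustrative example of the table built above (values are made up):
    #   {"evaluated_expectations": 10, "successful_expectations": 9,
    #    "unsuccessful_expectations": 1, "success_percent": 90.0}
    # keeps the OrderedDict ordering and only formats the percentage entry,
    # yielding a final row of ["Success Percent", "90.00%"].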
class ExpectationSuitePageRenderer(Renderer):
def __init__(self, column_section_renderer=None):
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ExpectationSuiteColumnSectionRenderer"
}
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_config={},
config_defaults={
"module_name": column_section_renderer.get(
"module_name", "great_expectations.render.renderer.column_section_renderer")
}
)
def render(self, expectations):
columns, ordered_columns = self._group_and_order_expectations_by_column(expectations)
full_data_asset_identifier = expectations.get("data_asset_name") or ""
expectation_suite_name = self._get_expectation_suite_name(expectations)
overview_content_blocks = [
self._render_asset_header(expectations),
self._render_asset_info(expectations)
]
table_level_expectations_content_block = self._render_table_level_expectations(columns)
if table_level_expectations_content_block is not None:
overview_content_blocks.append(table_level_expectations_content_block)
asset_notes_content_block = self._render_asset_notes(expectations)
if asset_notes_content_block is not None:
overview_content_blocks.append(asset_notes_content_block)
sections = [
RenderedSectionContent(**{
"section_name": "Overview",
"content_blocks": overview_content_blocks,
})
]
sections += [
self._column_section_renderer.render(expectations=columns[column]) for column in ordered_columns if column != "_nocolumn"
]
return RenderedDocumentContent(**{
# "data_asset_name": short_data_asset_name,
"full_data_asset_identifier": full_data_asset_identifier,
"page_title": expectation_suite_name,
"utm_medium": "expectation-suite-page",
"sections": sections
})
def _render_table_level_expectations(self, columns):
table_level_expectations = columns.get("_nocolumn")
if not table_level_expectations:
return None
else:
expectation_bullet_list = self._column_section_renderer.render(
expectations=table_level_expectations).content_blocks[1]
expectation_bullet_list["header"] = "Table-Level Expectations"
return expectation_bullet_list
@classmethod
def _render_asset_header(cls, expectations):
return RenderedComponentContent(**{
"content_block_type": "header",
"header": "Expectation Suite Overview",
"styling": {
"classes": ["col-12"],
"header": {
"classes": ["alert", "alert-secondary"]
}
}
})
@classmethod
def _render_asset_info(cls, expectations):
full_data_asset_identifier = expectations.get("data_asset_name") or ""
data_asset_type = expectations.get("data_asset_type")
expectation_suite_name = expectations.get("expectation_suite_name")
ge_version = expectations["meta"]["great_expectations.__version__"]
return RenderedComponentContent(**{
"content_block_type": "table",
"header": "Info",
"table": [
["Full Data Asset Identifier", full_data_asset_identifier],
["Data Asset Type", data_asset_type],
["Expectation Suite Name", expectation_suite_name],
["Great Expectations Version", ge_version]
],
"styling": {
"classes": ["col-12", "table-responsive"],
"styles": {
"margin-top": "20px",
"margin-bottom": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
@classmethod
def _render_asset_notes(cls, expectations):
content = []
if "expectations" in expectations:
# This if statement is a precaution in case the expectation suite doesn't contain expectations.
# Once we have more strongly typed classes for suites, this shouldn't be necessary.
total_expectations = len(expectations["expectations"])
columns = []
for exp in expectations["expectations"]:
if "column" in exp["kwargs"]:
columns.append(exp["kwargs"]["column"])
total_columns = len(set(columns))
content = content + [
# TODO: Leaving these two paragraphs as placeholders for later development.
# "This Expectation suite was first generated by {BasicDatasetProfiler} on {date}, using version {xxx} of Great Expectations.",
# "{name}, {name}, and {name} have also contributed additions and revisions.",
"This Expectation suite currently contains %d total Expectations across %d columns." % (
total_expectations,
total_columns,
),
]
if "notes" in expectations["meta"]:
notes = expectations["meta"]["notes"]
note_content = None
if type(notes) == str:
note_content = [notes]
elif type(notes) == list:
note_content = notes
elif type(notes) == dict:
if "format" in notes:
if notes["format"] == "string":
if type(notes["content"]) == str:
note_content = [notes["content"]]
elif type(notes["content"]) == list:
note_content = notes["content"]
else:
logger.warning("Unrecognized Expectation suite notes format. Skipping rendering.")
elif notes["format"] == "markdown":
# ???: Should converting to markdown be the renderer's job, or the view's job?
# Renderer is easier, but will end up mixing HTML strings with content_block info.
if type(notes["content"]) == str:
try:
note_content = [pypandoc.convert_text(notes["content"], format='md', to="html")]
except OSError:
note_content = [notes["content"]]
elif type(notes["content"]) == list:
try:
note_content = [pypandoc.convert_text(note, format='md', to="html") for note in
notes["content"]]
except OSError:
note_content = [note for note in notes["content"]]
else:
logger.warning("Unrecognized Expectation suite notes format. Skipping rendering.")
else:
logger.warning("Unrecognized Expectation suite notes format. Skipping rendering.")
            if note_content is not None:
content = content + note_content
return RenderedComponentContent(**{
"content_block_type": "text",
"header": "Notes",
"content": content,
"styling": {
"classes": ["col-12", "table-responsive"],
"styles": {
"margin-top": "20px"
},
"body": {
"classes": ["table", "table-sm"]
}
},
})
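    # The "notes" metadata handled above may be a plain string, a list of strings,
    # or a dict such as {"format": "markdown", "content": ["# Title", "Some *text*"]}.
    # Markdown entries are converted to HTML with pypandoc when pandoc is available;
    # on OSError the raw content is kept unchanged.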
class ProfilingResultsPageRenderer(Renderer):
def __init__(self, overview_section_renderer=None, column_section_renderer=None):
if overview_section_renderer is None:
overview_section_renderer = {
"class_name": "ProfilingResultsOverviewSectionRenderer"
}
if column_section_renderer is None:
column_section_renderer = {
"class_name": "ProfilingResultsColumnSectionRenderer"
}
self._overview_section_renderer = instantiate_class_from_config(
config=overview_section_renderer,
runtime_config={},
config_defaults={
"module_name": overview_section_renderer.get(
"module_name", "great_expectations.render.renderer.other_section_renderer")
}
)
self._column_section_renderer = instantiate_class_from_config(
config=column_section_renderer,
runtime_config={},
config_defaults={
"module_name": column_section_renderer.get(
"module_name", "great_expectations.render.renderer.column_section_renderer")
}
)
def render(self, validation_results):
run_id = validation_results['meta']['run_id']
full_data_asset_identifier = validation_results['meta']['data_asset_name'] or ""
expectation_suite_name = validation_results['meta']['expectation_suite_name']
short_data_asset_name = full_data_asset_identifier.split('/')[-1]
# Group EVRs by column
#TODO: When we implement a ValidationResultSuite class, this method will move there.
columns = self._group_evrs_by_column(validation_results)
ordered_columns = Renderer._get_column_list_from_evrs(validation_results)
column_types = self._overview_section_renderer._get_column_types(validation_results)
if "data_asset_name" in validation_results["meta"] and validation_results["meta"]["data_asset_name"]:
data_asset_name = short_data_asset_name
else:
data_asset_name = None
return RenderedDocumentContent(**{
"renderer_type": "ProfilingResultsPageRenderer",
"data_asset_name": data_asset_name,
"full_data_asset_identifier": full_data_asset_identifier,
"page_title": run_id + "-" + expectation_suite_name + "-ProfilingResults",
"utm_medium": "profiling-results-page",
"sections":
[
self._overview_section_renderer.render(
validation_results,
section_name="Overview"
)
] +
[
self._column_section_renderer.render(
columns[column],
section_name=column,
column_type=column_types.get(column),
) for column in ordered_columns
]
})
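# Illustrative driving code for this renderer (a sketch; `validation_results` is a
# hypothetical EVR suite dict shaped like the one consumed by render() above, and
# the HTML view layer that consumes the returned RenderedDocumentContent is assumed,
# not shown here):
#
#   renderer = ProfilingResultsPageRenderer()
#   document = renderer.render(validation_results)
#   # `document` bundles the "sections", "page_title", etc. built above.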
| en | 0.875778 | # Group EVRs by column # "data_asset_name": short_data_asset_name, # This if statement is a precaution in case the expectation suite doesn't contain expectations. # Once we have more strongly typed classes for suites, this shouldn't be necessary. # TODO: Leaving these two paragraphs as placeholders for later development. # "This Expectation suite was first generated by {BasicDatasetProfiler} on {date}, using version {xxx} of Great Expectations.", # "{name}, {name}, and {name} have also contributed additions and revisions.", # ???: Should converting to markdown be the renderer's job, or the view's job? # Renderer is easier, but will end up mixing HTML strings with content_block info. # Group EVRs by column #TODO: When we implement a ValidationResultSuite class, this method will move there. | 2.069104 | 2 |
tests/tests.py | dparrini/python-comtrade | 39 | 6631650 | <gh_stars>10-100
import datetime as dt
import math
import os
import struct
import time
import unittest
import comtrade
from comtrade import Comtrade
COMTRADE_SAMPLE_1_CFG = """STATION_NAME,EQUIPMENT,2001
2,1A,1D
1, IA ,,,A,2.762,0,0, -32768,32767,1,1,S
1, Diff Trip A ,,,0
60
0
0,2
01/01/2000, 10:30:00.228000
01/01/2000,10:30:00.722000
ASCII
1
"""
COMTRADE_SAMPLE_1_CFG_LAZY = """,,1999
2,1A,1D
1,,,,A,2.762,0,0, -32768,32767,1,1,S
1,,,,
0
0,2
,
ASCII
1
"""
COMTRADE_SAMPLE_1_DAT = "1, 0, 0,0\n2,347,-1,1\n"
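# Each ASCII .dat row above follows the COMTRADE layout: sample number, timestamp
# (here in microseconds), then one value per analog channel and one per status
# channel -- e.g. "2,347,-1,1" is sample 2 at t=347 us with raw analog value -1
# and status bit 1.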
COMTRADE_SAMPLE_3_CFG = """STATION_NAME,EQUIPMENT,2013
2,1A,1D
1, Signal,,,A,1,0,0,-1,1,1,1,S
1, Status,,,0
60
0
0,{samples}
01/01/2019,00:00:00.000000000
01/01/2019,00:00:{seconds:012.9f}
{format}
1
"""
COMTRADE_SAMPLE_4_CFG_FILE = "tests/sample_files/sample_bin.cfg"
COMTRADE_SAMPLE_4_DAT_FILE = "tests/sample_files/sample_bin.dat"
class TestTimestamp(unittest.TestCase):
"""Test timestamp parsing."""
def setUp(self):
self.complete_date = "01/01/2000"
self.complete_time = "10:30:00.228000"
self.incomplete_fraction_date = "01/01/2020"
self.incomplete_fraction_time = "00:00:00.23"
self.nanoseconds = "00:00:00.123456789"
def test_complete_date(self):
day, month, year = comtrade._get_date(self.complete_date)
self.assertEqual(day, 1)
self.assertEqual(month, 1)
self.assertEqual(year, 2000)
def test_complete_time(self):
hour, minute, second, microsecond, \
in_nanoseconds = comtrade._get_time(self.complete_time)
self.assertEqual(hour, 10)
self.assertEqual(minute, 30)
self.assertEqual(second, 0)
self.assertEqual(microsecond, 228000)
self.assertFalse(in_nanoseconds)
def test_incomplete_fraction_time(self):
hour, minute, second, microsecond, \
in_nanoseconds = comtrade._get_time(self.incomplete_fraction_time)
self.assertEqual(hour, 0)
self.assertEqual(minute, 0)
self.assertEqual(second, 0)
self.assertEqual(microsecond, 230000)
self.assertFalse(in_nanoseconds)
def test_nanoseconds(self):
ignore = True
hour, minute, second, microsecond, \
in_nanoseconds = comtrade._get_time(self.nanoseconds, ignore)
self.assertEqual(hour, 0)
self.assertEqual(minute, 0)
self.assertEqual(second, 0)
        self.assertEqual(microsecond, 123456)  # the trailing .789 (nanoseconds) is dropped
self.assertTrue(in_nanoseconds)
class TestCfg1Reading(unittest.TestCase):
"""String CFG and DAT 1999 pair test case."""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.read(COMTRADE_SAMPLE_1_CFG, COMTRADE_SAMPLE_1_DAT)
def test_station(self):
self.assertEqual(self.comtrade.station_name, "STATION_NAME")
def test_rec_dev_id(self):
self.assertEqual(self.comtrade.rec_dev_id, "EQUIPMENT")
def test_rev_year(self):
self.assertEqual(self.comtrade.rev_year, "2001")
def test_1a(self):
self.assertEqual(self.comtrade.analog_count, 1)
def test_1d(self):
self.assertEqual(self.comtrade.status_count, 1)
def test_2c(self):
self.assertEqual(self.comtrade.channels_count, 2)
def test_frequency(self):
self.assertEqual(float(self.comtrade.frequency), 60.0)
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, 2)
def test_timestamp(self):
self.assertEqual(self.comtrade.start_timestamp,
dt.datetime(2000, 1, 1, 10, 30, 0, 228000, None))
self.assertEqual(self.comtrade.trigger_timestamp,
dt.datetime(2000, 1, 1, 10, 30, 0, 722000, None))
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_MICROSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, "ASCII")
class TestCfg1LazyReading(unittest.TestCase):
"""String CFG and DAT 1999 pair test case, abusing missing values in CFG."""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.read(COMTRADE_SAMPLE_1_CFG_LAZY, COMTRADE_SAMPLE_1_DAT)
def test_station(self):
self.assertEqual(self.comtrade.station_name, "")
def test_rec_dev_id(self):
self.assertEqual(self.comtrade.rec_dev_id, "")
def test_rev_year(self):
self.assertEqual(self.comtrade.rev_year, "1999")
def test_1a(self):
self.assertEqual(self.comtrade.analog_count, 1)
def test_1d(self):
self.assertEqual(self.comtrade.status_count, 1)
def test_2c(self):
self.assertEqual(self.comtrade.channels_count, 2)
def test_frequency(self):
self.assertEqual(float(self.comtrade.frequency), 0.0)
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, 2)
def test_timestamp(self):
self.assertEqual(self.comtrade.start_timestamp,
dt.datetime(1, 1, 1, 0, 0, 0, 0, None))
self.assertEqual(self.comtrade.trigger_timestamp,
dt.datetime(1, 1, 1, 0, 0, 0, 0, None))
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_MICROSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, "ASCII")
class TestCffReading(unittest.TestCase):
"""CFF 2013 file test case."""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load("tests/sample_files/sample_ascii.cff")
def test_station(self):
self.assertEqual(self.comtrade.station_name, "SMARTSTATION")
def test_rec_dev_id(self):
self.assertEqual(self.comtrade.rec_dev_id, "IED123")
def test_rev_year(self):
self.assertEqual(self.comtrade.rev_year, "2013")
def test_1a(self):
self.assertEqual(self.comtrade.analog_count, 4)
def test_1d(self):
self.assertEqual(self.comtrade.status_count, 4)
def test_2c(self):
self.assertEqual(self.comtrade.channels_count, 8)
def test_frequency(self):
self.assertEqual(float(self.comtrade.frequency), 60.0)
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, 40)
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_MICROSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, "ASCII")
def test_hdr(self):
self.assertIsNone(self.comtrade.hdr)
def test_inf(self):
self.assertIsNone(self.comtrade.inf)
class TestCfg2Reading(TestCffReading):
"""CFG and DAT 2013 file pair test case (same content as the CFF test).
"""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load("tests/sample_files/sample_ascii.cfg")
def test_hdr(self):
self.assertIsNone(self.comtrade.hdr)
def test_inf(self):
self.assertIsNone(self.comtrade.inf)
class TestCfgAsciiEncodingReading(TestCffReading):
"""CFG and DAT 2013 file pair test case (same content as the CFF test), but
this time with the file using ASCII text encoding.
"""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load("tests/sample_files/sample_ascii_utf-8.cfg", "tests/sample_files/sample_ascii.dat")
def test_hdr(self):
self.assertIsNone(self.comtrade.hdr)
def test_inf(self):
self.assertIsNone(self.comtrade.inf)
def test_station(self):
self.assertEqual(self.comtrade.station_name, "SMARTSTATION testing text encoding: hgvcj터파크387")
class TestBinaryReading(unittest.TestCase):
dat_format = comtrade.TYPE_BINARY
filename = "temp_binary"
def parseAnalog(self, analog_value):
return int(analog_value)
def getFormat(self):
if struct.calcsize("L") == 4:
return 'Lf h H'
else:
return 'If h H'
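    # The format string mirrors the record written in setUp(): a 4-byte unsigned
    # sample counter ('L' or 'I', whichever is 4 bytes on this platform), a float32
    # timestamp ('f'), an int16 analog value ('h') and a uint16 status word ('H').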
def setUp(self):
# Sample auto-generated Comtrade file.
timebase = 1e+6 # seconds to microseconds
timemult = 1
max_time = 2
self.samples = 10000
sample_freq = max_time / self.samples
# Create temporary cfg file.
cfg_contents = COMTRADE_SAMPLE_3_CFG.format(samples=self.samples,
seconds=max_time,
format=self.dat_format)
file_path = os.path.abspath("tests/{}.cfg".format(self.filename))
with open(file_path, 'w') as file:
file.write(cfg_contents)
# Struct object to write data.
datawriter = struct.Struct(self.getFormat())
# Create temporary binary dat file, with one analog and one status
# channel.
max_time = 2.0
def analog(t: float) -> float:
return math.cos(2*math.pi*60*t)*100
        def status(t: float) -> int:
            return 1 if t > max_time / 2.0 else 0
file_path = os.path.abspath("tests/{}.dat".format(self.filename))
with open(file_path, 'wb') as file:
for isample in range(0, self.samples):
t = isample * sample_freq
t_us = t * timebase * timemult
y_analog = self.parseAnalog(analog(t))
y_status = status(t)
                file.write(datawriter.pack(isample + 1, t_us, y_analog, y_status))
# Load file
file_path = os.path.abspath("tests/{}".format(self.filename))
self.comtrade = Comtrade(ignore_warnings=True)
        self.comtrade.load(file_path + ".cfg")
def tearDown(self):
# Remove temporary files.
os.remove("tests/{}.cfg".format(self.filename))
os.remove("tests/{}.dat".format(self.filename))
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, self.samples)
self.assertEqual(len(self.comtrade.analog[0]), self.samples)
self.assertEqual(len(self.comtrade.status[0]), self.samples)
self.assertEqual(len(self.comtrade.time), self.samples)
def test_analog_channels(self):
self.assertEqual(self.comtrade.analog_count, 1)
self.assertEqual(len(self.comtrade.analog), 1)
def test_status_channels(self):
self.assertEqual(self.comtrade.status_count, 1)
self.assertEqual(len(self.comtrade.status), 1)
def test_max_analog_value(self):
        tolerance = 2
        self.assertLessEqual(100 - max(self.comtrade.analog[0]), tolerance)
def test_last_status_value(self):
self.assertEqual(self.comtrade.status[0][-1], 1)
def test_timestamps(self):
self.assertEqual(self.comtrade.start_timestamp,
dt.datetime(2019, 1, 1, 0, 0, 0, 0, None))
self.assertEqual(self.comtrade.trigger_timestamp,
dt.datetime(2019, 1, 1, 0, 0, 2, 0, None))
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_NANOSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, self.dat_format)
class TestBinary32Reading(TestBinaryReading):
dat_format = comtrade.TYPE_BINARY32
filename = "temp_binary32"
def parseAnalog(self, analog_value):
return int(analog_value)
def getFormat(self):
if struct.calcsize("L") == 4:
return 'Lf l H'
else:
return 'If i H'
class TestFloat32Reading(TestBinaryReading):
dat_format = comtrade.TYPE_FLOAT32
filename = "temp_float32"
def parseAnalog(self, analog_value):
return int(analog_value)
def getFormat(self):
if struct.calcsize("L") == 4:
return 'Lf f H'
else:
return 'If f H'
class TestRealBinaryReading(unittest.TestCase):
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load(COMTRADE_SAMPLE_4_CFG_FILE,
COMTRADE_SAMPLE_4_DAT_FILE)
def test_value_conversion(self):
va_4 = -23425 * 0.000361849
self.assertAlmostEqual(va_4, self.comtrade.analog[0][3], places=6)
def test_values(self):
va = self.comtrade.analog[0][0]
vb = self.comtrade.analog[1][0]
vc = self.comtrade.analog[2][0]
vn = self.comtrade.analog[3][0]
# sum of phase-ground voltages is approximately 0
self.assertAlmostEqual(0.0, va + vb + vc + vn, 1)
def test_time(self):
time_diff = self.comtrade.time[2] - self.comtrade.time[1]
sample_rate = self.comtrade.cfg.sample_rates[0][0]
self.assertAlmostEqual(1.0 / sample_rate, time_diff)
class TestEncodingHandling(unittest.TestCase):
def test_utf8_check(self):
self.assertTrue(comtrade._file_is_utf8("tests/sample_files/sample_ascii_utf-8.cfg"))
self.assertFalse(comtrade._file_is_utf8("tests/sample_files/sample_ascii.cfg"))
def test_loading_iso8859_1(self):
obj = comtrade.Comtrade()
obj.load("tests/sample_files/sample_iso8859-1.cfg", encoding="iso-8859-1")
self.assertEqual(obj.cfg.station_name, "Estação de Medição")
self.assertEqual(obj.cfg.rec_dev_id, "Oscilógrafo")
if __name__ == "__main__":
unittest.main()
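# Minimal usage sketch of the API exercised by these tests (file names below are
# placeholders, not files shipped with the package):
#
#   rec = Comtrade(ignore_warnings=True)
#   rec.load("recording.cfg", "recording.dat")   # or a single .cff file
#   print(rec.station_name, rec.total_samples)
#   ia = rec.analog[0]      # analog samples of the first channel
#   trip = rec.status[0]    # status samples of the first channel
#   t = rec.time            # sample times in seconds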
| import datetime as dt
import math
import os
import struct
import time
import unittest
import comtrade
from comtrade import Comtrade
COMTRADE_SAMPLE_1_CFG = """STATION_NAME,EQUIPMENT,2001
2,1A,1D
1, IA ,,,A,2.762,0,0, -32768,32767,1,1,S
1, Diff Trip A ,,,0
60
0
0,2
01/01/2000, 10:30:00.228000
01/01/2000,10:30:00.722000
ASCII
1
"""
COMTRADE_SAMPLE_1_CFG_LAZY = """,,1999
2,1A,1D
1,,,,A,2.762,0,0, -32768,32767,1,1,S
1,,,,
0
0,2
,
ASCII
1
"""
COMTRADE_SAMPLE_1_DAT = "1, 0, 0,0\n2,347,-1,1\n"
COMTRADE_SAMPLE_3_CFG = """STATION_NAME,EQUIPMENT,2013
2,1A,1D
1, Signal,,,A,1,0,0,-1,1,1,1,S
1, Status,,,0
60
0
0,{samples}
01/01/2019,00:00:00.000000000
01/01/2019,00:00:{seconds:012.9f}
{format}
1
"""
COMTRADE_SAMPLE_4_CFG_FILE = "tests/sample_files/sample_bin.cfg"
COMTRADE_SAMPLE_4_DAT_FILE = "tests/sample_files/sample_bin.dat"
class TestTimestamp(unittest.TestCase):
"""Test timestamp parsing."""
def setUp(self):
self.complete_date = "01/01/2000"
self.complete_time = "10:30:00.228000"
self.incomplete_fraction_date = "01/01/2020"
self.incomplete_fraction_time = "00:00:00.23"
self.nanoseconds = "00:00:00.123456789"
def test_complete_date(self):
day, month, year = comtrade._get_date(self.complete_date)
self.assertEqual(day, 1)
self.assertEqual(month, 1)
self.assertEqual(year, 2000)
def test_complete_time(self):
hour, minute, second, microsecond, \
in_nanoseconds = comtrade._get_time(self.complete_time)
self.assertEqual(hour, 10)
self.assertEqual(minute, 30)
self.assertEqual(second, 0)
self.assertEqual(microsecond, 228000)
self.assertFalse(in_nanoseconds)
def test_incomplete_fraction_time(self):
hour, minute, second, microsecond, \
in_nanoseconds = comtrade._get_time(self.incomplete_fraction_time)
self.assertEqual(hour, 0)
self.assertEqual(minute, 0)
self.assertEqual(second, 0)
self.assertEqual(microsecond, 230000)
self.assertFalse(in_nanoseconds)
def test_nanoseconds(self):
ignore = True
hour, minute, second, microsecond, \
in_nanoseconds = comtrade._get_time(self.nanoseconds, ignore)
self.assertEqual(hour, 0)
self.assertEqual(minute, 0)
self.assertEqual(second, 0)
        self.assertEqual(microsecond, 123456)  # the trailing .789 (nanoseconds) is dropped
self.assertTrue(in_nanoseconds)
class TestCfg1Reading(unittest.TestCase):
"""String CFG and DAT 1999 pair test case."""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.read(COMTRADE_SAMPLE_1_CFG, COMTRADE_SAMPLE_1_DAT)
def test_station(self):
self.assertEqual(self.comtrade.station_name, "STATION_NAME")
def test_rec_dev_id(self):
self.assertEqual(self.comtrade.rec_dev_id, "EQUIPMENT")
def test_rev_year(self):
self.assertEqual(self.comtrade.rev_year, "2001")
def test_1a(self):
self.assertEqual(self.comtrade.analog_count, 1)
def test_1d(self):
self.assertEqual(self.comtrade.status_count, 1)
def test_2c(self):
self.assertEqual(self.comtrade.channels_count, 2)
def test_frequency(self):
self.assertEqual(float(self.comtrade.frequency), 60.0)
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, 2)
def test_timestamp(self):
self.assertEqual(self.comtrade.start_timestamp,
dt.datetime(2000, 1, 1, 10, 30, 0, 228000, None))
self.assertEqual(self.comtrade.trigger_timestamp,
dt.datetime(2000, 1, 1, 10, 30, 0, 722000, None))
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_MICROSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, "ASCII")
class TestCfg1LazyReading(unittest.TestCase):
"""String CFG and DAT 1999 pair test case, abusing missing values in CFG."""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.read(COMTRADE_SAMPLE_1_CFG_LAZY, COMTRADE_SAMPLE_1_DAT)
def test_station(self):
self.assertEqual(self.comtrade.station_name, "")
def test_rec_dev_id(self):
self.assertEqual(self.comtrade.rec_dev_id, "")
def test_rev_year(self):
self.assertEqual(self.comtrade.rev_year, "1999")
def test_1a(self):
self.assertEqual(self.comtrade.analog_count, 1)
def test_1d(self):
self.assertEqual(self.comtrade.status_count, 1)
def test_2c(self):
self.assertEqual(self.comtrade.channels_count, 2)
def test_frequency(self):
self.assertEqual(float(self.comtrade.frequency), 0.0)
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, 2)
def test_timestamp(self):
self.assertEqual(self.comtrade.start_timestamp,
dt.datetime(1, 1, 1, 0, 0, 0, 0, None))
self.assertEqual(self.comtrade.trigger_timestamp,
dt.datetime(1, 1, 1, 0, 0, 0, 0, None))
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_MICROSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, "ASCII")
class TestCffReading(unittest.TestCase):
"""CFF 2013 file test case."""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load("tests/sample_files/sample_ascii.cff")
def test_station(self):
self.assertEqual(self.comtrade.station_name, "SMARTSTATION")
def test_rec_dev_id(self):
self.assertEqual(self.comtrade.rec_dev_id, "IED123")
def test_rev_year(self):
self.assertEqual(self.comtrade.rev_year, "2013")
def test_1a(self):
self.assertEqual(self.comtrade.analog_count, 4)
def test_1d(self):
self.assertEqual(self.comtrade.status_count, 4)
def test_2c(self):
self.assertEqual(self.comtrade.channels_count, 8)
def test_frequency(self):
self.assertEqual(float(self.comtrade.frequency), 60.0)
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, 40)
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_MICROSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, "ASCII")
def test_hdr(self):
self.assertIsNone(self.comtrade.hdr)
def test_inf(self):
self.assertIsNone(self.comtrade.inf)
class TestCfg2Reading(TestCffReading):
"""CFG and DAT 2013 file pair test case (same content as the CFF test).
"""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load("tests/sample_files/sample_ascii.cfg")
def test_hdr(self):
self.assertIsNone(self.comtrade.hdr)
def test_inf(self):
self.assertIsNone(self.comtrade.inf)
class TestCfgAsciiEncodingReading(TestCffReading):
"""CFG and DAT 2013 file pair test case (same content as the CFF test), but
this time with the file using ASCII text encoding.
"""
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load("tests/sample_files/sample_ascii_utf-8.cfg", "tests/sample_files/sample_ascii.dat")
def test_hdr(self):
self.assertIsNone(self.comtrade.hdr)
def test_inf(self):
self.assertIsNone(self.comtrade.inf)
def test_station(self):
self.assertEqual(self.comtrade.station_name, "SMARTSTATION testing text encoding: hgvcj터파크387")
class TestBinaryReading(unittest.TestCase):
dat_format = comtrade.TYPE_BINARY
filename = "temp_binary"
def parseAnalog(self, analog_value):
return int(analog_value)
def getFormat(self):
if struct.calcsize("L") == 4:
return 'Lf h H'
else:
return 'If h H'
def setUp(self):
# Sample auto-generated Comtrade file.
timebase = 1e+6 # seconds to microseconds
timemult = 1
max_time = 2
self.samples = 10000
sample_freq = max_time / self.samples
# Create temporary cfg file.
cfg_contents = COMTRADE_SAMPLE_3_CFG.format(samples=self.samples,
seconds=max_time,
format=self.dat_format)
file_path = os.path.abspath("tests/{}.cfg".format(self.filename))
with open(file_path, 'w') as file:
file.write(cfg_contents)
# Struct object to write data.
datawriter = struct.Struct(self.getFormat())
# Create temporary binary dat file, with one analog and one status
# channel.
max_time = 2.0
def analog(t: float) -> float:
return math.cos(2*math.pi*60*t)*100
        def status(t: float) -> int:
            return 1 if t > max_time / 2.0 else 0
file_path = os.path.abspath("tests/{}.dat".format(self.filename))
with open(file_path, 'wb') as file:
for isample in range(0, self.samples):
t = isample * sample_freq
t_us = t * timebase * timemult
y_analog = self.parseAnalog(analog(t))
y_status = status(t)
                file.write(datawriter.pack(isample + 1, t_us, y_analog, y_status))
# Load file
file_path = os.path.abspath("tests/{}".format(self.filename))
self.comtrade = Comtrade(ignore_warnings=True)
        self.comtrade.load(file_path + ".cfg")
def tearDown(self):
# Remove temporary files.
os.remove("tests/{}.cfg".format(self.filename))
os.remove("tests/{}.dat".format(self.filename))
def test_total_samples(self):
self.assertEqual(self.comtrade.total_samples, self.samples)
self.assertEqual(len(self.comtrade.analog[0]), self.samples)
self.assertEqual(len(self.comtrade.status[0]), self.samples)
self.assertEqual(len(self.comtrade.time), self.samples)
def test_analog_channels(self):
self.assertEqual(self.comtrade.analog_count, 1)
self.assertEqual(len(self.comtrade.analog), 1)
def test_status_channels(self):
self.assertEqual(self.comtrade.status_count, 1)
self.assertEqual(len(self.comtrade.status), 1)
def test_max_analog_value(self):
        tolerance = 2
        self.assertLessEqual(100 - max(self.comtrade.analog[0]), tolerance)
def test_last_status_value(self):
self.assertEqual(self.comtrade.status[0][-1], 1)
def test_timestamps(self):
self.assertEqual(self.comtrade.start_timestamp,
dt.datetime(2019, 1, 1, 0, 0, 0, 0, None))
self.assertEqual(self.comtrade.trigger_timestamp,
dt.datetime(2019, 1, 1, 0, 0, 2, 0, None))
def test_time_base(self):
self.assertEqual(self.comtrade.time_base,
self.comtrade.cfg.TIME_BASE_NANOSEC)
def test_ft(self):
self.assertEqual(self.comtrade.ft, self.dat_format)
class TestBinary32Reading(TestBinaryReading):
dat_format = comtrade.TYPE_BINARY32
filename = "temp_binary32"
def parseAnalog(self, analog_value):
return int(analog_value)
def getFormat(self):
if struct.calcsize("L") == 4:
return 'Lf l H'
else:
return 'If i H'
class TestFloat32Reading(TestBinaryReading):
dat_format = comtrade.TYPE_FLOAT32
filename = "temp_float32"
def parseAnalog(self, analog_value):
return int(analog_value)
def getFormat(self):
if struct.calcsize("L") == 4:
return 'Lf f H'
else:
return 'If f H'
class TestRealBinaryReading(unittest.TestCase):
def setUp(self):
self.comtrade = Comtrade(ignore_warnings=True)
self.comtrade.load(COMTRADE_SAMPLE_4_CFG_FILE,
COMTRADE_SAMPLE_4_DAT_FILE)
def test_value_conversion(self):
va_4 = -23425 * 0.000361849
self.assertAlmostEqual(va_4, self.comtrade.analog[0][3], places=6)
def test_values(self):
va = self.comtrade.analog[0][0]
vb = self.comtrade.analog[1][0]
vc = self.comtrade.analog[2][0]
vn = self.comtrade.analog[3][0]
# sum of phase-ground voltages is approximately 0
self.assertAlmostEqual(0.0, va + vb + vc + vn, 1)
def test_time(self):
time_diff = self.comtrade.time[2] - self.comtrade.time[1]
sample_rate = self.comtrade.cfg.sample_rates[0][0]
self.assertAlmostEqual(1.0 / sample_rate, time_diff)
class TestEncodingHandling(unittest.TestCase):
def test_utf8_check(self):
self.assertTrue(comtrade._file_is_utf8("tests/sample_files/sample_ascii_utf-8.cfg"))
self.assertFalse(comtrade._file_is_utf8("tests/sample_files/sample_ascii.cfg"))
def test_loading_iso8859_1(self):
obj = comtrade.Comtrade()
obj.load("tests/sample_files/sample_iso8859-1.cfg", encoding="iso-8859-1")
self.assertEqual(obj.cfg.station_name, "Estação de Medição")
self.assertEqual(obj.cfg.rec_dev_id, "Oscilógrafo")
if __name__ == "__main__":
unittest.main() | en | 0.668267 | STATION_NAME,EQUIPMENT,2001 2,1A,1D 1, IA ,,,A,2.762,0,0, -32768,32767,1,1,S 1, Diff Trip A ,,,0 60 0 0,2 01/01/2000, 10:30:00.228000 01/01/2000,10:30:00.722000 ASCII 1 ,,1999 2,1A,1D 1,,,,A,2.762,0,0, -32768,32767,1,1,S 1,,,, 0 0,2 , ASCII 1 STATION_NAME,EQUIPMENT,2013 2,1A,1D 1, Signal,,,A,1,0,0,-1,1,1,1,S 1, Status,,,0 60 0 0,{samples} 01/01/2019,00:00:00.000000000 01/01/2019,00:00:{seconds:012.9f} {format} 1 Test timestamp parsing. # s the decimal .789 String CFG and DAT 1999 pair test case. String CFG and DAT 1999 pair test case, abusing missing values in CFG. CFF 2013 file test case. CFG and DAT 2013 file pair test case (same content as the CFF test). CFG and DAT 2013 file pair test case (same content as the CFF test), but this time with the file using ASCII text encoding. # Sample auto-generated Comtrade file. # seconds to microseconds # Create temporary cfg file. # Struct object to write data. # Create temporary binary dat file, with one analog and one status # channel. # Load file # Remove temporary files. # sum of phase-ground voltages is approximately 0 | 2.638741 | 3 |