{
"source": "jgough/opensearch-curator",
"score": 2
}
#### File: test/integration/test_cli.py
```python
import opensearchpy
import curator
import os
import json
import string, random, tempfile
import click
from click import testing as clicktest
from mock import patch, Mock
from . import CuratorTestCase
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
port = int(port) if port else 9200
class TestCLIMethods(CuratorTestCase):
def test_bad_client_config(self):
self.create_indices(10)
self.write_config(
self.args['configfile'],
testvars.bad_client_config.format(host, port)
)
self.write_config(self.args['actionfile'],
testvars.disabled_proto.format('close', 'delete_indices'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
'--dry-run',
self.args['actionfile']
],
)
self.assertEqual(1, result.exit_code)
def test_no_config(self):
# This test checks whether localhost:9200 is provided if no hosts or
# port are in the configuration. But in testing, sometimes
# TEST_ES_SERVER is set to something other than localhost:9200. In this
# case, the test here would fail. The if statement at the end now
# compensates. See https://github.com/elastic/curator/issues/843
localtest = False
if (host == 'localhost' or host == '127.0.0.1') and \
port == 9200:
localtest = True
self.create_indices(10)
self.write_config(
self.args['configfile'],
' \n'
)
self.write_config(self.args['actionfile'],
testvars.disabled_proto.format('close', 'delete_indices'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
'--dry-run',
self.args['actionfile']
],
)
if localtest:
self.assertEqual(0, result.exit_code)
else:
self.assertEqual(-1, result.exit_code)
def test_no_logging_config(self):
self.create_indices(10)
self.write_config(
self.args['configfile'],
testvars.no_logging_config.format(host, port)
)
self.write_config(self.args['actionfile'],
testvars.disabled_proto.format('close', 'delete_indices'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
'--dry-run',
self.args['actionfile']
],
)
self.assertEqual(0, result.exit_code)
def test_logging_none(self):
self.create_indices(10)
self.write_config(
self.args['configfile'],
testvars.none_logging_config.format(host, port)
)
self.write_config(self.args['actionfile'],
testvars.disabled_proto.format('close', 'delete_indices'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
'--dry-run',
self.args['actionfile']
],
)
self.assertEqual(0, result.exit_code)
def test_invalid_action(self):
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.optionless_proto.format('invalid_action'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
self.assertEqual(1, result.exit_code)
def test_action_is_None(self):
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.optionless_proto.format(' '))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
self.assertEqual(
type(curator.ConfigurationError()), type(result.exception))
def test_no_action(self):
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.actionless_proto)
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
self.assertEqual(
type(curator.ConfigurationError()), type(result.exception))
def test_dry_run(self):
self.create_indices(10)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.delete_proto.format(
'age', 'name', 'older', '\'%Y.%m.%d\'', 'days', 5, ' ', ' ', ' '
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
'--dry-run',
self.args['actionfile']
],
)
        self.assertEqual(10, len(curator.get_indices(self.client)))
def test_action_disabled(self):
self.create_indices(10)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.disabled_proto.format('close', 'delete_indices'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
        self.assertEqual(0, len(curator.get_indices(self.client)))
self.assertEqual(0, result.exit_code)
# I'll have to think up another way to create an exception.
# The exception that using "alias" created, a missing argument,
# is caught too early for this to actually run the test now :/
#
def test_continue_if_exception(self):
name = 'log1'
self.create_index(name)
self.create_index('log2')
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.continue_proto.format(
name, True, 'delete_indices', False
)
)
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
        self.assertEqual(0, len(curator.get_indices(self.client)))
self.assertEqual(0, result.exit_code)
def test_continue_if_exception_False(self):
name = 'log1'
self.create_index(name)
self.create_index('log2')
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.continue_proto.format(
name, False, 'delete_indices', False
)
)
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
        self.assertEqual(2, len(curator.get_indices(self.client)))
self.assertEqual(1, result.exit_code)
def test_no_options_in_action(self):
self.create_indices(10)
self.create_index('my_index') # Added for the ILM filter's sake
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.no_options_proto.format('delete_indices'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
'--dry-run',
self.args['actionfile']
],
)
self.assertEqual(0, result.exit_code)
```
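The host and port these tests target come from the `TEST_ES_SERVER` environment variable, split once at module import time. A minimal sketch of pointing a single test at a different cluster, assuming the suite is driven by pytest (the host name below is purely hypothetical):
```python
import os
# Must be set before the test module is imported, because host/port are read at import time.
os.environ["TEST_ES_SERVER"] = "opensearch.example.internal:9200"

import pytest
# Run a single integration test against that cluster.
pytest.main(["test/integration/test_cli.py", "-k", "test_no_logging_config"])
```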
#### File: test/integration/test_forcemerge.py
```python
import opensearchpy
import curator
import os
import json
import string
import random
import tempfile
from time import sleep
import click
from click import testing as clicktest
from mock import patch, Mock
from . import CuratorTestCase
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
port = int(port) if port else 9200
class TestActionFileforceMerge(CuratorTestCase):
def test_merge(self):
count = 1
idx = 'my_index'
self.create_index(idx)
self.add_docs(idx)
ilo1 = curator.IndexList(self.client)
ilo1._get_segment_counts()
self.assertEqual(3, ilo1.index_info[idx]['segments'])
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.forcemerge_test.format(count, 0.9))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
ilo2 = curator.IndexList(self.client)
# This stupid block is only for the benefit of Travis CI
# With Python 2.7 and ES 7.0, it apparently can finish testing before
# the segments have _reported_ as fully merged. This is forcing
# 3 checks before giving up and reporting the result.
for _ in range(0, 3):
self.client.indices.refresh(index=idx)
ilo2._get_segment_counts()
if ilo2.index_info[idx]['segments'] == count:
break
else:
sleep(1)
self.assertEqual(count, ilo2.index_info[idx]['segments'])
def test_extra_option(self):
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.bad_option_proto_test.format('forcemerge'))
test = clicktest.CliRunner()
result = test.invoke(
curator.cli,
[
'--config', self.args['configfile'],
self.args['actionfile']
],
)
self.assertEqual(1, result.exit_code)
class TestCLIforceMerge(CuratorTestCase):
def test_merge(self):
count = 1
idx = 'my_index'
self.create_index(idx)
self.add_docs(idx)
ilo1 = curator.IndexList(self.client)
ilo1._get_segment_counts()
self.assertEqual(3, ilo1.index_info[idx]['segments'])
args = self.get_runner_args()
args += [
'--config', self.args['configfile'],
'forcemerge',
'--max_num_segments', str(count),
'--delay', '0.9',
'--filter_list', '{"filtertype":"pattern","kind":"prefix","value":"my"}',
]
self.assertEqual(0, self.run_subprocess(args, logname='TestCLIforceMerge.test_merge'))
ilo2 = curator.IndexList(self.client)
# This stupid block is only for the benefit of Travis CI
# With Python 2.7 and ES 7.0, it apparently can finish testing before
# the segments have _reported_ as fully merged. This is forcing
# 3 checks before giving up and reporting the result.
for _ in range(0, 3):
self.client.indices.refresh(index=idx)
ilo2._get_segment_counts()
if ilo2.index_info[idx]['segments'] == count:
break
else:
sleep(1)
self.assertEqual(count, ilo2.index_info[idx]['segments'])
```
#### File: test/integration/test_reindex.py
```python
import opensearchpy
import curator
import os
import json
import string
import random
import tempfile
import click
from click import testing as clicktest
import time
from . import CuratorTestCase
from unittest.case import SkipTest
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
rhost, rport = os.environ.get('REMOTE_ES_SERVER', 'localhost:9201').split(':')
port = int(port) if port else 9200
rport = int(rport) if rport else 9201
class TestActionFileReindex(CuratorTestCase):
def test_reindex_manual(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_selected(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, 'REINDEX_SELECTION', dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_empty_list(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = '.tasks'
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, curator.get_indices(self.client)[0])
def test_reindex_selected_many_to_one(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
ver = curator.get_version(self.client)
if ver >= (7, 0, 0):
self.client.create(
index=source2, doc_type='doc', id=i, body={"doc" + i :'TEST DOCUMENT'})
else:
self.client.create(
index=source2, doc_type='doc', id=i, body={"doc" + i :'TEST DOCUMENT'})
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.client.indices.refresh(index=source2)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, 'REINDEX_SELECTION', dest)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.client.indices.refresh(index=dest)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_selected_empty_list_fail(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
self.client.create(
index=source2, doc_type='log', id=i,
body={"doc" + i :'TEST DOCUMENT'},
)
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex_empty_list.format('false', wait_interval, max_wait, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(_.exit_code, 1)
def test_reindex_selected_empty_list_pass(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
self.client.create(
index=source2, doc_type='log', id=i,
body={"doc" + i :'TEST DOCUMENT'},
)
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex_empty_list.format('true', wait_interval, max_wait, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(_.exit_code, 0)
def test_reindex_from_remote(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'my_dest'
expected = 6
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_migrate_from_remote(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'MIGRATION'
expected = 3
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
# And now the neat trick of verifying that the reindex worked to both
# indices, and they preserved their names
self.assertEqual(expected, self.client.count(index=source1)['count'])
self.assertEqual(expected, self.client.count(index=source2)['count'])
def test_reindex_migrate_from_remote_with_pre_suf_fixes(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'MIGRATION'
expected = 3
mpfx = 'pre-'
msfx = '-fix'
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.migration_reindex.format(
wait_interval,
max_wait,
mpfx,
msfx,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
# And now the neat trick of verifying that the reindex worked to both
# indices, and they preserved their names
        self.assertEqual(expected, self.client.count(index='{0}{1}{2}'.format(mpfx, source1, msfx))['count'])
        self.assertEqual(expected, self.client.count(index='{0}{1}{2}'.format(mpfx, source2, msfx))['count'])
def test_reindex_from_remote_no_connection(self):
wait_interval = 1
max_wait = 3
bad_port = 70000
dest = 'my_dest'
expected = 1
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, bad_port),
'REINDEX_SELECTION',
dest,
'my_'
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, _.exit_code)
def test_reindex_from_remote_no_indices(self):
wait_interval = 1
max_wait = 3
source1 = 'wrong1'
source2 = 'wrong2'
prefix = 'my_'
dest = 'my_dest'
expected = 1
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
except:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
self.assertEqual(expected, _.exit_code)
def test_reindex_into_alias(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
alias_body = {'aliases' : {dest : {}}}
self.client.indices.create(index='dummy', body=alias_body)
self.add_docs(source)
self.write_config(self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'], testvars.reindex.format(wait_interval, max_wait, source, dest)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_manual_date_math(self):
wait_interval = 1
max_wait = 3
source = '<source-{now/d}>'
dest = '<target-{now/d}>'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_bad_mapping(self):
# This test addresses GitHub issue #1260
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 1
ver = curator.get_version(self.client)
if ver < (7, 0, 0):
request_body = {
"settings": { "number_of_shards": 1, "number_of_replicas": 0},
"mappings": { "doc": { "properties": { "doc1": { "type": "keyword" }}}}
}
else:
request_body = {
"settings": { "number_of_shards": 1, "number_of_replicas": 0},
"mappings": { "properties": { "doc1": { "type": "keyword" }}}
}
self.client.indices.create(index=source, body=request_body)
self.add_docs(source)
# Create the dest index with a different mapping.
if ver < (7, 0, 0):
request_body['mappings']['doc']['properties']['doc1']['type'] = 'integer'
else:
request_body['mappings']['properties']['doc1']['type'] = 'integer'
self.client.indices.create(index=dest, body=request_body)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, _.exit_code)
```
#### File: test/unit/test_action_delete_indices.py
```python
from unittest import TestCase
from mock import Mock, patch
import opensearchpy
import curator
# Get test variables and constants from a single source
from . import testvars as testvars
class TestActionDeleteIndices(TestCase):
def test_init_raise(self):
self.assertRaises(TypeError, curator.DeleteIndices, 'invalid')
def test_init_raise_bad_master_timeout(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.indices.get_settings.return_value = testvars.settings_one
client.cluster.state.return_value = testvars.clu_state_one
client.indices.stats.return_value = testvars.stats_one
ilo = curator.IndexList(client)
self.assertRaises(TypeError, curator.DeleteIndices, ilo, 'invalid')
def test_init(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.indices.get_settings.return_value = testvars.settings_one
client.cluster.state.return_value = testvars.clu_state_one
client.indices.stats.return_value = testvars.stats_one
ilo = curator.IndexList(client)
do = curator.DeleteIndices(ilo)
self.assertEqual(ilo, do.index_list)
self.assertEqual(client, do.client)
def test_do_dry_run(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.indices.get_settings.return_value = testvars.settings_four
client.cluster.state.return_value = testvars.clu_state_four
client.indices.stats.return_value = testvars.stats_four
client.indices.delete.return_value = None
ilo = curator.IndexList(client)
do = curator.DeleteIndices(ilo)
self.assertIsNone(do.do_dry_run())
def test_do_action(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.indices.get_settings.return_value = testvars.settings_four
client.cluster.state.return_value = testvars.clu_state_four
client.indices.stats.return_value = testvars.stats_four
client.indices.delete.return_value = None
ilo = curator.IndexList(client)
do = curator.DeleteIndices(ilo)
self.assertIsNone(do.do_action())
def test_do_action_not_successful(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.indices.get_settings.return_value = testvars.settings_four
client.cluster.state.return_value = testvars.clu_state_four
client.indices.stats.return_value = testvars.stats_four
client.indices.delete.return_value = None
ilo = curator.IndexList(client)
do = curator.DeleteIndices(ilo)
self.assertIsNone(do.do_action())
def test_do_action_raises_exception(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.indices.get_settings.return_value = testvars.settings_four
client.cluster.state.return_value = testvars.clu_state_four
client.indices.stats.return_value = testvars.stats_four
client.indices.delete.return_value = None
client.indices.delete.side_effect = testvars.fake_fail
ilo = curator.IndexList(client)
do = curator.DeleteIndices(ilo)
self.assertRaises(curator.FailedExecution, do.do_action)
def test_verify_result_positive(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.indices.get_settings.return_value = testvars.settings_four
client.cluster.state.return_value = testvars.clu_state_four
client.indices.stats.return_value = testvars.stats_four
client.indices.delete.return_value = None
ilo = curator.IndexList(client)
do = curator.DeleteIndices(ilo)
self.assertTrue(do._verify_result([],2))
```
{
"source": "jgourmelen/django-comments-xtd",
"score": 2
}
#### File: django-comments-xtd/django_comments_xtd/utils.py
```python
from copy import copy
import hashlib
try:
import Queue as queue # python2
except ImportError:
import queue as queue # python3
import threading
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from django.core.mail import EmailMultiAlternatives
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.shortcuts import get_current_site
from django.utils.crypto import salted_hmac
from django_comments_xtd.conf import settings
mail_sent_queue = queue.Queue()
class EmailThread(threading.Thread):
def __init__(self, subject, body, from_email, recipient_list,
fail_silently, html):
self.subject = subject
self.body = body
self.recipient_list = recipient_list
self.from_email = from_email
self.fail_silently = fail_silently
self.html = html
threading.Thread.__init__(self)
def run(self):
_send_mail(self.subject, self.body, self.from_email,
self.recipient_list, self.fail_silently, self.html)
mail_sent_queue.put(True)
def _send_mail(subject, body, from_email, recipient_list,
fail_silently=False, html=None):
msg = EmailMultiAlternatives(subject, body, from_email, recipient_list)
if html:
msg.attach_alternative(html, "text/html")
msg.send(fail_silently)
def send_mail(subject, body, from_email, recipient_list,
fail_silently=False, html=None):
if settings.COMMENTS_XTD_THREADED_EMAILS:
EmailThread(subject, body, from_email, recipient_list,
fail_silently, html).start()
else:
_send_mail(subject, body, from_email, recipient_list,
fail_silently, html)
def get_app_model_options(comment=None, content_type=None):
"""
Get the app_model_option from COMMENTS_XTD_APP_MODEL_OPTIONS.
If a comment is given, the content_type is extracted from it. Otherwise,
    the content_type kwarg has to be provided. The function checks whether there
is a matching dictionary for the app_label.model of the content_type, and
returns it. It returns the default otherwise: { 'who_can_post': 'all',
'allow_flagging': False, 'allow_feedback': False, 'show_feedback': False }.
"""
default = {
'who_can_post': 'all', # Valid values: "users", "all"
'allow_flagging': False,
'allow_feedback': False,
'show_feedback': False,
}
if 'default' in settings.COMMENTS_XTD_APP_MODEL_OPTIONS:
        # The developer has overridden the default settings. Check whether
# the developer added all the expected keys in the dictionary.
has_missing_key = False
for k in default.keys():
if k not in settings.COMMENTS_XTD_APP_MODEL_OPTIONS['default']:
has_missing_key = True
if not has_missing_key:
default = copy(settings.COMMENTS_XTD_APP_MODEL_OPTIONS['default'])
if comment:
content_type = ContentType.objects.get_for_model(comment.content_object)
key = "%s.%s" % (content_type.app_label, content_type.model)
elif content_type:
key = content_type
else:
return default
try:
default.update(settings.COMMENTS_XTD_APP_MODEL_OPTIONS[key])
return default
except Exception:
return default
def get_current_site_id(request=None):
""" it's a shortcut """
return getattr(get_current_site(request), 'pk', 1) # fallback value
def get_html_id_suffix(object):
value = "%s" % object.__hash__()
suffix = salted_hmac(settings.COMMENTS_XTD_SALT, value).hexdigest()
return suffix
def get_user_avatar(comment):
path = hashlib.md5(comment.user_email.lower().encode('utf-8')).hexdigest()
param = urlencode({'s': 48})
return "//www.gravatar.com/avatar/%s?%s&d=identicon" % (path, param)
```
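For context, `get_app_model_options` resolves settings exactly as its docstring describes: a per-model entry updates the (possibly customised) `default` entry. A minimal settings sketch, where `"blog.post"` is a hypothetical `app_label.model` pair:
```python
# settings.py sketch -- "blog.post" is illustrative, not part of the library.
COMMENTS_XTD_APP_MODEL_OPTIONS = {
    'default': {
        'who_can_post': 'all',      # or 'users'
        'allow_flagging': False,
        'allow_feedback': False,
        'show_feedback': False,
    },
    'blog.post': {
        'allow_flagging': True,     # overrides the default for blog.Post comments
        'allow_feedback': True,
    },
}
```
With this configuration, `get_app_model_options(content_type='blog.post')` returns the default dictionary updated with the two overrides, while any other model falls back to `default`.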
{
"source": "J-Goutham/python_scripts",
"score": 3
}
#### File: J-Goutham/python_scripts/banner.py
```python
import socket
import sys
def retban():
    """Connect to the <ip> <port> given on the command line and print the service banner."""
    try:
        ip = sys.argv[1]
        port = int(sys.argv[2])  # socket.connect() expects an integer port, not a string
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((ip, port))
        print(s.recv(1024))
        s.close()
    except IndexError:
        print("Usage: banner.py <ip> <port>")
    except (socket.error, ValueError) as exc:
        print("Could not grab banner: %s" % exc)
retban()
```
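As a usage note, the script takes an address and port on the command line, e.g. `python banner.py 192.0.2.10 21`. A small variant with an explicit timeout, so scans of filtered ports do not hang indefinitely (a sketch; the 5-second default is arbitrary):
```python
import socket
import sys

def retban_with_timeout(ip, port, timeout=5.0):
    """Return the first 1024 bytes the service sends, or raise on failure."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(timeout)       # give up instead of hanging on filtered ports
        s.connect((ip, int(port)))
        return s.recv(1024)

if __name__ == "__main__":
    print(retban_with_timeout(sys.argv[1], sys.argv[2]))
```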
{
"source": "JGoutin/compilertools",
"score": 2
}
#### File: compilertools/compilers/llvm.py
```python
from compilertools.compilers import CompilerBase as _CompilerBase
from compilertools._utils import (
dump_version as _dump_version,
python_version as _python_version,
)
__all__ = ["Compiler"]
class Compiler(_CompilerBase):
"""LLVM Clang"""
@_CompilerBase._memoized_property
def option(self):
"""Compatibles Options
Returns
-------
dict
            Keys are option names, values are dicts of arguments with keys in
{'link', 'compile'}."""
return {"fast_fpmath": {"compile": "-Ofast"}}
@_CompilerBase._memoized_property
def api(self):
"""Compatibles API
Returns
-------
dict
Keys are API names, values are dict of arguments with keys in
{'link', 'compile'}."""
api = {}
if self.version >= 3.7:
api["openmp"] = {"compile": "-fopenmp", "link": "-fopenmp=libomp"}
return api
@_CompilerBase._memoized_property
def version(self):
"""Compiler version used.
Returns
-------
float
Version."""
if not self.current_compiler:
return
return _dump_version("clang")
@_CompilerBase._memoized_property
def python_build_version(self):
"""Compiler version that was used to build Python.
Returns
-------
float
Version."""
return _python_version("clang")
def _compile_args_matrix(self, arch, cpu):
"""Returns available Clang compiler options for the specified CPU architecture.
Parameters
----------
arch : str
CPU Architecture.
cpu : compilertools.processors.ProcessorBase subclass
Processor instance
Returns
-------
list of CompilerBase.Arg
Arguments matrix."""
# Generic optimisation
args = [[self.Arg(args=["-flto", "-O3"])]]
# Architecture specific optimisations
if arch == "x86_64":
args += [
# CPU Generic optimisations
[self.Arg(args="-m64")],
# CPU Instructions sets
[
self.Arg(
args=["-mavx512cd", "-mavx512f"],
suffix="avx512",
import_if=(
"AVX512F" in cpu.features
and "AVX512CD" in cpu.features
and cpu.os_supports_xsave
),
build_if=self.version >= 3.9,
),
self.Arg(
args="-mavx2",
suffix="avx2",
import_if=("AVX2" in cpu.features and cpu.os_supports_xsave),
),
self.Arg(
args="-mavx",
suffix="avx",
import_if=("AVX" in cpu.features and cpu.os_supports_xsave),
),
self.Arg(),
],
]
elif arch == "x86_32":
args += [
# CPU Generic optimisations
[self.Arg(args="-m32")],
# CPU Instructions sets
[
self.Arg(
args=["-mfpmath=sse", "-mavx2"],
suffix="avx2",
import_if=("AVX2" in cpu.features and cpu.os_supports_xsave),
),
self.Arg(
args=["-mfpmath=sse", "-mavx"],
suffix="avx",
import_if=("AVX" in cpu.features and cpu.os_supports_xsave),
),
self.Arg(
args=["-mfpmath=sse", "-msse4"],
suffix="sse4",
import_if=(
"SSE4_1" in cpu.features and "SSE4_2" in cpu.features
),
),
self.Arg(
args=["-mfpmath=sse", "-msse4.2"],
suffix="sse4_2",
import_if="SSE4_2" in cpu.features,
),
self.Arg(
args=["-mfpmath=sse", "-msse4.1"],
suffix="sse4_1",
import_if="SSE4_1" in cpu.features,
),
self.Arg(
args=["-mfpmath=sse", "-msse4a"],
suffix="sse4a",
import_if=(
"SSE4A" in cpu.features and cpu.vendor == "AuthenticAMD"
),
),
self.Arg(
args=["-mfpmath=sse", "-mssse3"],
suffix="ssse3",
import_if="SSSE3" in cpu.features,
),
self.Arg(
args=["-mfpmath=sse", "-msse2"],
suffix="sse2",
import_if="SSE2" in cpu.features,
),
self.Arg(
args=["-mfpmath=sse", "-msse"],
suffix="sse",
import_if="SSE" in cpu.features,
),
self.Arg(),
],
]
return args
def _compile_args_current_machine(self, arch, cpu):
"""Return auto-optimised Clang arguments for current machine.
Parameters
----------
arch : str
CPU Architecture.
cpu : compilertools.processors.ProcessorBase subclass
Processor instance.
Returns
-------
str
Best compiler arguments for current machine."""
args = ["-O3 -march=native -flto"]
if arch == "x86_32":
args.append("-m32")
if "SSE" in cpu.features:
args.append("-mfpmath=sse")
elif arch == "x86_64":
args.append("-m64")
return " ".join(args)
```
#### File: compilertools/compilertools/_core.py
```python
from compilertools.compilers import get_compiler
from compilertools._utils import always_str_list
__all__ = ["get_compile_args", "get_compiler", "suffix_from_args", "log_exception"]
def get_compile_args(
compiler=None, arch=None, current_machine=False, current_compiler=False
):
"""Gets compiler args OrderedDict for a specific compiler and architecture
combination.
Parameters
----------
compiler : str or compilertools.compilers.CompilerBase subclass
Compiler name or instance.
arch : str
Target architecture name.
    current_machine : bool
        If True, return only arguments compatible with the current machine's CPU.
    current_compiler : bool
        If True, return only arguments compatible with the current compiler.
Returns
-------
collections.OrderedDict
Arguments
"""
return get_compiler(compiler, current_compiler).compile_args(arch, current_machine)
def suffix_from_args(args, extension="", return_empty_suffixes=False):
"""Returns suffixes from args.
Parameters
----------
args : collections.OrderedDict
Arguments.
extension : str or list of str
File extensions.
return_empty_suffixes : bool
If True, return '' suffixes.
Returns
-------
list of str
Suffixes"""
suffixes = []
for suffix in args:
if extension:
if suffix:
for ext in always_str_list(extension):
suffixes.append(f".{suffix}{ext}")
elif return_empty_suffixes:
for ext in always_str_list(extension):
suffixes.append(ext)
elif suffix:
suffixes.append(f".{suffix}")
elif return_empty_suffixes:
suffixes.append("")
return suffixes
def log_exception():
"""
Log exception with system info.
Must be called in exception handler.
"""
from compilertools._config import CONFIG
if CONFIG.get("logging", True):
from logging import getLogger
import platform
getLogger("compilertools").exception(
"\n".join(
(
"Compilertools: Exception when trying to enable optimization, "
"Back to compatible mode.",
f" OS: {platform.platform()}",
f" CPU: {platform.processor()}",
f" Python: {platform.python_version()} "
f"[{platform.python_compiler()}]",
)
)
)
```
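Putting the two helpers together, a hedged usage sketch (the compiler name `"llvm"` matches the module above; the exact suffixes and argument lists depend on the machine and toolchain the call runs on):
```python
from compilertools._core import get_compile_args, suffix_from_args

# OrderedDict mapping file-name suffixes (e.g. 'avx2', 'sse2', '') to compiler arguments.
args = get_compile_args("llvm", arch="x86_64")

# Turn those suffixes into extension file suffixes such as '.avx2.so'.
suffixes = suffix_from_args(args, extension=".so", return_empty_suffixes=True)
print(suffixes)
```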
#### File: compilertools/processors/x86_32.py
```python
from compilertools.processors import ProcessorBase as _ProcessorBase
__all__ = ["Processor", "Cpuid"]
class Processor(_ProcessorBase):
"""x86-32 CPU"""
def __init__(self, current_machine=False):
_ProcessorBase.__init__(self, current_machine)
self._default["os_supports_xsave"] = False
self._default["cpuid_highest_extended_function"] = 0
@_ProcessorBase._memoized_property
def cpuid_highest_extended_function(self):
"""CPUID highest extended function.
Returns
-------
int
Related EAX value for CPUID."""
if not self.current_machine:
return
return Cpuid(0x80000000).eax
@_ProcessorBase._memoized_property
def vendor(self):
"""CPU's manufacturer ID from CPUID.
Returns
-------
str
Manufacturer ID."""
if not self.current_machine:
return
reg = Cpuid()
return Cpuid.registers_to_str(reg.ebx, reg.edx, reg.ecx)
@_ProcessorBase._memoized_property
def brand(self):
"""CPU's brand from CPUID
Returns
-------
str
Brand."""
if not self.current_machine:
return
if self.cpuid_highest_extended_function < 0x80000004:
return
brand_list = []
for eax in (0x80000002, 0x80000003, 0x80000004):
reg = Cpuid(eax)
brand_list += [reg.eax, reg.ebx, reg.ecx, reg.edx]
return Cpuid.registers_to_str(*brand_list)
@_ProcessorBase._memoized_property
def features(self):
"""CPU's features flags from CPUID
Returns
-------
list of str
Flags names.
References
----------
Reference: Linux kernel "arch/x86/include/asm/cpufeatures.h"
Feature naming convention:
Use "cpufeatures.h" quoted names in comments in priority, then use name from
"cpufeatures.h" constants.
Exceptions in names: PNI called SSE3 (like other SSE feature flags)"""
if not self.current_machine:
return
# Feature bits description
# feature_bits_desc: {(eax, ecx): registers_dict}
# registers_dict: {register_name: feature_dict}
# feature_dict: {bit: feature}
feature_bits_desc = {
# Intel
(1, 0): {
"edx": {
0: "FPU",
1: "VME",
2: "DE",
3: "PSE",
4: "TSC",
5: "MSR",
6: "PAE",
7: "MCE",
8: "CX8",
9: "APIC",
11: "SEP",
12: "MTRR",
13: "PGE",
14: "MCA",
15: "CMOV",
16: "PAT",
17: "PSE36",
18: "PN",
19: "CLFLUSH",
21: "DS",
22: "ACPI",
23: "MMX",
24: "FXSR",
25: "SSE",
26: "SSE2",
27: "SS",
28: "HT",
29: "TM",
30: "IA64",
31: "PBE",
},
"ecx": {
0: "SSE3",
1: "PCLMULQDQ",
2: "DTES64",
3: "MONITOR",
4: "DS_CPL",
5: "VMX",
6: "SMX",
7: "EST",
8: "TM2",
9: "SSSE3",
10: "CID",
11: "SDBG",
12: "FMA",
13: "CX16",
14: "XTPR",
15: "PDCM",
17: "PCID",
18: "DCA",
19: "SSE4_1",
20: "SSE4_2",
21: "X2APIC",
22: "MOVBE",
23: "POPCNT",
24: "TSC_DEADLINE_TIMER",
25: "AES",
26: "XSAVE",
27: "OSXSAVE",
28: "AVX",
29: "F16C",
30: "RDRAND",
31: "HYPERVISOR",
},
},
# Intel structured extended
(7, 0): {
"ebx": {
0: "FSGSBASE",
1: "TSC_ADJUST",
3: "BMI1",
4: "HLE",
5: "AVX2",
7: "SMEP",
8: "BMI2",
9: "ERMS",
10: "INVPCID",
11: "RTM",
12: "CQM",
14: "MPX",
15: "RDT_A",
16: "AVX512F",
17: "AVX512DQ",
18: "RDSEED",
19: "ADX",
20: "SMAP",
21: "AVX512IFMA",
23: "CLFLUSHOPT",
24: "CLWB",
26: "AVX512PF",
27: "AVX512ER",
28: "AVX512CD",
29: "SHA_NI",
30: "AVX512BW",
31: "AVX512VL",
},
"ecx": {
0: "PREFETCHWT1",
1: "AVX512VBMI",
2: "UMIP",
3: "PKU",
4: "OSPKE",
6: "AVX512_VBMI2",
8: "GFNI",
9: "VAES",
10: "VPCLMULQDQ",
11: "AVX512_VNNI",
12: "AVX512_BITALG",
14: "AVX512_VPOPCNTDQ",
16: "LA57",
22: "RDPID",
},
"edx": {2: "AVX512_4VNNIW", 3: "AVX512_4FMAPS"},
},
}
if self.cpuid_highest_extended_function >= 0x80000001:
# AMD
feature_bits_desc[(0x80000001, 0)] = {
"edx": {
11: "SYSCALL",
19: "MP",
20: "NX",
22: "MMXEXT",
25: "FXSR_OPT",
26: "PDPE1GB",
27: "RDTSCP",
29: "LM",
30: "3DNOWEXT",
31: "3DNOW",
},
"ecx": {
0: "LAHF_LM",
1: "CMP_LEGACY",
2: "SVM",
3: "EXTAPIC",
4: "CR8_LEGACY",
5: "ABM",
6: "SSE4A",
7: "MISALIGNSSE",
8: "3DNOWPREFETCH",
9: "OSVW",
10: "IBS",
11: "XOP",
12: "SKINIT",
13: "WDT",
15: "LWP",
16: "FMA4",
17: "TCE",
19: "NODEID_MSR",
21: "TBM",
22: "TOPOEXT",
23: "PERFCTR_CORE",
24: "PERFCTR_NB",
26: "BPEXT",
27: "PTSC",
28: "PERFCTR_LLC",
29: "MWAITX",
},
}
flags = set()
add_flag = flags.add
for eax, ecx in feature_bits_desc:
reg = Cpuid(eax, ecx)
reg_desc = feature_bits_desc[(eax, ecx)]
for exx in reg_desc:
bits = getattr(reg, exx)
reg_exx = reg_desc[exx]
for bit in reg_exx:
if ((1 << bit) & bits) != 0:
add_flag(reg_exx[bit])
return flags
@_ProcessorBase._memoized_property
def os_supports_xsave(self):
"""OS and CPU supports XSAVE instruction.
Returns
-------
bool
Supports if True."""
if not self.current_machine:
return
return "XSAVE" in self["features"] and "OSXSAVE" in self["features"]
class Cpuid:
"""Gets Processor CPUID.
Parameters
----------
eax_value : int
EAX register value
ecx_value : int
ECX register value"""
def __init__(self, eax_value=0, ecx_value=0):
bytecode = []
for reg, value in ((0x0, eax_value), (0x1, ecx_value)):
if value == 0:
# Sets to 0 (XOR reg, reg)
bytecode += (
# XOR
b"\x31",
# reg, reg
(0b11000000 | reg | (reg << 3)).to_bytes(1, "little"),
)
else:
# Sets other value (MOV reg, value)
bytecode += (
# MOV reg,
(0b10111000 | reg).to_bytes(1, "little"),
# Value
value.to_bytes(4, "little"),
)
self._bytecode_base = b"".join(
bytecode
+
# CPUID
[b"\x0F\xA2"]
)
def _get_cpuid(self, reg):
"""Gets specified register CPUID result.
Parameters
----------
reg : int
Register address.
Returns
-------
int
Raw CPUID Result as unsigned integer."""
from platform import system
from ctypes import (
c_void_p,
c_size_t,
c_ulong,
c_uint32,
c_int,
CFUNCTYPE,
memmove,
)
bytecode = [self._bytecode_base]
if reg != 0x0:
# MOV EAX, reg
bytecode += [
# MOV
b"\x89",
# EAX, reg
(0b11000000 | (reg << 3)).to_bytes(1, "little"),
]
bytecode = b"".join(
bytecode
+
# RET
[b"\xC3"]
)
is_windows = system() == "Windows"
size = len(bytecode)
if size < 0x1000:
size = 0x1000
try:
if is_windows:
from ctypes import windll
lib = windll.kernel32
valloc = lib.VirtualAlloc
valloc.argtypes = [c_void_p, c_size_t, c_ulong, c_ulong]
args = (None, size, 0x1000, 0x40)
else:
from ctypes import cdll
lib = cdll.LoadLibrary(None)
mprotect = lib.mprotect
valloc = lib.valloc
valloc.argtypes = [c_size_t]
args = (c_size_t(size),)
valloc.restype = c_void_p
address = valloc(*args)
if address == 0:
raise RuntimeError("Failed to allocate memory")
if not is_windows:
mprotect.restype = c_int
mprotect.argtypes = [c_void_p, c_size_t, c_int]
if mprotect(address, size, 1 | 2 | 4) != 0:
raise RuntimeError("Failed to memory protect")
memmove(address, bytecode, size)
result = CFUNCTYPE(c_uint32)(address)()
finally:
if is_windows:
lib.VirtualFree(c_ulong(address), 0, 0x8000)
else:
mprotect(address, size, 1 | 2)
lib.free(c_void_p(address))
return result
@property
def eax(self):
"""Get EAX register CPUID result.
Returns
-------
int
Raw EAX register value."""
return self._get_cpuid(0x0)
@property
def ebx(self):
"""Get EBX register CPUID result.
Returns
-------
int
            Raw EBX register value."""
return self._get_cpuid(0x3)
@property
def ecx(self):
"""Get ECX register CPUID result.
Returns
-------
int
            Raw ECX register value."""
return self._get_cpuid(0x1)
@property
def edx(self):
"""Get EDX register CPUID result.
Returns
-------
int
            Raw EDX register value."""
return self._get_cpuid(0x2)
@staticmethod
def registers_to_str(*uints):
"""Converts unsigned integers from CPUID register to ASCII string.
Parameters
----------
uints : int
Unsigned integers to concatenate and convert to string.
Returns
-------
str
Result."""
from struct import pack
return pack(f"<{'I' * len(uints)}", *uints).decode("ASCII").strip("\x00 ")
```
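A short sketch of how the classes above fit together. This only works on an x86 host where the generated CPUID bytecode can actually execute, and the output varies per CPU:
```python
from compilertools.processors.x86_32 import Processor, Cpuid

cpu = Processor(current_machine=True)
print(cpu.vendor)               # e.g. 'GenuineIntel' or 'AuthenticAMD'
print(cpu.brand)                # brand string from leaves 0x80000002-0x80000004
print("AVX2" in cpu.features)   # feature flags decoded from the bit tables above

# Raw access to a single CPUID leaf: EAX=1 returns the feature registers decoded above.
leaf1 = Cpuid(1)
print(hex(leaf1.edx), hex(leaf1.ecx))
```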
#### File: compilertools/compilertools/_utils.py
```python
import sys
from importlib import import_module
from collections.abc import MutableMapping
from functools import wraps
__all__ = ["always_str_list", "import_class", "BaseClass"]
def always_str_list(list_or_str):
"""Makes sure list_or_str is always a tuple or list
Parameters
----------
list_or_str: str or str iterable (tuple, list, ...)
Parameter to set as iterable if single str.
Returns
-------
Iterable
Iterable equivalent to list_or_str."""
if isinstance(list_or_str, str):
return (list_or_str,)
return list_or_str
def import_class(package_name, module_name, class_name, default_class):
"""Imports a sub module by name
Parameters
----------
package_name : str
Package name in compilertools.
module_name : str
Module name in package.
class_name : str
Class name in module.
default_class : class
Default class to return if import or attribute error
Returns
-------
class
Imported class."""
path = f"compilertools.{package_name}.{module_name}"
try:
import_module(path)
except ImportError:
return default_class
try:
return getattr(sys.modules[path], class_name)
except AttributeError:
return default_class
class BaseClass(MutableMapping):
"""Base class for data storage classes with default values, attribute/item access
and memoization"""
def __init__(self):
self._items = {}
self._default = {}
@staticmethod
def _memoized_property(class_property):
"""Property decorator with memoization"""
@property
@wraps(class_property)
def _property(self):
key = class_property.__name__
try:
return self._items.__getitem__(key)
except KeyError:
pass
value = class_property(self)
if value is None:
try:
value = self._default.__getitem__(key)
except KeyError:
pass
self._items[key] = value
return value
return _property
def __getitem__(self, key):
try:
return self._items.__getitem__(key)
except KeyError:
pass
if hasattr(self, key):
return getattr(self, key)
return self._default.__getitem__(key)
def __getattr__(self, name):
try:
return self._items.__getitem__(name)
except KeyError:
pass
try:
return self._default.__getitem__(name)
except KeyError:
raise AttributeError
def __setitem__(self, key, value):
return self._items.__setitem__(key, value)
def __delitem__(self, key):
return self._items.__delitem__(key)
def __len__(self):
return self._items.__len__()
def __iter__(self):
return self._items.__iter__()
def dump_version(command):
"""
Dump version for GCC/Clang compilers
Parameters
----------
command : str
Compiler command.
Returns
-------
float or None: version if found else None
"""
from subprocess import run, PIPE, CalledProcessError
try:
if (
command
not in run(
[command, "--version"],
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
check=True,
).stdout
):
# Command is linked to another compiler
return
except (FileNotFoundError, CalledProcessError):
return
for method in ("-dumpversion", "-dumpfullversion"):
process = run(
[command, method], stdout=PIPE, stderr=PIPE, universal_newlines=True
)
if not process.returncode and "." in process.stdout:
return float(".".join(process.stdout.split(".", 2)[:2]))
def python_version(name):
"""
Get compiler version used in Python.
Parameters
----------
name : str
Compiler name.
Returns
-------
float: version
"""
from platform import python_compiler
version_str = python_compiler()
if name not in version_str.lower():
return 0.0
return float(version_str.split(" ", 2)[1].rsplit(".", 1)[0])
```
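To make the memoization and default-value machinery concrete, a minimal sketch of a `BaseClass` subclass (the class and property names are invented for illustration):
```python
from compilertools._utils import BaseClass

class Thing(BaseClass):
    """Illustrative subclass: one memoized property plus a default fallback."""

    def __init__(self):
        BaseClass.__init__(self)
        self._default["answer"] = 0   # used if the property returns None

    @BaseClass._memoized_property
    def answer(self):
        print("computed once")
        return 42

thing = Thing()
print(thing.answer)      # computes and caches: prints "computed once", then 42
print(thing["answer"])   # item access hits the cached value, no recomputation
```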
#### File: compilertools/tests/test_build.py
```python
from distutils.command.build_ext import build_ext
BUILD_EXTENSION = build_ext.build_extension
GET_EXT_FILENAME = build_ext.get_ext_filename
GET_EXT_FULLNAME = build_ext.get_ext_fullname
GET_OUTPUTS = build_ext.get_outputs
BUILD_EXT_NEW = build_ext.__new__
def tests_get_build_compile_args():
"""Test get_build_compile_args"""
from distutils.sysconfig import get_config_var
from compilertools.compilers import CompilerBase
from compilertools.build import get_build_compile_args
from compilertools._config_build import ConfigBuild
ext_suffix = get_config_var("EXT_SUFFIX")
raise_exception = False
class Compiler(CompilerBase):
"""Dummy Compiler"""
def __init__(self, current_compiler=False):
CompilerBase.__init__(self, current_compiler=current_compiler)
self["api"]["api_name"] = {"compile": "--api-compile", "link": "--api-link"}
self["option"]["option_name"] = {
"compile": "--option-compile",
"link": "--option-link",
}
def _compile_args_matrix(self, arch, cpu):
"""Return test args matrix"""
return [
[
self.Arg(
args="--arch1", suffix="arch1", build_if=(arch == "arch1")
),
self.Arg(
args="--arch2", suffix="arch2", build_if=(arch == "arch2")
),
self.Arg(
args="--arch2_opt",
suffix="arch2_opt",
build_if=(arch == "arch2"),
),
]
]
def _compile_args_current_machine(self, arch, cpu):
"""return current machine args"""
if raise_exception:
raise OSError
return "--native"
compiler = Compiler(current_compiler=True)
# Test default values
assert get_build_compile_args(compiler, "arch1") == {
f".arch1{ext_suffix}": ["--arch1"]
}
# Test current_machine
assert get_build_compile_args(compiler, current_machine=True) == {
ext_suffix: ["--native"]
}
# Test current_machine, no exception in case of error
raise_exception = True
assert get_build_compile_args(compiler, current_machine=True) == {ext_suffix: []}
raise_exception = False
# Test current_machine from CONFIG_BUILD
ConfigBuild.current_machine = True
assert get_build_compile_args(compiler) == {ext_suffix: ["--native"]}
ConfigBuild.current_machine = False
# Test ext_suffix
assert get_build_compile_args(compiler, "arch1", ext_suffix=".ext") == {
".arch1.ext": ["--arch1"]
}
# Test use_api
assert get_build_compile_args(compiler, "arch1", use_api=["api_name"]) == {
f".arch1{ext_suffix}": ["--arch1", "--api-compile"]
}
# Test use_option
assert get_build_compile_args(compiler, "arch1", use_option=["option_name"]) == {
f".arch1{ext_suffix}": ["--arch1", "--option-compile"]
}
# Test filtering suffixes
assert get_build_compile_args(compiler, "arch2") == {
f".arch2{ext_suffix}": ["--arch2"],
f".arch2_opt{ext_suffix}": ["--arch2_opt"],
}
ConfigBuild.suffixes_excludes.add("arch2_opt")
assert get_build_compile_args(compiler, "arch2") == {
f".arch2{ext_suffix}": ["--arch2"]
}
ConfigBuild.suffixes_excludes.remove("arch2_opt")
ConfigBuild.suffixes_includes.add("arch2")
assert get_build_compile_args(compiler, "arch2") == {
f".arch2{ext_suffix}": ["--arch2"]
}
ConfigBuild.suffixes_includes.remove("arch2")
def tests_get_build_link_args():
"""Test get_build_link_args"""
from compilertools.compilers import CompilerBase
from compilertools.build import get_build_link_args
class Compiler(CompilerBase):
"""Dummy Compiler"""
def __init__(self):
CompilerBase.__init__(self)
self["api"]["api_name"] = {"compile": "--api-compile", "link": "--api-link"}
self["option"]["option_name"] = {
"compile": "--option-compile",
"link": "--option-link",
}
compiler = Compiler()
# Test default values
assert get_build_link_args(compiler) == []
# Test use_api
assert get_build_link_args(compiler, use_api=["api_name"]) == ["--api-link"]
# Test use_option
assert get_build_link_args(compiler, use_option=["option_name"]) == [
"--option-link"
]
def tests_find_if_current_machine():
"""Test _find_if_current_machine"""
import os
from compilertools.build import _find_if_current_machine
from compilertools._config_build import ConfigBuild
w_dir = os.getcwd()
def dummy_getcwd():
"""Dummy os.getcwd"""
return w_dir
os_getcwd = os.getcwd
os.getcwd = dummy_getcwd
try:
# Set by configuration
ConfigBuild.current_machine = False
assert _find_if_current_machine() is False
ConfigBuild.current_machine = True
assert _find_if_current_machine() is True
# Pip detection
ConfigBuild.current_machine = "autodetect"
w_dir = "dir/not_current_machine"
assert _find_if_current_machine() is False
w_dir = "dir/pip-current_machine"
assert _find_if_current_machine() is True
finally:
os.getcwd = os_getcwd
ConfigBuild.current_machine = False
def tests_add_args():
"""Test _add_args"""
from compilertools.compilers import CompilerBase
from compilertools.build import _add_args
class Compiler(CompilerBase):
"""Dummy Compiler"""
def __init__(self):
CompilerBase.__init__(self)
self["api"]["api_name"] = {"compile": "--api-compile"}
compiler = Compiler()
# API & category exists
args = []
_add_args(compiler, args, "api", "compile", ["api_name"])
assert args == ["--api-compile"]
# API exists, category not exists
args = []
_add_args(compiler, args, "api", "link", ["api_name"])
assert args == []
# API not exists, category exists
args = []
_add_args(compiler, args, "api", "compile", ["not_exist"])
assert args == []
def tests_update_extension():
"""Test _update_extension, _patch_build_extension.patched and
_patch_get_ext_filename.patched"""
from os.path import join
from os import makedirs
from tempfile import TemporaryDirectory
from distutils.sysconfig import get_config_var
from compilertools.compilers import CompilerBase
from compilertools._config_build import ConfigBuild
from compilertools.build import (
_update_extension,
_patch_build_extension,
_patch_get_ext_filename,
_patch_get_ext_fullname,
_patch_get_outputs,
_String,
)
ext_suffix = get_config_var("EXT_SUFFIX")
class Compiler(CompilerBase):
"""Dummy Compiler"""
def __init__(self):
CompilerBase.__init__(self)
self["api"]["api_name"] = {"compile": "--api-compile", "link": "--api-link"}
self["option"]["option_name"] = {
"compile": "--option-compile",
"link": "--option-link",
}
def _compile_args_matrix(self, arch, cpu):
"""Return test args matrix"""
return [
[self.Arg(args="--inst", suffix="inst"), self.Arg()],
[self.Arg(args="--arch", suffix="arch"), self.Arg()],
]
compiler = Compiler()
# Create dummy distutils classes
class DummyCompiler:
"""Dummy distutils.ccompiler.CCompiler"""
def __init__(self):
# Replace compiler type str by Compiler instance
# This force the use of the testing compiler
self.compiler_type = compiler
class DummyExtension:
"""Dummy distutils.extension.Extension"""
def __init__(self):
self.sources = []
self.extra_compile_args = ["--extra_compile"]
self.extra_link_args = ["--extra_link"]
self.name = "package.module"
class DummyBuildExt:
"""Dummy distutils.command.build_ext.build_ext"""
def __init__(self):
self.package = None
self.extensions = []
self.compiler = DummyCompiler()
self.plat_name = "arch"
self.inplace = False
# Use build_ext.get_ext_filename directly
get_ext_filename = GET_EXT_FILENAME
get_ext_fullname = GET_EXT_FULLNAME
def get_outputs(self):
"""Dummy get_outputs"""
return []
def build_extension(self, _):
"""Dummy build_extension"""
# Patch dummy build_ext
DummyBuildExt.build_extension = _patch_build_extension(
DummyBuildExt.build_extension
)
DummyBuildExt.get_ext_filename = _patch_get_ext_filename(
DummyBuildExt.get_ext_filename
)
DummyBuildExt.build_extension = _patch_build_extension(
DummyBuildExt.build_extension
)
DummyBuildExt.get_ext_fullname = _patch_get_ext_fullname(
DummyBuildExt.get_ext_fullname
)
DummyBuildExt.get_outputs = _patch_get_outputs(DummyBuildExt.get_outputs)
# Test with patched build_extension
dummy_build_ext = DummyBuildExt()
dummy_ext = DummyExtension()
dummy_build_ext.build_extension(dummy_ext)
results = dummy_build_ext.extensions
# Check result count
excepted_args = compiler.compile_args()
assert len(excepted_args) != 0
assert len(results) == len(excepted_args)
# Check results details
results.sort(
key=lambda x: getattr(x, "compilertools_extended_suffix")
if hasattr(x, "compilertools_extended_suffix")
else ""
)
for index, result in enumerate(results):
# Get suffix (not for the legacy extension)
if index == 0:
assert not hasattr(result, "compilertools_extended_suffix")
suffix = ""
else:
suffix = result.compilertools_extended_suffix
# Check compile args
assert result.extra_compile_args == excepted_args[suffix.strip(".")] + [
"--extra_compile"
]
# Check link args
assert result.extra_link_args == ["--extra_link"]
# Check get_ext_filename
assert dummy_build_ext.get_ext_filename(result.name) == "".join(
(
join("package", "module"),
suffix,
ext_suffix,
)
)
# Check no duplicates if run a second time
dummy_build_ext.build_extension(dummy_ext)
results = dummy_build_ext.extensions
assert len(results) == len(excepted_args)
# Test "compilertools_extra_ouputs" presence
    # These are set because the build did not use the default compiler for the current platform
assert dummy_build_ext.compilertools_compiler_name
assert dummy_build_ext.compilertools_extra_ouputs == [
"".join((join("package", "module"), ".compilertools"))
]
# Test get_output
with TemporaryDirectory() as tmp:
dummy_build_ext.build_lib = join(tmp, "build")
makedirs(join(dummy_build_ext.build_lib, "package"), exist_ok=True)
excepted_file = join(
dummy_build_ext.build_lib, "package", "module.compilertools"
)
assert dummy_build_ext.get_outputs() == [excepted_file]
with open(excepted_file, "rt") as file:
assert file.read() == dummy_build_ext.compilertools_compiler_name
# Test after disabling optimization with CONFIG_BUILD
ConfigBuild.disabled = True
dummy_ext = DummyExtension()
assert _update_extension(DummyBuildExt(), dummy_ext) == [dummy_ext]
ConfigBuild.disabled = False
# Test options activation
ConfigBuild.option["option_name"] = True
results = _update_extension(DummyBuildExt(), DummyExtension())
for result in results:
# Check option arguments presence
assert result.extra_compile_args[-2] == "--option-compile"
assert result.extra_link_args[-2] == "--option-link"
del ConfigBuild.option["option_name"]
# Test API activation with file analysis
ConfigBuild.api["api_name"] = {"c": "#pragma api "}
with TemporaryDirectory() as tmp:
# Create dummy source file
source = join(tmp, "source.c")
with open(source, "wt") as file:
file.write("#pragma api operation")
# Reset extension and add file as source
dummy_ext = DummyExtension()
dummy_ext.sources.append(source)
# Compute results
results = _update_extension(DummyBuildExt(), dummy_ext)
for result in results:
# Check API arguments presence
assert result.extra_compile_args[-2] == "--api-compile"
assert result.extra_link_args[-2] == "--api-link"
del ConfigBuild.api["api_name"]
# Check type conservation with "get_ext_fullname"
assert isinstance(DummyBuildExt().get_ext_fullname(_String("module")), _String)
dummy_build_ext = DummyBuildExt()
dummy_build_ext.package = "package"
assert isinstance(dummy_build_ext.get_ext_fullname(_String("module")), _String)
def tests_string():
"""Test _String"""
from compilertools.build import _String
string = _String("a.b")
# Test parent_extension
parent_extension = "parent_extension"
assert string.parent_extension is None
string.parent_extension = parent_extension
assert string.parent_extension == parent_extension
# Test split
splited = string.split(".")
assert isinstance(splited[0], _String)
assert isinstance(splited[1], _String)
assert splited[0].parent_extension == parent_extension
assert splited[1].parent_extension == parent_extension
def tests_patch_build_extension():
"""Test _patch_build_extension"""
from compilertools.build import _patch_build_extension
# Check if patched
assert BUILD_EXTENSION is not build_ext.build_extension
# Check wrap
assert (
build_ext.build_extension.__module__
== f"compilertools.{BUILD_EXTENSION.__module__}"
)
assert build_ext.build_extension.__name__ == BUILD_EXTENSION.__name__
# Test re-patch
previous = build_ext.build_extension
build_ext.build_extension = _patch_build_extension(build_ext.build_extension)
assert build_ext.build_extension is previous
def tests_patch_get_ext_filename():
"""Test _patch_get_ext_filename"""
from compilertools.build import _patch_get_ext_filename
# Check if patched
assert GET_EXT_FILENAME is not build_ext.get_ext_filename
# Check wrap
assert (
build_ext.get_ext_filename.__module__
== f"compilertools.{GET_EXT_FILENAME.__module__}"
)
assert build_ext.get_ext_filename.__name__ == GET_EXT_FILENAME.__name__
# Test re-patch
previous = build_ext.get_ext_filename
build_ext.get_ext_filename = _patch_get_ext_filename(build_ext.get_ext_filename)
assert build_ext.get_ext_filename is previous
def tests_patch_get_ext_fullname():
"""Test _patch_get_ext_filename"""
from compilertools.build import _patch_get_ext_fullname
# Check if patched
assert GET_EXT_FULLNAME is not build_ext.get_ext_fullname
# Check wrap
assert (
build_ext.get_ext_fullname.__module__
== f"compilertools.{GET_EXT_FULLNAME.__module__}"
)
assert build_ext.get_ext_fullname.__name__ == GET_EXT_FULLNAME.__name__
# Test re-patch
previous = build_ext.get_ext_fullname
build_ext.get_ext_fullname = _patch_get_ext_fullname(build_ext.get_ext_fullname)
assert build_ext.get_ext_fullname is previous
def tests_patch_get_outputs():
"""Test _patch_get_outputs"""
from compilertools.build import _patch_get_outputs
# Check if patched
assert GET_OUTPUTS is not build_ext.get_outputs
# Check wrap
assert build_ext.get_outputs.__module__ == f"compilertools.{GET_OUTPUTS.__module__}"
assert build_ext.get_outputs.__name__ == GET_OUTPUTS.__name__
# Test re-patch
previous = build_ext.get_outputs
build_ext.get_outputs = _patch_get_outputs(build_ext.get_outputs)
assert build_ext.get_outputs is previous
def tests_patch___new__():
"""Test _patch___new__"""
# Check if patched
assert BUILD_EXT_NEW is not build_ext.__new__
# Check build_ext instantiation
from distutils.dist import Distribution
build_ext(Distribution())
assert GET_EXT_FULLNAME is not build_ext.get_ext_fullname
assert GET_EXT_FILENAME is not build_ext.get_ext_filename
assert BUILD_EXTENSION is not build_ext.build_extension
assert GET_OUTPUTS is not build_ext.get_outputs
```
#### File: compilertools/tests/test_compilers_llvm.py
```python
def tests_compiler():
"""Test Compiler"""
import platform
import subprocess
from compilertools.compilers._core import _get_arch_and_cpu
from compilertools.compilers.llvm import Compiler
cmd = {
"python": "",
"--version": "",
"-dumpversion": "",
"not_found": False,
}
def dummy_compiler():
"""Force version"""
return cmd["python"]
def run(*popenargs, check=False, **_):
"""Mocked subprocess.run"""
args = popenargs[0]
if cmd["not_found"]:
raise FileNotFoundError
try:
stdout = cmd[args[1]]
return_code = 0
except KeyError:
stdout = ""
return_code = 1
if check:
raise subprocess.CalledProcessError(return_code, args)
return subprocess.CompletedProcess(args, return_code, stdout)
platform_python_compiler = platform.python_compiler
platform.python_compiler = dummy_compiler
subprocess_run = subprocess.run
subprocess.run = run
try:
compiler = Compiler(current_compiler=True)
# Check not existing version
assert compiler.python_build_version == 0.0
assert compiler.version == 0.0
# Check existing version
cmd["python"] = "Clang 6.0 (clang-600.0.57)"
cmd["--version"] = "clang version 7.0.0 (Fedora 7.0.0-2.fc29)\n..."
cmd["-dumpversion"] = "7.0.0"
del compiler["python_build_version"]
del compiler["version"]
assert compiler.python_build_version == 6.0
assert compiler.version == 7.0
cmd["--version"] = "Apple LLVM version 9.1.0 (clang-902.0.39.2)\n..."
cmd["-dumpversion"] = "9.1.0"
del compiler["version"]
assert compiler.version == 9.1
# Not current compiler
assert Compiler().version == 0.0
# Test Error
del compiler["version"]
cmd["not_found"] = True
assert compiler.version == 0.0
# Initialize system configurations
compiler["version"] = 7.0
arch_x86, cpu_x86 = _get_arch_and_cpu("x86_32")
arch_amd64, cpu_amd64 = _get_arch_and_cpu("x86_64")
# Test API/Options
assert len(compiler.api) > 0
assert len(compiler.option) > 0
# Test _compile_args_matrix
assert compiler._compile_args_matrix(arch_x86, cpu_x86)
assert compiler._compile_args_matrix(arch_amd64, cpu_amd64)
# Test _compile_args_current_machine with x86
args = compiler._compile_args_current_machine(arch_x86, cpu_x86)
assert args
assert "-march=native" in args
# Check return a result also with amd64
assert compiler._compile_args_current_machine(arch_amd64, cpu_amd64)
# Check -mfpmath with or without SSE
cpu_x86["features"] = ["SSE"]
args = compiler._compile_args_current_machine(arch_x86, cpu_x86)
assert "-mfpmath=sse" in args
cpu_x86["features"] = []
args = compiler._compile_args_current_machine(arch_x86, cpu_x86)
assert "-mfpmath=sse" not in args
finally:
platform.python_compiler = platform_python_compiler
subprocess.run = subprocess_run
def tests_compiler_clang_command():
"""Test Compiler if CC/Clang command available"""
from subprocess import Popen, PIPE
try:
version_str = (
Popen(["clang", "--version"], stdout=PIPE, universal_newlines=True)
.stdout.read()
.lower()
)
except OSError:
from pytest import skip
version_str = ""
skip("Clang not available")
from compilertools.compilers.llvm import Compiler
assert Compiler(current_compiler=True).version != 0.0 or "clang" not in version_str
```
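The test above relies on mapping the version strings returned by `clang -dumpversion` (or reported by `platform.python_compiler()`) to a `major.minor` float. As a rough standalone sketch of that parsing, which reproduces only the cases asserted above and is not the library's actual implementation:

```python
def clang_major_minor(dumpversion_output):
    """Rough sketch: map '7.0.0' to 7.0 and '9.1.0' to 9.1 (major.minor only)."""
    major, minor = dumpversion_output.strip().split(".")[:2]
    return float(f"{major}.{minor}")

assert clang_major_minor("7.0.0") == 7.0
assert clang_major_minor("9.1.0") == 9.1
```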
#### File: compilertools/tests/test_compilers_msvc.py
```python
def tests_compiler():
"""Test Compiler"""
import platform
from compilertools.compilers._core import _get_arch_and_cpu
from compilertools.compilers.msvc import Compiler
version = ""
def dummy_compiler():
"""platform.python_compiler"""
return version
platform_python_compiler = platform.python_compiler
platform.python_compiler = dummy_compiler
try:
compiler = Compiler(current_compiler=True)
# Check not existing version
assert compiler.version == 0.0
# Check existing version
version = "MSC v.1800 64 bit"
del compiler["version"]
assert compiler.version == 12.0
# Check 13.0 skipped
version = "MSC v.1900 64 bit"
del compiler["version"]
assert compiler.version == 14.0
# Not current compiler
assert Compiler().version == 0.0
finally:
platform.python_compiler = platform_python_compiler
# Test API/Options
assert len(compiler.api) > 0
assert len(compiler.option) > 0
# Test _compile_args_matrix
arch, cpu = _get_arch_and_cpu("x86_32")
assert compiler._compile_args_matrix(arch, cpu)
```
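The MSVC test depends on the relationship between the `MSC v.XXXX` token reported by `platform.python_compiler()` and the Visual C++ version, with version 13.0 skipped. A minimal standalone sketch of that mapping, covering only the cases asserted above and not the library's implementation:

```python
def msc_to_version(python_compiler_str):
    """Rough sketch: 'MSC v.1800 64 bit' -> 12.0, 'MSC v.1900 64 bit' -> 14.0."""
    msc_ver = int(python_compiler_str.split("v.")[1].split(" ")[0])
    version = msc_ver / 100.0 - 6.0
    if version >= 13.0:
        version += 1.0  # Microsoft skipped Visual C++ 13.0
    return version

assert msc_to_version("MSC v.1800 64 bit") == 12.0
assert msc_to_version("MSC v.1900 64 bit") == 14.0
```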
#### File: compilertools/tests/test_imports.py
```python
def tests_update_ext_suffixes():
"""Test update_extensions_suffixes & ARCH_SUFFIXES"""
from collections import OrderedDict
from importlib.machinery import EXTENSION_SUFFIXES
import compilertools._core as core
from compilertools._core import suffix_from_args
from compilertools.imports import ARCH_SUFFIXES, update_extensions_suffixes
# Create fake compiler and extensions on environments without ARCH_SUFFIXES
if not ARCH_SUFFIXES:
ARCH_SUFFIXES += [".fake1", ".fake2"]
def get_compile_args(*_, **__):
"""Mocker function"""
return OrderedDict(
[
("inst1-arch1", ["--generic", "--inst1", "--arch1"]),
("inst1-arch2", ["--generic", "--inst1", "--arch2"]),
("arch1", ["--generic", "--arch1"]),
("arch2", ["--generic", "--arch2"]),
]
)
core_get_compile_args = core.get_compile_args
core.get_compile_args = get_compile_args
else:
get_compile_args = core.get_compile_args
core_get_compile_args = None
# Test
try:
# Test update
update_extensions_suffixes("gcc")
get_compile_args("gcc", current_machine=True)
suffixes = suffix_from_args(
get_compile_args("gcc", current_machine=True), EXTENSION_SUFFIXES
)
for suffixe in suffixes:
assert suffixe in ARCH_SUFFIXES
# Test extensions presence
result = []
for ext in EXTENSION_SUFFIXES:
result.append(False)
for arch_ext in ARCH_SUFFIXES:
if ext in arch_ext:
result[-1] = True
break
assert all(result)
# Tests not crash on exception
update_extensions_suffixes("fake_compiler")
finally:
if core_get_compile_args:
ARCH_SUFFIXES.clear()
core.get_compile_args = core_get_compile_args
def tests_extension_file_finder():
"""Test _ExtensionFileFinder"""
import sys
from os.path import join
from tempfile import TemporaryDirectory
import importlib.machinery as machinery
import compilertools.imports as imports
from compilertools.imports import (
_ExtensionFileFinder,
ARCH_SUFFIXES,
_PROCESSED_COMPILERS,
)
# Create fake compiler and extensions on environments without ARCH_SUFFIXES
if not ARCH_SUFFIXES:
ARCH_SUFFIXES += [".fake1", ".fake2"]
_PROCESSED_COMPILERS.add("fake_compiler")
def update_extensions_suffixes(compiler):
"""Mocker function"""
_PROCESSED_COMPILERS.add(compiler)
imports_update_extensions_suffixes = imports.update_extensions_suffixes
imports.update_extensions_suffixes = update_extensions_suffixes
else:
imports_update_extensions_suffixes = None
# Check presence in sys.meta_path
assert isinstance(sys.meta_path[0], _ExtensionFileFinder)
# Test find_spec
    # Monkey patch importlib machinery so there is no need to import a non-existent file.
module_spec = machinery.ModuleSpec
extension_file_loader = machinery.ExtensionFileLoader
def dummy_spec(_, loader, *, origin=None):
"""Dummy ModuleSpec"""
assert loader == origin
return origin
def dummy_fileloader(_, path):
"""Dummy ExtensionFileLoader"""
return path
machinery.ModuleSpec = dummy_spec
machinery.ExtensionFileLoader = dummy_fileloader
try:
for use_compiler_file in (False, True):
with TemporaryDirectory() as tmp:
sys.path.insert(0, tmp)
name = "compilertools_dummy_file"
ext = ARCH_SUFFIXES[0]
file_path = join(tmp, "".join([name, ext]))
with open(file_path, "wt") as file:
file.write("")
if use_compiler_file:
compiler = _PROCESSED_COMPILERS.pop()
assert compiler not in _PROCESSED_COMPILERS
path_compiler = join(tmp, "".join([name, ".compilertools"]))
with open(path_compiler, "wt") as file:
file.write(compiler)
file_finder = _ExtensionFileFinder()
# Existing file
assert file_finder.find_spec(name, "") == file_path
if use_compiler_file:
assert compiler in _PROCESSED_COMPILERS
# Checks called twice
assert file_finder.find_spec(name, "") == file_path
# non-existing file
assert file_finder.find_spec("compilertools_notexists_file", "") is None
sys.path.remove(tmp)
finally:
machinery.ModuleSpec = module_spec
machinery.ExtensionFileLoader = extension_file_loader
if imports_update_extensions_suffixes:
ARCH_SUFFIXES.clear()
_PROCESSED_COMPILERS.discard("fake_compiler")
imports.update_extensions_suffixes = imports_update_extensions_suffixes
```
#### File: compilertools/tests/test_processors__core.py
```python
def tests_get_arch():
"""Test get_arch"""
from platform import machine
from compilertools._config import CONFIG
from compilertools.processors._core import get_arch
# Test lower case
assert get_arch("ARCH") == "arch"
# Current arch
arch = machine().lower()
assert get_arch() == CONFIG["architectures"].get(arch, arch)
# Test aliases
for arch in CONFIG["architectures"]:
assert get_arch(arch) == CONFIG["architectures"][arch]
# Test cross compilation
assert get_arch("x86_amd64") == "x86_64"
# Test prefixed architecture
assert get_arch("linux-x86_64") == "x86_64"
def tests_get_processor():
"""Test get_processor"""
from os import listdir
from os.path import splitext, dirname
from compilertools.processors import _core
from compilertools.processors._core import get_processor
from itertools import product
# Return processor by name with all file in "compilertools.processors"
for file, current_machine in product(
listdir(dirname(_core.__file__)), (True, False)
):
if file.startswith("_"):
continue
name = splitext(file)[0]
processor = get_processor(name, current_machine=current_machine)
assert processor.__class__.__module__ == f"compilertools.processors.{name}"
assert processor.current_machine is current_machine
def tests_processor_base():
"""Test ProcessorBase"""
from compilertools.processors import ProcessorBase
# Test current machine
processor = ProcessorBase()
assert processor.current_machine is False
processor = ProcessorBase(current_machine=True)
assert processor.current_machine is True
# Test properties
assert processor.vendor == ""
processor["vendor"] = "vendor"
assert processor.vendor == "vendor"
assert processor.brand == ""
processor["brand"] = "brand"
assert processor.brand == "brand"
assert processor.features == []
processor["features"] = ["feature1", "feature2"]
assert processor.features == ["feature1", "feature2"]
# Test arch
assert processor.arch == "_core"
```
|
{
"source": "JGoutin/rfs",
"score": 3
}
|
#### File: rfs/airfs/config.py
```python
from json import dump as _dump
from os import chmod
from airfs._core.config import CONFIG_FILE as _CONFIG_FILE, read_config as _read_config
def get_mount(storage, config_name=None):
"""
Get the mount configuration.
.. versionadded:: 1.5.0
Args:
storage (str): Storage name.
config_name (str): If specified, load the configuration as a specific storage
configuration. "See airfs.config.set_mount".
Returns:
dict or None: Storage configuration, None if not configured.
"""
if config_name:
storage = f"{storage}.{config_name}"
try:
return _read_config()[storage]
except (KeyError, TypeError):
return None
def set_mount(
storage, config_name=None, storage_parameters=None, unsecure=None, extra_root=None
):
"""
Set a mount configuration. Most arguments are identical to "airfs.mount".
.. versionadded:: 1.5.0
Args:
storage (str): Storage name.
config_name (str): If specified, save the configuration as a specific storage
            configuration. This allows saving multiple configurations for the same
            "storage".
storage_parameters (dict): Storage configuration parameters.
Generally, client configuration and credentials.
        unsecure (bool): If True, disables TLS/SSL to improve transfer
            performance, but makes the connection insecure. Defaults to False.
        extra_root (str): Extra root that can be used in place of the root in paths.
            This can be used to provide support for shorter URLs.
            Example: with root "https://www.my_storage.com/user" and extra_root
            "mystorage://" it is possible to access an object using
            "mystorage://container/object" instead of
"https://www.my_storage.com/user/container/object".
"""
if config_name:
storage = f"{storage}.{config_name}"
config = _read_config() or dict()
config[storage] = {
key: value
for key, value in dict(
unsecure=unsecure,
extra_root=extra_root,
storage_parameters=storage_parameters,
).items()
if value
}
with open(_CONFIG_FILE, "wt") as config_file:
_dump(config, config_file)
chmod(_CONFIG_FILE, 0o600)
```
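A minimal usage sketch for the two functions above; the storage name, configuration name, and parameters are placeholders, and calling `set_mount` writes the user configuration file described by `airfs._core.config.CONFIG_FILE`:

```python
from airfs.config import get_mount, set_mount

# Hypothetical storage name and parameters, for illustration only.
set_mount(
    "example_storage",
    config_name="secondary",
    storage_parameters={"region": "eu-west-1"},
    extra_root="example://",
)
# Expected to print the stored mount configuration, e.g.
# {'extra_root': 'example://', 'storage_parameters': {'region': 'eu-west-1'}}
print(get_mount("example_storage", config_name="secondary"))
```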
#### File: airfs/_core/cache.py
```python
from gzip import open as open_archive
from hashlib import blake2b
from json import load, dump
from os import listdir, utime, remove, makedirs, chmod
from os.path import join, getmtime
from time import time
from airfs._core.config import CACHE_DIR
class NoCacheException(Exception):
"""No cache available"""
#: Long cache default expiry
CACHE_LONG_EXPIRY = 172800
#: Short cache default expiry
CACHE_SHORT_EXPIRY = 60
#: To initialize cache directories only once
_CACHE_INITIALIZED = False
def _hash_name(name):
"""
Convert name to hashed name.
Args:
name (str): name.
Returns:
str: Hashed name.
"""
return blake2b(name.encode(), digest_size=32).hexdigest()
def clear_cache():
"""
Clear expired cache files.
"""
expiry = _get_expiry()
for cached_name in listdir(CACHE_DIR):
path = join(CACHE_DIR, cached_name)
if getmtime(path) < expiry[cached_name[-1]]:
remove(path)
continue
def _get_expiry():
"""
Get expiry timestamps.
Returns:
dict: Expiry for both short and long modes.
"""
current_time = time()
return {
"s": current_time - CACHE_SHORT_EXPIRY,
"l": current_time - CACHE_LONG_EXPIRY,
}
def get_cache(name):
"""
Get an object from disk cache.
Args:
name (str): Cache name.
    Returns:
        dict or list: Cached object.
    Raises:
        NoCacheException: If the object is not cached or has expired.
"""
expiry = _get_expiry()
hashed_name = _hash_name(name)
for mode in ("s", "l"):
path = join(CACHE_DIR, hashed_name + mode)
try:
timestamp = getmtime(path)
except FileNotFoundError:
continue
if timestamp < expiry[mode]:
remove(path)
continue
if mode == "l":
# In long cache mode, reset expiry delay
utime(path)
with open_archive(path, "rt") as file:
return load(file)
raise NoCacheException()
def set_cache(name, obj, long=False):
"""
Add an object to disk cache.
Args:
name (str): Cache name.
obj (dict or list): Object to cache.
long (bool): If true, enable "long cache".
"""
path = join(CACHE_DIR, _hash_name(name) + ("l" if long else "s"))
global _CACHE_INITIALIZED
if not _CACHE_INITIALIZED:
makedirs(CACHE_DIR, exist_ok=True)
chmod(CACHE_DIR, 0o700)
_CACHE_INITIALIZED = True
with open_archive(path, "wt") as file:
dump(obj, file)
```
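A short usage sketch of the cache helpers above; the cache key and payload are arbitrary placeholders:

```python
from airfs._core.cache import NoCacheException, clear_cache, get_cache, set_cache

key = "example-cache-entry"  # arbitrary cache key
try:
    payload = get_cache(key)
except NoCacheException:
    payload = {"value": 42}  # placeholder payload, kept for CACHE_SHORT_EXPIRY seconds
    set_cache(key, payload)
clear_cache()  # drop any expired entries from the cache directory
```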
#### File: airfs/_core/config.py
```python
from json import load
import os
from os import getenv
from os.path import join, expandvars, expanduser
def _init_paths():
"""
Initialize application directories.
Returns:
tuple of str: Configuration directory, cache directory
"""
if os.name == "nt":
config_dir = join(expandvars("%APPDATA%"), "airfs")
cache_dir = join(expandvars("%LOCALAPPDATA%"), r"airfs\cache")
elif os.getuid() != 0:
config_dir = join(getenv("XDG_CONFIG_HOME", expanduser("~/.config")), "airfs")
cache_dir = join(getenv("XDG_CACHE_HOME", expanduser("~/.cache")), "airfs")
else:
config_dir = "/etc/airfs"
cache_dir = "/var/cache/airfs"
return config_dir, cache_dir
CONFIG_DIR, CACHE_DIR = _init_paths()
CONFIG_FILE = join(CONFIG_DIR, "config.json")
def read_config():
"""
Read the configuration.
Returns:
dict or None: Configuration. None if no configuration.
"""
try:
with open(CONFIG_FILE, "rt") as config_file:
return load(config_file)
except FileNotFoundError:
return
```
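For illustration, reading the configuration (if any) from the locations resolved above:

```python
from airfs._core.config import CONFIG_FILE, read_config

config = read_config()
if config is None:
    print(f"No configuration file at {CONFIG_FILE}")
else:
    print(f"Configured storages: {sorted(config)}")
```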
#### File: airfs/_core/functions_core.py
```python
from contextlib import contextmanager
from functools import wraps
from os import fsdecode, fsencode
from airfs._core.exceptions import handle_os_exceptions, ObjectNotImplementedError
def is_storage(file, storage=None):
"""
Check if file is a local file or a storage file.
Args:
file (str or int): file path, URL or file descriptor.
storage (str): Storage name.
Returns:
bool: return True if file is not local.
"""
if storage:
return True
elif isinstance(file, int):
return False
split_url = file.split("://", 1)
if len(split_url) == 2 and split_url[0].lower() != "file":
return True
return False
def format_and_is_storage(path, file_obj_as_storage=False, storage=None):
"""
    Check if the path is a storage path and format it.
    If path is an opened file-like object, it is reported as a storage only when
    "file_obj_as_storage" is True.
Args:
path (path-like object or file-like object or int):
Path, opened file or file descriptor.
file_obj_as_storage (bool): If True, count file-like objects as storages.
Useful if standard functions are not intended to support them.
storage (str): Storage name.
Returns:
tuple: str or file-like object or int (Updated path),
bool (True if is storage).
"""
readable = hasattr(path, "read")
if isinstance(path, int) or readable:
return path, readable and file_obj_as_storage
path = fsdecode(path).replace("\\", "/")
return path, is_storage(path, storage)
def equivalent_to(std_function, keep_path_type=False):
"""
    Decorate an airfs object compatible function to provide a fallback to the
    standard function when used on local files.
Args:
std_function: standard function to use with local files.
keep_path_type (bool): Convert returned result to bytes if path argument was
bytes.
Returns:
function: new function
"""
def decorate(cos_function):
"""Decorator argument handler
Args:
cos_function (function): Storage function to use with storage files.
"""
@wraps(cos_function)
def decorated(path, *args, **kwargs):
"""
Decorated function.
Args:
path (path-like object or int): Path, URL or file descriptor.
"""
if not isinstance(path, int):
path_str = fsdecode(path).replace("\\", "/")
if is_storage(path_str):
with handle_os_exceptions():
result = cos_function(path_str, *args, **kwargs)
if keep_path_type and isinstance(path, bytes):
result = fsencode(result)
return result
return std_function(path, *args, **kwargs)
return decorated
return decorate
def raises_on_dir_fd(dir_fd):
"""
Raise on use of dir_fd
Args:
dir_fd: Checks if None
Raises:
        ObjectNotImplementedError: If dir_fd is not None.
"""
if dir_fd is not None:
raise ObjectNotImplementedError(feature="dir_fd")
class SeatsCounter:
"""
A simple counter keeping track of available seats.
Args:
max_seats (int or None): Maximum available seats. None if no maximum.
"""
__slots__ = ("_seats",)
def __init__(self, max_seats):
self._seats = max_seats
def take_seat(self):
"""
Take a seat.
"""
if self._seats:
self._seats -= 1
@property
def seats_left(self):
"""
Remaining seats.
Returns:
int or None: Remaining seats. None if no maximum.
"""
if self._seats:
return self._seats
@property
def full(self):
"""
Check if seats are full.
Returns:
bool: True if no more seat available.
"""
return self._seats == 0
@contextmanager
def ignore_exception(exception):
"""
Convenient shorter method to ignore exception.
"""
try:
yield
except exception:
return
```
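A few concrete cases for `is_storage` and `format_and_is_storage`, illustrating how scheme-prefixed URLs are treated as storage paths while plain paths and file descriptors stay local (the URL and paths below are placeholders):

```python
from airfs._core.functions_core import format_and_is_storage, is_storage

assert is_storage("https://bucket.example.com/key")  # non-"file" scheme: storage
assert not is_storage("/tmp/local_file.bin")  # plain path: local
assert not is_storage(3)  # file descriptors are always local
path, storage = format_and_is_storage(b"C:\\data\\file.bin")
assert path == "C:/data/file.bin" and storage is False  # backslashes normalized
```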
#### File: airfs/_core/functions_os.py
```python
import os
from os import scandir as os_scandir, fsdecode, fsencode, fspath
from os.path import dirname
from stat import S_ISLNK, S_ISDIR
from airfs._core.storage_manager import get_instance
from airfs._core.functions_core import (
equivalent_to,
is_storage,
raises_on_dir_fd,
format_and_is_storage,
)
from airfs._core.exceptions import (
ObjectExistsError,
ObjectNotFoundError,
handle_os_exceptions,
ObjectPermissionError,
ObjectIsADirectoryError,
ObjectSameFileError,
ObjectNotImplementedError,
)
from airfs._core.io_base import memoizedmethod
@equivalent_to(os.listdir)
def listdir(path="."):
"""
Return a list containing the names of the entries in the directory given by path.
Follow symlinks if any.
Equivalent to "os.listdir".
.. versionadded:: 1.2.0
Args:
path (path-like object): Path or URL.
Returns:
list of str: Entries names.
"""
system = get_instance(path)
path = system.resolve(path, follow_symlinks=True)[0]
return [name.rstrip("/") for name, _ in system.list_objects(path, first_level=True)]
@equivalent_to(os.makedirs)
def makedirs(name, mode=0o777, exist_ok=False):
"""
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist.
Equivalent to "os.makedirs".
.. versionadded:: 1.1.0
Args:
name (path-like object): Path or URL.
mode (int): The mode parameter is passed to os.mkdir();
see the os.mkdir() description for how it is interpreted.
Not supported on storage objects.
        exist_ok (bool): Don't raise an error if the target directory already exists.
Raises:
FileExistsError: if exist_ok is False and if the target directory already
exists.
"""
system = get_instance(name)
if not exist_ok and system.isdir(system.ensure_dir_path(name)):
raise ObjectExistsError(path=name)
system.make_dir(name)
@equivalent_to(os.mkdir)
def mkdir(path, mode=0o777, *, dir_fd=None):
"""
Create a directory named path with numeric mode mode.
Equivalent to "os.mkdir".
.. versionadded:: 1.1.0
Args:
path (path-like object): Path or URL.
mode (int): The mode parameter is passed to os.mkdir();
see the os.mkdir() description for how it is interpreted.
Not supported on storage objects.
dir_fd (int): directory descriptors;
            see the os.mkdir() description for how it is interpreted.
Not supported on storage objects.
Raises:
FileExistsError : Directory already exists.
FileNotFoundError: Parent directory not exists.
"""
raises_on_dir_fd(dir_fd)
system = get_instance(path)
relative = system.relpath(path)
parent_dir = dirname(relative.rstrip("/"))
if parent_dir:
parent = "{}{}/".format(path.rsplit(relative, 1)[0], parent_dir)
if not system.isdir(parent):
raise ObjectNotFoundError(path=parent)
if system.isdir(system.ensure_dir_path(path)):
raise ObjectExistsError(path=path)
system.make_dir(relative, relative=True)
@equivalent_to(os.readlink, keep_path_type=True)
def readlink(path, *, dir_fd=None):
"""
Return a string representing the path to which the symbolic link points.
The result may be either an absolute or relative pathname; if it is relative, it may
be converted to an absolute pathname using
os.path.join(os.path.dirname(path), result).
If the path is a string object (directly or indirectly through a PathLike
interface), the result will also be a string object, and the call may raise a
    UnicodeDecodeError. If the path is a bytes object (directly or indirectly), the result
will be a bytes object.
Equivalent to "os.readlink".
.. versionadded:: 1.5.0
Args:
path (path-like object): Path or URL.
dir_fd (int): directory descriptors;
see the os.readlink() description for how it is interpreted.
Not supported on storage objects.
"""
raises_on_dir_fd(dir_fd)
return get_instance(path).read_link(path)
@equivalent_to(os.remove)
def remove(path, *, dir_fd=None):
"""
Remove a file.
Equivalent to "os.remove" and "os.unlink".
.. versionadded:: 1.2.0
Args:
path (path-like object): Path or URL.
dir_fd (int): directory descriptors;
see the os.remove() description for how it is interpreted.
Not supported on storage objects.
"""
raises_on_dir_fd(dir_fd)
system = get_instance(path)
if system.is_locator(path) or path[-1] == "/":
raise ObjectIsADirectoryError(path=path)
system.remove(path)
unlink = remove
@equivalent_to(os.rmdir)
def rmdir(path, *, dir_fd=None):
"""
Remove a directory.
Equivalent to "os.rmdir".
.. versionadded:: 1.2.0
Args:
path (path-like object): Path or URL.
dir_fd (int): directory descriptors;
see the os.rmdir() description for how it is interpreted.
Not supported on storage objects.
"""
raises_on_dir_fd(dir_fd)
system = get_instance(path)
system.remove(system.ensure_dir_path(path))
@equivalent_to(os.lstat)
def lstat(path, *, dir_fd=None):
"""
Get the status of a file or a file descriptor.
Perform the equivalent of a "lstat()" system call on the given path.
Equivalent to "os.lstat".
    On storage objects, may return extra storage-specific attributes in "os.stat_result".
.. versionadded:: 1.2.0
Args:
path (path-like object): Path or URL.
dir_fd (int): directory descriptors;
            see the os.lstat() description for how it is interpreted.
Not supported on storage objects.
Returns:
os.stat_result: stat result.
"""
raises_on_dir_fd(dir_fd)
return get_instance(path).stat(path)
@equivalent_to(os.stat)
def stat(path, *, dir_fd=None, follow_symlinks=True):
"""
Get the status of a file or a file descriptor.
Perform the equivalent of a "stat()" system call on the given path.
Equivalent to "os.stat".
    On storage objects, may return extra storage-specific attributes in "os.stat_result".
.. versionadded:: 1.2.0
Args:
path (path-like object): Path or URL.
dir_fd (int): directory descriptors;
            see the os.stat() description for how it is interpreted.
Not supported on storage objects.
follow_symlinks (bool): Follow symlinks.
Returns:
os.stat_result: stat result.
"""
raises_on_dir_fd(dir_fd)
return get_instance(path).stat(path, follow_symlinks=follow_symlinks)
class DirEntry:
"""
Object yielded by scandir() to expose the file path and other file attributes of a
directory entry.
Equivalent to "os.DirEntry".
Not intended to be instantiated directly.
.. versionadded:: 1.2.0
"""
__slots__ = ("_cache", "_system", "_name", "_header", "_path", "_bytes_path")
def __init__(self, scandir_path, system, name, header, bytes_path):
"""
Should only be instantiated by "scandir".
Args:
scandir_path (str): scandir path argument.
system (airfs._core.io_system.SystemBase subclass): Storage system.
name (str): Name of the object relative to "scandir_path".
header (dict): Object header
bytes_path (bool): True if path must be returned as bytes.
"""
self._cache = dict()
self._system = system
self._name = name
self._header = header
self._path = "".join(
(scandir_path if scandir_path[-1] == "/" else (scandir_path + "/"), name)
)
self._bytes_path = bytes_path
@memoizedmethod
def __str__(self):
return f"<DirEntry '{self._name.rstrip('/')}'>"
__repr__ = __str__
@property # type: ignore
@memoizedmethod
def _client_kwargs(self):
"""
Get base keyword arguments for client
Returns:
dict: keyword arguments
"""
return self._system.get_client_kwargs(self._path)
@property # type: ignore
@memoizedmethod
def name(self):
"""
The entry’s base filename, relative to the scandir() path argument.
Returns:
str: name.
"""
name = self._name.rstrip("/")
if self._bytes_path:
name = fsencode(name)
return name
@property # type: ignore
@memoizedmethod
def path(self):
"""
The entry’s full path name:
equivalent to os.path.join(scandir_path, entry.name) where scandir_path is the
scandir() path argument.
The path is only absolute if the scandir() path argument was absolute.
Returns:
str: name.
"""
path = self._path.rstrip("/")
if self._bytes_path:
path = fsencode(path)
return path
@memoizedmethod
def inode(self):
"""
Return the inode number of the entry.
The result is cached on the os.DirEntry object.
Returns:
int: inode.
"""
return self.stat().st_ino
@memoizedmethod
def is_dir(self, *, follow_symlinks=True):
"""
Return True if this entry is a directory or a symbolic link pointing to a
directory; return False if the entry is or points to any other kind of file, or
if it doesn’t exist anymore.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Returns:
            bool: True if the entry is or points to a directory.
"""
with handle_os_exceptions():
try:
return self._system.isdir(
path=self._path,
client_kwargs=self._client_kwargs,
virtual_dir=False,
follow_symlinks=follow_symlinks,
) or bool(
# Some directories only exists virtually in object path and don't
# have headers.
S_ISDIR(self.stat().st_mode)
)
except ObjectPermissionError:
return True
@memoizedmethod
def is_file(self, *, follow_symlinks=True):
"""
Return True if this entry is a file or a symbolic link pointing to a file;
return False if the entry is or points to a directory or other non-file entry,
or if it doesn’t exist anymore.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Returns:
            bool: True if the entry is or points to a file.
"""
with handle_os_exceptions():
return self._system.isfile(
path=self._path,
client_kwargs=self._client_kwargs,
follow_symlinks=follow_symlinks,
)
@memoizedmethod
def is_symlink(self):
"""
Return True if this entry is a symbolic link
The result is cached on the os.DirEntry object.
Returns:
bool: True if symbolic link.
"""
return bool(S_ISLNK(self.stat().st_mode))
@memoizedmethod
def stat(self, *, follow_symlinks=True):
"""
Return a stat_result object for this entry.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Returns:
os.stat_result: Stat result object
"""
with handle_os_exceptions():
return self._system.stat(
path=self._path,
client_kwargs=self._client_kwargs,
header=self._header,
follow_symlinks=follow_symlinks,
)
DirEntry.__module__ = "airfs"
def scandir(path="."):
"""
Return an iterator of os.DirEntry objects corresponding to the entries in the
directory given by path. The entries are yielded in arbitrary order, and the special
entries '.' and '..' are not included.
Equivalent to "os.scandir".
.. versionadded:: 1.2.0
Args:
path (path-like object): Path or URL.
If path is of type bytes (directly or indirectly through the PathLike
interface), the type of the name and path attributes of each os.DirEntry
will be bytes; in all other circumstances, they will be of type str.
Returns:
Generator of os.DirEntry: Entries information.
"""
scandir_path = fsdecode(path).replace("\\", "/")
if not is_storage(scandir_path):
return os_scandir(scandir_path)
system = get_instance(scandir_path)
return _scandir_generator(
is_bytes=isinstance(fspath(path), (bytes, bytearray)),
scandir_path=system.resolve(scandir_path, follow_symlinks=True)[0],
system=system,
)
def _scandir_generator(is_bytes, scandir_path, system):
"""
scandir generator
Args:
is_bytes (bool): True if DirEntry must handle path as bytes.
scandir_path (str): Path.
system (airfs._core.io_system.SystemBase subclass): Storage system.
Yields:
DirEntry: Directory entries
"""
with handle_os_exceptions():
for name, header in system.list_objects(scandir_path, first_level=True):
yield DirEntry(
scandir_path=scandir_path,
system=system,
name=name,
header=header,
bytes_path=is_bytes,
)
def symlink(src, dst, target_is_directory=False, *, dir_fd=None):
"""
Create a symbolic link pointing to src named dst.
Equivalent to "os.symlink".
.. versionadded:: 1.5.0
Args:
        src (path-like object): Path or URL to the target.
        dst (path-like object): Path or URL to the symbolic link.
        target_is_directory (bool): On Windows, defines whether the symlink represents
            a file or a directory.
Not supported on storage objects and non-Windows platforms.
dir_fd (int): directory descriptors;
see the os.symlink() description for how it is interpreted.
Not supported on storage objects.
"""
src, src_is_storage = format_and_is_storage(src)
dst, dst_is_storage = format_and_is_storage(dst)
if not src_is_storage and not dst_is_storage:
return os.symlink(
src, dst, target_is_directory=target_is_directory, dir_fd=dir_fd
)
with handle_os_exceptions():
if not src_is_storage or not dst_is_storage:
ObjectNotImplementedError("Cross storage symlinks are not supported")
raises_on_dir_fd(dir_fd)
system_src = get_instance(src)
system_dst = get_instance(dst)
if system_src is not system_dst:
ObjectNotImplementedError("Cross storage symlinks are not supported")
elif system_src.relpath(src) == system_dst.relpath(dst):
raise ObjectSameFileError(path1=src, path2=dst)
return get_instance(src).symlink(src, dst)
```
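Because every function above falls back to the corresponding `os` call for non-storage paths, the module can be exercised locally without any storage mounted. A minimal sketch using a hypothetical local directory:

```python
from airfs._core.functions_os import listdir, makedirs, rmdir, scandir

makedirs("/tmp/airfs_demo/sub", exist_ok=True)  # hypothetical local path
print(listdir("/tmp/airfs_demo"))  # e.g. ['sub']
for entry in scandir("/tmp/airfs_demo"):  # local paths yield plain os.DirEntry objects
    print(entry.name, entry.is_dir())
rmdir("/tmp/airfs_demo/sub")
rmdir("/tmp/airfs_demo")
```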
#### File: airfs/_core/functions_shutil.py
```python
from os.path import join, basename
from shutil import (
copy as shutil_copy,
copyfileobj,
copyfile as shutil_copyfile,
)
from airfs._core.compat import COPY_BUFSIZE
from airfs._core.functions_io import cos_open
from airfs._core.functions_os_path import isdir
from airfs._core.functions_core import format_and_is_storage, ignore_exception
from airfs._core.exceptions import (
AirfsInternalException,
handle_os_exceptions,
ObjectSameFileError,
)
from airfs._core.storage_manager import get_instance
def _copy(src, dst, src_is_storage, dst_is_storage, follow_symlinks):
"""
Copies file from source to destination
Args:
src (str or file-like object): Source file.
dst (str or file-like object): Destination file.
src_is_storage (bool): Source is storage.
dst_is_storage (bool): Destination is storage.
follow_symlinks (bool): If True, follow symlinks.
"""
with handle_os_exceptions():
if src_is_storage and dst_is_storage:
system_src = get_instance(src)
system_dst = get_instance(dst)
if system_src is system_dst:
if system_src.relpath(src) == system_dst.relpath(dst):
raise ObjectSameFileError(path1=src, path2=dst)
try:
return system_dst.copy(src, dst)
except AirfsInternalException:
pass
for caller, called, method in (
(system_dst, system_src, "copy_from_%s"),
(system_src, system_dst, "copy_to_%s"),
):
if hasattr(caller, method % called.storage):
try:
return getattr(caller, method % called.storage)(
src, dst, called, follow_symlinks
)
except AirfsInternalException:
continue
_copy_stream(dst, src)
def _copy_stream(dst, src):
"""
Copy files by streaming content from source to destination.
Args:
src (str or file-like object): Source file.
dst (str or file-like object): Destination file.
"""
with cos_open(src, "rb") as fsrc:
with cos_open(dst, "wb") as fdst:
for stream in (fsrc, fdst):
try:
buffer_size = getattr(stream, "_buffer_size")
break
except AttributeError:
continue
else:
buffer_size = COPY_BUFSIZE
copyfileobj(fsrc, fdst, buffer_size)
def copy(src, dst, *, follow_symlinks=True):
"""
Copies a source file to a destination file or directory.
Equivalent to "shutil.copy".
Source and destination can also be binary opened file-like objects.
.. versionadded:: 1.0.0
Args:
src (path-like object or file-like object): Source file.
dst (path-like object or file-like object): Destination file or directory.
follow_symlinks (bool): If True, follow symlinks.
Raises:
FileNotFoundError: Destination directory not found.
"""
src, src_is_storage = format_and_is_storage(src, True)
dst, dst_is_storage = format_and_is_storage(dst, True)
if not src_is_storage and not dst_is_storage:
return shutil_copy(src, dst, follow_symlinks=follow_symlinks)
if not hasattr(dst, "read"):
with ignore_exception(PermissionError):
            # If there is not enough permission to check whether the destination is
            # a directory, skip the check and just try to write to it
if isdir(dst):
dst = join(dst, basename(src))
_copy(src, dst, src_is_storage, dst_is_storage, follow_symlinks)
def copyfile(src, dst, *, follow_symlinks=True):
"""
Copies a source file to a destination file.
Equivalent to "shutil.copyfile".
Source and destination can also be binary opened file-like objects.
.. versionadded:: 1.2.0
Args:
src (path-like object or file-like object): Source file.
dst (path-like object or file-like object): Destination file.
follow_symlinks (bool): Follow symlinks.
Raises:
FileNotFoundError: Destination directory not found.
"""
src, src_is_storage = format_and_is_storage(src, True)
dst, dst_is_storage = format_and_is_storage(dst, True)
if not src_is_storage and not dst_is_storage:
return shutil_copyfile(src, dst, follow_symlinks=follow_symlinks)
_copy(src, dst, src_is_storage, dst_is_storage, follow_symlinks)
```
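A local-only sketch of the copy helpers above; with two local paths they simply delegate to `shutil`, while a storage path on either side would route through `_copy` (file names below are placeholders):

```python
from airfs._core.functions_shutil import copy, copyfile

with open("/tmp/airfs_demo_src.txt", "wt") as file:  # hypothetical local file
    file.write("payload")
copyfile("/tmp/airfs_demo_src.txt", "/tmp/airfs_demo_dst.txt")
copy("/tmp/airfs_demo_src.txt", "/tmp/airfs_demo_copy.txt")  # dst may also be a directory
```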
#### File: airfs/_core/io_base_system.py
```python
from abc import abstractmethod, ABC
from collections import OrderedDict, namedtuple
from re import compile
from stat import S_IFDIR, S_IFREG, S_IFLNK
from posixpath import join, normpath, dirname
from dateutil.parser import parse
from airfs._core.io_base import WorkerPoolBase
from airfs._core.compat import Pattern, getgid, getuid
from airfs._core.exceptions import (
ObjectNotFoundError,
ObjectPermissionError,
ObjectNotImplementedError,
ObjectUnsupportedOperation,
)
from airfs._core.functions_core import SeatsCounter
class SystemBase(ABC, WorkerPoolBase):
"""
Cloud storage system handler.
    Subclasses of this class are not intended to be public and are implementation
    details.
    This base system is for object storage that does not handle files with a true
    hierarchy like file systems do. Directories are virtual with this kind of storage.
Args:
storage_parameters (dict): Storage configuration parameters.
Generally, client configuration and credentials.
        unsecure (bool): If True, disables TLS/SSL to improve transfer performance,
            but makes the connection insecure.
roots (tuple): Tuple of roots to force use.
"""
__slots__ = (
"_storage_parameters",
"_unsecure",
"_storage",
"_client",
"_cache",
"_roots",
)
#: If True, storage support symlinks
SUPPORTS_SYMLINKS = False
# By default, assumes that information are in a standard HTTP header
_SIZE_KEYS = ("Content-Length",)
_CTIME_KEYS = ()
_MTIME_KEYS = ("Last-Modified",)
_CHAR_FILTER = compile(r"[^a-z0-9_]*")
def __init__(self, storage_parameters=None, unsecure=False, roots=None, **_):
WorkerPoolBase.__init__(self)
if storage_parameters:
storage_parameters = storage_parameters.copy()
for key in tuple(storage_parameters):
if key.startswith("airfs."):
del storage_parameters[key]
else:
storage_parameters = dict()
self._storage_parameters = storage_parameters
self._unsecure = unsecure
self._storage = self.__module__.rsplit(".", 1)[1]
self._client = None
self._cache = {}
if roots:
self._roots = roots
else:
self._roots = self._get_roots()
@property
def storage(self):
"""
Storage name
Returns:
str: Storage
"""
return self._storage
@property
def client(self):
"""
Storage client
Returns:
client
"""
if self._client is None:
self._client = self._get_client()
return self._client
def copy(self, src, dst, other_system=None):
"""
Copy object of the same storage.
Args:
src (str): Path or URL.
dst (str): Path or URL.
other_system (airfs._core.io_system.SystemBase subclass):
Other storage system. May be required for some storage.
"""
        # This method is intended to copy objects to and from the same storage.
        # It is possible to define methods to copy from a different storage by creating
        # a "copy_from_<src_storage>" method for the target storage and, vice versa, to
        # copy to a different storage by creating a "copy_to_<dst_storage>" method.
        # These methods must have the same signature as "copy".
        # "other_system" is optional and will be:
        # - The destination storage system with the "copy_to_<dst_storage>" method.
        # - The source storage system with the "copy_from_<src_storage>" method.
        # - None elsewhere.
        # Note that if no "copy_from_"/"copy_to_" methods are defined, copies are
        # performed through the current machine with "shutil.copyfileobj".
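        # Illustrative sketch (hypothetical storage names, not part of airfs):
        #
        #     class ExampleDestinationSystem(SystemBase):
        #         def copy_from_example_src(self, src, dst, other_system, follow_symlinks):
        #             """Server-side copy from the "example_src" storage into this one."""
        #
        # "_copy()" in functions_shutil looks such a method up on the destination
        # system as "copy_from_%s" % source_system.storage and passes the source
        # system as "other_system".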
raise ObjectUnsupportedOperation
def exists(
self,
path=None,
client_kwargs=None,
assume_exists=None,
header=None,
follow_symlinks=None,
):
"""
Return True if path refers to an existing path.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
            assume_exists (bool or None): This value defines the value to return when
                there is not enough permission to determine the existence of the file.
                If set to None, the permission exception is re-raised
                (default behavior). If set to True or False, this value is returned.
header (dict): Object header.
follow_symlinks (bool): Follow symlinks.
Returns:
bool: True if exists.
"""
try:
path, client_kwargs, header = self.resolve(
path, client_kwargs, header, follow_symlinks
)
self.head(path, client_kwargs, header)
except ObjectNotFoundError:
return False
except ObjectPermissionError:
if assume_exists is None:
raise
return assume_exists
return True
@abstractmethod
def _get_client(self):
"""
Storage client
Returns:
client
"""
@abstractmethod
def get_client_kwargs(self, path):
"""
Get base keyword arguments for client for a specific path.
Args:
path (str): Absolute path or URL.
Returns:
dict: client args
"""
def getctime(self, path=None, client_kwargs=None, header=None):
"""
Return the creation time of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch (see the time module).
"""
return self._getctime_from_header(self.head(path, client_kwargs, header))
def _getctime_from_header(self, header):
"""
Return the time from header
Args:
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
"""
return self._get_time(header, self._CTIME_KEYS, "getctime")
def getmtime(self, path=None, client_kwargs=None, header=None):
"""
        Return the time of last modification of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch (see the time module).
"""
return self._getmtime_from_header(self.head(path, client_kwargs, header))
def _getmtime_from_header(self, header):
"""
Return the time from header
Args:
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
"""
return self._get_time(header, self._MTIME_KEYS, "getmtime")
@staticmethod
def _get_time(header, keys, name):
"""
Get time from header
Args:
header (dict): Object header.
keys (tuple of str): Header keys.
name (str): Method name.
Returns:
float: The number of seconds since the epoch
"""
for key in keys:
try:
date_value = header[key]
except KeyError:
continue
try:
return parse(date_value).timestamp()
except TypeError:
return float(date_value)
raise ObjectUnsupportedOperation(name)
@abstractmethod
def _get_roots(self):
"""
Return URL roots for this storage.
Returns:
tuple of str or re.Pattern: URL roots
"""
def getsize(self, path=None, client_kwargs=None, header=None):
"""
Return the size, in bytes, of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
int: Size in bytes.
"""
return self._getsize_from_header(self.head(path, client_kwargs, header))
def _getsize_from_header(self, header):
"""
Return the size from header
Args:
header (dict): Object header.
Returns:
int: Size in bytes.
"""
for key in self._SIZE_KEYS:
try:
return int(header[key])
except KeyError:
continue
else:
raise ObjectUnsupportedOperation("getsize")
def isdir(
self,
path=None,
client_kwargs=None,
virtual_dir=True,
assume_exists=None,
header=None,
follow_symlinks=None,
):
"""
Return True if path is an existing directory.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
            virtual_dir (bool): If True, checks if the directory exists virtually when
                the object path does not exist as a specific object.
            assume_exists (bool or None): This value defines the value to return when
                there is not enough permission to determine the existence of the file.
                If set to None, the permission exception is re-raised
                (default behavior). If set to True or False, this value is returned.
header (dict): Object header.
follow_symlinks (bool): Follow symlinks.
Returns:
bool: True if directory exists.
"""
relative = self.relpath(path)
if not relative:
# Root always exists and is a directory
return True
if path[-1] == "/" or self.is_locator(relative, relative=True):
exists = self.exists(
path, client_kwargs, assume_exists, header, follow_symlinks
)
if exists:
return True
elif virtual_dir:
try:
next(self.list_objects(relative, relative=True, max_results=1))
return True
except (StopIteration, ObjectNotFoundError, ObjectUnsupportedOperation):
return False
return False
def isfile(
self,
path=None,
client_kwargs=None,
assume_exists=None,
header=None,
follow_symlinks=None,
):
"""
Return True if path is an existing regular file.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
            assume_exists (bool or None): This value defines the value to return when
                there is not enough permission to determine the existence of the file.
                If set to None, the permission exception is re-raised
                (default behavior). If set to True or False, this value is returned.
header (dict): Object header.
follow_symlinks (bool): Follow symlinks.
Returns:
bool: True if file exists.
"""
relative = self.relpath(path)
if not relative:
# Root always exists and is a directory
return False
if path[-1] != "/" and not self.is_locator(path, relative=True):
return self.exists(
path, client_kwargs, assume_exists, header, follow_symlinks
)
return False
@property
def storage_parameters(self):
"""
Storage parameters
Returns:
dict: Storage parameters
"""
return self._storage_parameters
@abstractmethod
def _head(self, client_kwargs):
"""
Returns object HTTP header.
Args:
client_kwargs (dict): Client arguments.
Returns:
dict: HTTP header.
"""
def head(self, path=None, client_kwargs=None, header=None):
"""
Returns object HTTP header.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
dict: HTTP header.
"""
if header is not None:
return header
elif client_kwargs is None:
client_kwargs = self.get_client_kwargs(path)
return self._head(client_kwargs)
@property
def roots(self):
"""
Return URL roots for this storage.
Returns:
tuple of str: URL roots
"""
return self._roots
@roots.setter
def roots(self, roots):
"""
Set URL roots for this storage.
Args:
roots (tuple of str): URL roots
"""
self._roots = roots
def relpath(self, path):
"""
Get path relative to storage.
args:
path (str): Absolute path or URL.
Returns:
str: relative path.
"""
for root in self.roots:
if isinstance(root, Pattern):
match = root.match(path)
if not match:
continue
root = match.group(0)
try:
relative = path.split(root, 1)[1]
return relative.lstrip("/")
except IndexError:
continue
return path
def is_abs(self, path):
"""
Return True if path is absolute in this storage.
args:
path (str): Path or URL.
Returns:
bool: True if absolute path.
"""
for root in self.roots:
if isinstance(root, Pattern):
if root.match(path):
return True
elif path.startswith(root):
return True
return False
def is_locator(self, path, relative=False):
"""
Returns True if path refer to a locator.
        Depending on the storage, a locator may be a bucket or container name, a hostname, ...
args:
path (str): path or URL.
relative (bool): Path is relative to current root.
Returns:
bool: True if locator.
"""
if not relative:
path = self.relpath(path)
return path and "/" not in path.rstrip("/")
def split_locator(self, path):
"""
Split the path into a pair (locator, path).
args:
path (str): Absolute path or URL.
Returns:
tuple of str: locator, path.
"""
relative = self.relpath(path)
try:
locator, tail = relative.split("/", 1)
except ValueError:
locator = relative
tail = ""
return locator, tail
def make_dir(self, path, relative=False):
"""
Make a directory.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
"""
if not relative:
path = self.relpath(path)
self._make_dir(
self.get_client_kwargs(self.ensure_dir_path(path, relative=True))
)
def _make_dir(self, client_kwargs):
"""
Make a directory.
args:
client_kwargs (dict): Client arguments.
"""
raise ObjectUnsupportedOperation("mkdir")
def remove(self, path, relative=False):
"""
Remove an object.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
"""
if not relative:
path = self.relpath(path)
self._remove(self.get_client_kwargs(path))
def _remove(self, client_kwargs):
"""
Remove an object.
args:
client_kwargs (dict): Client arguments.
"""
raise ObjectUnsupportedOperation("remove")
def ensure_dir_path(self, path, relative=False):
"""
Ensure the path is a dir path.
Should end with '/' except for schemes and locators.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
Returns:
path: dir path
"""
if not relative:
rel_path = self.relpath(path)
else:
rel_path = path
if self.is_locator(rel_path, relative=True):
path = path.rstrip("/")
elif rel_path:
path = path.rstrip("/") + "/"
return path
def list_objects(
self, path="", relative=False, first_level=False, max_results=None
):
"""
List objects.
Returns object path (relative to input "path") and object headers.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
            first_level (bool): If True, returns only first level objects.
Else, returns full tree.
max_results (int): If specified, the maximum result count returned.
Yields:
tuple: object path str, object header dict
"""
seats = SeatsCounter(max_results)
if not relative:
path = self.relpath(path)
if path == "":
generator = self._list_locators(max_results)
else:
generator = self._list_objects(
self.get_client_kwargs(path), path, max_results, first_level
)
if first_level:
generator = self._list_first_level_only(generator)
else:
generator = self._list_all_levels(generator, path, seats)
take_seat = seats.take_seat
for item in generator:
yield item
take_seat()
if seats.full:
return
def _list_all_levels(self, generator, path, seats):
"""
Recursively yields all level entries.
Args:
generator (iterable of tuple): path str, header dict, directory bool
path (str): Path being listed.
seats (airfs._core.functions_core.SeatsCounter): Seats counter.
Yields:
tuple: object path str, object header dict
"""
dirs = list()
add_dir = dirs.append
for obj_path, header, is_dir in generator:
if not obj_path:
# Do not yield itself
continue
if is_dir:
add_dir(obj_path)
obj_path = obj_path.rstrip("/") + "/"
yield obj_path, header
if dirs:
path = path.rstrip("/")
for sub_path in dirs:
if path:
full_path = "/".join((path, sub_path))
else:
full_path = sub_path
max_results = seats.seats_left
if max_results:
                    # Add an extra seat to compensate for the entry that yields itself
                    # (which is skipped), so the final count stays correct
max_results += 1
for obj_path, header in self._list_all_levels(
self._list_objects(
self.get_client_kwargs(full_path),
full_path,
max_results,
False,
),
full_path,
seats,
):
yield "/".join((sub_path.rstrip("/"), obj_path)), header
@staticmethod
def _list_first_level_only(generator):
"""
Yield the first level entries only.
Args:
generator (iterable of tuple): path str, header dict, has content bool
Yields:
tuple: object path str, object header dict
"""
dirs = set()
virtual_dirs = set()
add_virtual_dir = virtual_dirs.add
add_dir = dirs.add
for obj_path, header, is_dir in generator:
obj_path = obj_path.rstrip("/")
try:
obj_path, _ = obj_path.split("/", 1)
except ValueError:
if is_dir:
add_dir(obj_path)
obj_path += "/"
yield obj_path, header
else:
add_virtual_dir(obj_path)
for obj_path in virtual_dirs - dirs:
yield obj_path + "/", dict()
def _list_locators(self, max_results):
"""
Lists locators.
args:
max_results (int): The maximum results that should return the method.
Yields:
tuple: locator name str, locator header dict, has content bool
"""
# Implementation note: See "_list_objects" method.
raise ObjectUnsupportedOperation("listdir")
def _list_objects(self, client_kwargs, path, max_results, first_level):
"""
Lists objects.
args:
client_kwargs (dict): Client arguments.
path (str): Path to list.
max_results (int): The maximum results that should return the method.
None if no limit.
            first_level (bool): If True, may return only first level objects.
Yields:
tuple: object path str, object header dict, has content bool
"""
# Implementation note:
#
# Should return a tuple of the following values
# - The object path (relative to the "path" argument)
# - The object headers
# - The "had content" bool that must be True if the object has sub-content that
# should be listed recursively by the function. For instance, it should be
# False for files, True for directories that are list without there content
# and False for directories that are list with their content.
#
        # Returning only first level entries with "first_level" or only the maximum
        # entries with "max_results" is optional; these parameters are mainly
        # intended to help reduce the result size of requests against the storage and
        # improve performance.
raise ObjectUnsupportedOperation("listdir")
def islink(self, path=None, client_kwargs=None, header=None):
"""
Returns True if object is a symbolic link.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
bool: True if object is Symlink.
"""
return False
def _getuid(self, path=None, client_kwargs=None, header=None):
"""
Get object user ID.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
int: User ID.
"""
# Default to current process UID
return getuid()
def _getgid(self, path=None, client_kwargs=None, header=None):
"""
Get object group ID.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
int: Group ID.
"""
# Default to current process GID
return getgid()
def _getmode(self, path=None, client_kwargs=None, header=None):
"""
Get object permission mode in Unix format.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
int: Group ID.
"""
# Default to an arbitrary common value
return 0o644
def stat(self, path=None, client_kwargs=None, header=None, follow_symlinks=None):
"""
Get the status of an object.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
follow_symlinks (bool): Follow symlinks.
Returns:
namedtuple: Stat result object. Follows the "os.stat_result" specification
and may contain storage-dependent extra entries.
"""
path, client_kwargs, header = self.resolve(
path, client_kwargs, header, follow_symlinks
)
stat = OrderedDict(
(
("st_mode", self._getmode(path, client_kwargs, header)),
("st_ino", 0),
("st_dev", 0),
("st_nlink", 0),
("st_uid", self._getuid()),
("st_gid", self._getgid()),
("st_size", 0),
("st_atime", 0),
("st_mtime", 0),
("st_ctime", 0),
("st_atime_ns", 0),
("st_mtime_ns", 0),
("st_ctime_ns", 0),
)
)
header = self.head(path, client_kwargs, header)
try:
stat["st_size"] = int(self._getsize_from_header(header))
except ObjectUnsupportedOperation:
pass
for st_time, st_time_ns, method in (
("st_mtime", "st_mtime_ns", self._getmtime_from_header),
("st_ctime", "st_ctime_ns", self._getctime_from_header),
):
try:
time_value = method(header)
except ObjectUnsupportedOperation:
continue
stat[st_time] = int(time_value)
stat[st_time_ns] = int(time_value * 1000000000)
if self.islink(path=path, header=header):
stat["st_mode"] += S_IFLNK
elif self.isdir(path=path, client_kwargs=client_kwargs, header=header):
stat["st_mode"] += S_IFDIR
else:
stat["st_mode"] += S_IFREG
sub = self._CHAR_FILTER.sub
for key, value in tuple(header.items()):
stat[sub("", key.lower().replace("-", "_"))] = value
stat_result = namedtuple("stat_result", tuple(stat))
stat_result.__name__ = "os.stat_result"
stat_result.__module__ = "airfs"
return stat_result(**stat)
def read_link(self, path=None, client_kwargs=None, header=None):
"""
Return the path linked by the symbolic link.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
str: Path.
"""
raise ObjectUnsupportedOperation("symlink")
def symlink(self, target, path=None, client_kwargs=None):
"""
Creates a symbolic link to target.
Args:
target (str): Target path or URL.
path (str): File path or URL.
client_kwargs (dict): Client arguments.
"""
raise ObjectUnsupportedOperation("symlink")
def resolve(self, path=None, client_kwargs=None, header=None, follow_symlinks=None):
"""
Follow symlinks and return input arguments updated for target.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
follow_symlinks (bool): If True, follow the symlink if any.
If False, return the input directly if symlinks are supported by the storage,
else raise ObjectNotImplementedError. If None, same as False but return the
input instead of raising an exception.
Returns:
tuple: path, client_kwargs, headers of the target.
Raises:
ObjectNotImplementedError: "follow_symlinks" is False on a storage that does
not support symlinks.
"""
if not self.SUPPORTS_SYMLINKS and follow_symlinks is False:
raise ObjectNotImplementedError(feature="follow_symlink=False")
elif not follow_symlinks or not self.SUPPORTS_SYMLINKS:
return path, client_kwargs, header
return self._resolve(path, client_kwargs, header)
def _resolve(self, path=None, client_kwargs=None, header=None):
"""
Resolve core function.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
tuple: path, client_kwargs, headers of the target.
"""
is_link = self.islink(path, client_kwargs, header)
if is_link:
target = self.read_link(path, client_kwargs, header)
if not self.is_abs(target):
rel_path = self.relpath(path)
target = path[: -len(rel_path)] + normpath(
join(dirname(rel_path), target)
)
return self._resolve(target)
if not is_link and self.exists(path, client_kwargs, header=header):
return path, client_kwargs, header
try:
parent, name = path.rstrip("/").rsplit("/", 1)
except ValueError:
return path, client_kwargs, header
parent_target = self._resolve(parent + "/")[0]
path = "/".join((parent_target, name)) + ("/" if path.endswith("/") else "")
return path, None, None
def shareable_url(self, path, expires_in):
"""
Get a shareable URL for the specified path.
Args:
path (str): Path or URL.
expires_in (int): Expiration in seconds.
Returns:
str: Shareable URL.
"""
return self._shareable_url(
self.get_client_kwargs(self.relpath(path)), expires_in
)
def _shareable_url(self, client_kwargs, expires_in):
"""
Get a shareable URL for the specified path.
Args:
client_kwargs (dict): Client arguments.
expires_in (int): Expiration in seconds.
Returns:
str: Shareable URL.
"""
raise ObjectNotImplementedError("shareable_url")
```
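The implementation note in `_list_objects` above describes the (path, header, "has content") tuple contract and the optional first-level filtering. Below is a standalone, illustrative sketch of that contract (not airfs code; the example entries and function names are made up) showing how a `_list_first_level_only`-style filter collapses deeper entries into virtual directories.

```python
# Standalone sketch (not airfs code): emulate the "_list_objects" contract and
# the first-level filtering shown above.

def fake_list_objects():
    """Yield (relative path, header dict, has content) tuples like a storage would."""
    yield "file.txt", {"size": 3}, False            # A file
    yield "dir/", {}, True                          # A directory listed without its content
    yield "other/nested.txt", {"size": 1}, False    # Implies a virtual "other/" directory

def first_level_only(generator):
    """Keep only first-level entries, emitting virtual directories once."""
    dirs = set()
    virtual_dirs = set()
    for obj_path, header, is_dir in generator:
        obj_path = obj_path.rstrip("/")
        try:
            obj_path, _ = obj_path.split("/", 1)
        except ValueError:
            if is_dir:
                dirs.add(obj_path)
                obj_path += "/"
            yield obj_path, header
        else:
            virtual_dirs.add(obj_path)
    for obj_path in virtual_dirs - dirs:
        yield obj_path + "/", dict()

print(list(first_level_only(fake_list_objects())))
# [('file.txt', {'size': 3}), ('dir/', {}), ('other/', {})]
```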
#### File: storage/github/_client.py
```python
from datetime import datetime, timedelta
from json import dumps
from time import sleep
from urllib.parse import urlparse, parse_qs
from dateutil.parser import parse
from requests import Session
from airfs.storage.http import _handle_http_errors
from airfs._core.exceptions import (
AirfsWarning,
AirfsException,
ObjectNotFoundError,
ObjectPermissionError,
)
from airfs._core.cache import get_cache, set_cache, CACHE_SHORT_EXPIRY, NoCacheException
GITHUB_API = "https://api.github.com"
_CACHE_SHORT_DELTA = timedelta(seconds=CACHE_SHORT_EXPIRY)
_CODES_CONVERSION = {
403: ObjectPermissionError,
404: ObjectNotFoundError,
# The API sometimes returns this code when a commit hash is not found instead of
# returning 404
422: ObjectNotFoundError,
}
class GithubRateLimitException(AirfsException):
"""Exception if rate limit reached"""
class GithubRateLimitWarning(AirfsWarning):
"""Warning if rate limit reached and waiting"""
class Client:
"""
GitHub REST API client.
Args:
token (str): GitHub API authentication token.
wait_rate_limit (bool): If True, wait if API rate limit is reached, else raise
"airfs.storage.github.GithubRateLimitException" exception.
wait_warn (bool): If True and "wait_rate_limit" is True, warn using
"airfs.storage.github.GithubRateLimitWarning" when waiting for rate limit
reset for the first time.
wait_retry_delay (int or float): Delay in seconds between two API get attempts
when waiting for the rate limit reset.
"""
_RATE_LIMIT_WARNED = False
__slots__ = (
"_request",
"session",
"_token",
"_headers",
"_wait_rate_limit",
"_wait_warn",
"_wait_retry_delay",
)
def __init__(
self, token=None, wait_rate_limit=True, wait_warn=True, wait_retry_delay=60
):
self._wait_rate_limit = wait_rate_limit
self._wait_warn = wait_warn
self._wait_retry_delay = wait_retry_delay
self._headers = None
self._token = token
self.session = Session()
self._request = self.session.request
def _api_headers(self, previous_headers=None):
"""
Return headers to use to make requests to GitHub API.
Args:
previous_headers (dict): Headers from a previous cached identical request.
Used to perform a conditional request to check if data was updated
without consuming the rate limit.
Returns:
dict or None: API request headers.
"""
if self._headers is None:
auth_headers = {}
token = self._token
if token:
auth_headers["Authorization"] = f"token {token}"
self._headers = auth_headers
if previous_headers is not None:
headers = self._headers.copy()
for condition, key in (
("If-Modified-Since", "Last-Modified"),
("If-None-Match", "ETag"),
):
try:
headers[condition] = previous_headers[key]
except KeyError:
continue
return headers
return self._headers
def request(self, path, method="GET", **kwargs):
"""
Perform an HTTP request over the GitHub API and other GitHub domains.
Handle the case where the API rate-limit is reached.
Args:
path (str): GitHub API relative path or GitHub non API full URL.
method (str): HTTP method. Default to "GET".
kwargs: requests.request keyword arguments.
Returns:
requests.Response: Response.
"""
if path.startswith("https://"):
response = self._request(method, path, **kwargs)
_handle_http_errors(response, _CODES_CONVERSION)
return response
while True:
response = self._request(method, GITHUB_API + path, **kwargs)
if (
response.status_code == 403
and int(response.headers.get("X-RateLimit-Remaining", "-1")) == 0
):
self._handle_rate_limit()
continue
return response
def get(self, path, params=None, never_expire=False):
"""
Get a result from the GitHub API. Also handles caching of results to speed up
future requests and reduce rate-limit consumption.
Args:
path (str): GitHub API path.
params (dict): Request parameters.
never_expire (bool): Indicate that the request result should never expire
and can be cached indefinitely.
Returns:
tuple: result dict, headers dict.
"""
cache_name = path
if params:
cache_name += dumps(params)
try:
result, headers = get_cache(cache_name)
except NoCacheException:
result = headers = None
else:
if never_expire:
return result, headers
dt_date = parse(headers["Date"])
if dt_date > datetime.now(dt_date.tzinfo) - _CACHE_SHORT_DELTA:
return result, headers
response = self.request(
path, params=params, headers=self._api_headers(previous_headers=headers)
)
if response.status_code == 304:
return result, headers
_handle_http_errors(response, _CODES_CONVERSION)
result = response.json()
headers = dict(response.headers)
set_cache(cache_name, [result, headers], long=True)
return result, headers
def get_paged(self, path, params=None):
"""
Get a multi-page result from the GitHub API.
Args:
path (str): GitHub API path.
params (dict): Request parameters.
Returns:
generator of dict: results.
"""
if params:
params = params.copy()
else:
params = dict()
max_page = 0
page = 1
while page <= max_page or not max_page:
results, headers = self.get(path, params=params)
for result in results:
yield result
page += 1
params["page"] = page
if max_page == 0:
try:
links = headers["Link"]
except KeyError:
# If not present, there is only one page.
break
max_page = self._parse_link_header(links)
@staticmethod
def _parse_link_header(links):
"""
Get number of the last page from the "Link" header.
Args:
links (str): "Link" header value.
Returns:
int: Number of the last page.
"""
for link in links.split(","):
url, rel = link.split(";", 1)
if rel.strip() == 'rel="last"':
return int(parse_qs(urlparse(url.strip(" <>")).query)["page"][0])
raise RuntimeError('Last page not found in "Link" header: ' + links)
def _handle_rate_limit(self):
"""
Wait until remaining rate limit is greater than 0, or raise exception.
"""
if not self._wait_rate_limit:
raise GithubRateLimitException(self._rate_limit_reached())
url = GITHUB_API + "/rate_limit"
headers = self._api_headers()
remaining = 0
while remaining == 0:
if self._wait_warn and not Client._RATE_LIMIT_WARNED:
from warnings import warn
warn(self._rate_limit_reached(True), GithubRateLimitWarning)
Client._RATE_LIMIT_WARNED |= True
sleep(self._wait_retry_delay)
resp = self._request("GET", url, headers=headers)
remaining = int((resp.json())["resources"]["core"]["remaining"])
def _rate_limit_reached(self, waiting=False):
"""
Rate limit message for exception or warning.
Args:
waiting (bool): True if waiting for reset.
Returns:
str: Exception or warning message.
"""
msg = ["GitHub rate limit reached."]
if waiting:
msg.append("Waiting for limit reset...")
if "Authorization" not in self._api_headers():
msg.append("Authenticate to GitHub to increase the limit.")
return " ".join(msg)
```
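`get_paged` above finds the number of pages from the GitHub `Link` response header via `_parse_link_header`. A minimal standalone sketch of that parsing follows; the header value and page numbers are fabricated for illustration.

```python
# Standalone sketch: extract the last page number from a GitHub-style "Link" header.
from urllib.parse import urlparse, parse_qs

link_header = (
    '<https://api.github.com/repos/jgoutin/airfs/releases?page=2>; rel="next", '
    '<https://api.github.com/repos/jgoutin/airfs/releases?page=7>; rel="last"'
)

def last_page(links):
    """Return the number of the last page from a "Link" header value."""
    for link in links.split(","):
        url, rel = link.split(";", 1)
        if rel.strip() == 'rel="last"':
            # Strip surrounding spaces and angle brackets before parsing the URL
            return int(parse_qs(urlparse(url.strip(" <>")).query)["page"][0])
    raise RuntimeError('Last page not found in "Link" header: ' + links)

assert last_page(link_header) == 7
```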
#### File: storage/github/_model_archive.py
```python
from airfs.storage.github._model_base import GithubObject
from airfs.storage.github._model_git import Branch, Tag
from airfs.storage.github._model_reference import Reference
class Archive(GithubObject):
"""Git tree archive"""
KEY = "archive"
GET = "https://github.com/{owner}/{repo}/archive/{archive}"
HEAD_KEYS = {"Content-Type", "Content-Length"}
HEAD_FROM = {"pushed_at": Reference, "sha": Reference}
@classmethod
def list(cls, client, spec, first_level=False):
"""
List archives for all branches and tags.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
first_level (bool): If True, returns only first level objects.
Returns:
generator of tuple: object name str, object header dict, has content bool
"""
cls._raise_if_not_dir(not spec.get("archive"), spec, client)
for parent in (Tag, Branch):
response = client.get(parent.LIST.format(**spec))[0]
key = parent.LIST_KEY
parent_key = parent.KEY
for ref in response:
ref_spec = spec.copy()
ref_spec[parent_key] = ref_name = ref[key]
ref_head = parent.set_header(ref)
for ext in (".tar.gz", ".zip"):
name = ref_name + ext
yield name, cls(client, ref_spec, ref_head, name), False
@classmethod
def head_obj(cls, client, spec):
"""
Get archive headers.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
dict: Object headers.
"""
url = cls.GET.format(**spec)
headers = ""
# Sometimes, Content-Length is missing from the response, so retry until it is present
while "Content-Length" not in headers:
headers = client.request(url, method="HEAD").headers
return cls.set_header(headers)
def _update_spec_parent_ref(self, parent_key):
"""
Update the spec with the parent reference.
Args:
parent_key (str): The parent key (parent_class.KEY).
"""
name = self._spec["archive"]
self._spec[parent_key] = name.rsplit(
".", 2 if name.lower().endswith(".tar.gz") else 1
)[0]
```
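`Archive._update_spec_parent_ref` above recovers the branch or tag name from an archive file name by stripping its extension. A standalone illustration of that split (the archive names are examples):

```python
# Standalone sketch: recover the Git reference from an archive file name.
def ref_from_archive_name(name):
    """Strip ".tar.gz" or any single extension from an archive name."""
    return name.rsplit(".", 2 if name.lower().endswith(".tar.gz") else 1)[0]

assert ref_from_archive_name("1.4.0.tar.gz") == "1.4.0"
assert ref_from_archive_name("master.zip") == "master"
```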
#### File: storage/github/_model_base.py
```python
from collections import ChainMap
from collections.abc import Mapping
from itertools import chain
from airfs._core.exceptions import (
ObjectIsADirectoryError,
ObjectNotASymlinkError,
ObjectNotADirectoryError,
ObjectNotFoundError,
)
class GithubObject(Mapping):
"""
Github Object base class.
Instances represent the headers of an object with a specific spec. Instances are
only generated by the "head" class method that acts as a factory.
Classes also allow navigating the virtual file-system tree that represents the
GitHub repositories.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Object spec.
headers (dict): Known header values. Missing values will be fetched lazily from
parents.
name (str): Object name, if not already in spec.
"""
#: Virtual file-system structure starting from this object class
#: Contains dicts representing virtual folders and other _GithubObject subclasses
#: that represent objects inside.
STRUCT = None
#: The specification "key" that represent this object
KEY = None
#: If specified, is a Git reference. True to use KEY value, str value to use a
#: specific hardcoded value.
REF = None
#: API path to get objects of this GitHub class
GET = None
#: API path to get objects headers of this GitHub class
HEAD = None
#: Head result keys to keep
HEAD_KEYS = set()
#: Head result keys to move to dict root.
#: dict key is key name, dict value is tuple of key path to follow
HEAD_EXTRA = ()
#: Keys to head from a parent class, key is key name, value is parent class
HEAD_FROM = {}
#: API path to list objects of this GitHub class
LIST = None
#: API key of objects names to list
LIST_KEY = "name"
#: Symlink like object pointing to the specified absolute path
SYMLINK = None
__slots__ = ("_client", "_spec", "_headers", "_header_updated")
def __init__(self, client, spec, headers=None, name=None):
self._client = client
if name is not None:
spec = spec.copy()
spec[self.KEY] = name
self._spec = spec
if headers is None:
self._headers = self.head_obj(self._client, self._spec)
self._header_updated = True
else:
self._headers = headers
self._header_updated = False
def __getitem__(self, key):
"""
Get a value from the object header.
Args:
key (str): Header key.
Returns:
object: Header value matching the key.
"""
try:
return self._headers[key]
except KeyError:
pass
try:
parent = self.HEAD_FROM[key]
except KeyError:
self._update_headers()
else:
self._update_headers_from_parent(parent)
return self._headers[key]
def __iter__(self):
"""
Iterate over object header keys.
Yields:
str: keys
"""
for key in chain(
self.HEAD_KEYS, (key for key, _ in self.HEAD_EXTRA), self.HEAD_FROM
):
yield key
def __len__(self):
"""
Header length.
Returns:
int: Length
"""
return len(self.HEAD_KEYS) + len(self.HEAD_EXTRA) + len(self.HEAD_FROM)
def __repr__(self):
"""
Headers representation. Values that are lazily evaluated and are not yet
evaluated are replaced by the "<Not evaluated yet>" string.
Returns:
str: repr value.
"""
content = self._headers.copy()
for key in self:
content.setdefault(key, "<Not evaluated yet>")
return repr(content)
__str__ = __repr__
def _update_spec_parent_ref(self, parent_key):
"""
Update the spec with the parent reference.
Args:
parent_key (str): The parent key (parent_class.KEY).
"""
self._update_headers()
self._spec[parent_key] = self._headers[parent_key]
def _update_headers(self):
"""
Ensure current object headers are updated.
"""
if not self._header_updated:
headers = self.head_obj(self._client, self._spec)
self._headers.update(headers)
self._header_updated = True
def _update_headers_from_parent(self, parent):
"""
Ensure current object headers are updated with parent headers.
Args:
parent (airfs.storage.github._model_base.GithubObject subclass instance):
Parent.
"""
if parent.KEY not in self._spec and parent.KEY is not None:
self._update_spec_parent_ref(parent.KEY)
parent_headers = parent.head(self._client, self._spec)
headers = self._headers
for key, obj_cls in self.HEAD_FROM.items():
if obj_cls == parent:
headers[key] = parent_headers[key]
@classmethod
def next_model(cls, client, spec):
"""
Get next model in the structure.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Partial object spec.
Returns:
_Model subclass: model.
"""
cls = cls._get_cls(client, spec) # noqa
if cls.STRUCT is None:
cls._set_key(spec, "/".join(spec["keys"]))
spec["keys"].clear()
spec["object"] = cls
spec["content"] = cls
return cls
cls._update_key(spec)
try:
key = spec["keys"].popleft()
except IndexError:
spec["object"] = cls
spec["content"] = cls.STRUCT
return cls
model = cls.STRUCT
while isinstance(model, dict):
model = cls._get_dict_model(key, model, spec)
try:
key = spec["keys"].popleft()
except IndexError:
return cls._get_latest_model(model, spec)
spec["keys"].appendleft(key)
spec["parent"] = cls
return model.next_model(client, spec)
@classmethod
def _get_dict_model(cls, key, model, spec):
"""
Get submodel of a dict.
Args:
key (str): Model key.
model (dict): Current model.
spec (dict): Partial object spec.
Returns:
_Model subclass or dict: Next model.
"""
try:
return model[key]
except KeyError:
raise ObjectNotFoundError(path=spec["full_path"])
@classmethod
def _update_key(cls, spec):
"""
Update key in spec with current model.
Args:
spec (dict): Partial object spec.
"""
if cls.KEY is not None:
cls._set_key(spec, spec["keys"].popleft())
elif cls.REF is not None:
spec["ref"] = cls.REF
@classmethod
def _get_latest_model(cls, model, spec):
"""
Get latest model when no more keys to evaluate.
Args:
model (_Model subclass): Current model
spec (dict): Partial object spec.
Returns:
_Model subclass: Latest model.
"""
if hasattr(model, "KEY") and model.KEY is not None:
spec["content"] = model
model = cls
elif hasattr(model, "STRUCT"):
spec["content"] = model.STRUCT
else:
# Is a dict
spec["content"] = model
spec["object"] = model
return model
@classmethod
def _get_cls(cls, client, spec):
"""
Get object class.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Partial object spec.
Returns:
_Model subclass: model.
"""
return cls
@classmethod
def _set_key(cls, spec, value):
"""
Set "KEY" value, and eventually "ref" value.
Args:
spec (dict): Partial object spec.
value (str): Key value
"""
spec[cls.KEY] = value
if cls.REF:
spec["ref"] = value
@classmethod
def list(cls, client, spec, first_level=False):
"""
List objects of this GitHub class matching the spec.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
first_level (bool): If True, returns only first level objects.
Yields:
tuple: object name str, object header dict, has content bool
"""
response = client.get_paged(cls.LIST.format(**spec))
key = cls.LIST_KEY
set_header = cls.set_header
is_dir = cls.STRUCT is not None
for headers in response:
name = headers[key]
yield name, cls(client, spec, set_header(headers), name), is_dir
@classmethod
def head_obj(cls, client, spec):
"""
Head the object of this GitHub class matching the spec.
Only returns the result directly from the current object response as a dict.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
dict: Object headers.
"""
return cls.set_header(client.get(cls.HEAD.format(**spec))[0])
@classmethod
def head(cls, client, spec, headers=None):
"""
Head the object of this GitHub class matching the spec.
Returns a dict-like object that can retrieve keys from this object's response or
its parents.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
headers (dict): Known header values. Missing values will be fetched lazily from
parents.
Returns:
_GithubObject subclass instance: Object headers.
"""
return cls(client, spec, headers)
@classmethod
def get_url(cls, client, spec):
"""
Get the URL of the object of this GitHub class matching the spec.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
str: Object URL.
"""
if cls.GET is None:
raise ObjectIsADirectoryError(spec["full_path"])
return cls.GET.format(**spec)
@classmethod
def set_header(cls, response):
"""
Set object header from raw API response.
Args:
response (dict): Raw API response.
Returns:
dict: Object header.
"""
head = {key: response[key] for key in (response.keys() & cls.HEAD_KEYS)}
for key_name, key_path in cls.HEAD_EXTRA:
value = response
try:
for key in key_path:
value = value[key]
except KeyError:
continue
head[key_name] = value
return head
@classmethod
def read_link(cls, client, spec):
"""
Return the path linked by the symbolic link.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
str: Path.
"""
if cls.SYMLINK is None:
raise ObjectNotASymlinkError(path=spec["full_path"])
target = cls.SYMLINK.format(**ChainMap(spec, cls.head(client, spec)))
content = spec.get("content")
if isinstance(cls.STRUCT, dict) and not isinstance(content, dict):
for key, obj_cls in cls.STRUCT.items():
if content == obj_cls:
return f"{target}/{key}"
return target
@classmethod
def _raise_if_not_dir(cls, isdir, spec, client=None):
"""
Raise exception if not a directory.
Args:
isdir (bool): True if a directory.
spec (dict): Item spec.
client (airfs.storage.github._api.ApiV3): Client. If present, also checks
that the object exists when it is not a directory.
Raises:
airfs._core.exceptions.ObjectNotADirectoryError: Not a directory.
"""
if not isdir:
if client:
# Check if exists
cls.head_obj(client, spec)
raise ObjectNotADirectoryError(path=spec["full_path"])
```
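`GithubObject` is a `Mapping` whose values may be resolved lazily (from the API or from parent objects) on first access in `__getitem__`. The toy class below is not airfs code; it only illustrates the same lazy-lookup pattern with a plain callable standing in for the API.

```python
# Standalone sketch: a Mapping that resolves missing keys lazily on first access.
from collections.abc import Mapping

class LazyHeaders(Mapping):
    """Dict-like headers where missing keys are computed by a loader callable."""

    def __init__(self, known, loader):
        self._headers = dict(known)
        self._loader = loader  # Called to fill in missing keys on demand

    def __getitem__(self, key):
        try:
            return self._headers[key]
        except KeyError:
            self._headers.update(self._loader())
            return self._headers[key]

    def __iter__(self):
        return iter(self._headers)

    def __len__(self):
        return len(self._headers)

headers = LazyHeaders({"name": "airfs"}, loader=lambda: {"sha": "0" * 40})
print(headers["name"])  # Known immediately
print(headers["sha"])   # Triggers the loader on first access
```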
#### File: storage/github/_model_release.py
```python
from airfs._core.exceptions import ObjectNotFoundError
from airfs.storage.github._model_archive import Archive
from airfs.storage.github._model_git import Commit, Tag, Tree
from airfs.storage.github._model_base import GithubObject
class ReleaseAsset(GithubObject):
"""GitHub release asset"""
KEY = "asset"
GET = "https://github.com/{owner}/{repo}/releases/download/{tag}/{asset}"
HEAD_KEYS = {"size", "download_count", "created_at", "updated_at", "content_type"}
HEAD_FROM = {"sha": Tag}
@classmethod
def list(cls, client, spec, first_level=False):
"""
List assets of a release.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
first_level (bool): If True, returns only first level objects.
Returns:
generator of tuple: object name str, object header dict, has content bool
"""
cls._raise_if_not_dir(not spec.get("asset"), spec, client)
for asset in cls._parent_release(client, spec)["assets"]:
name = asset["name"]
yield name, cls(client, spec, cls.set_header(asset), name), False
@classmethod
def head_obj(cls, client, spec):
"""
Get asset headers.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
dict: Object headers.
"""
name = spec["asset"]
for asset in cls._parent_release(client, spec)["assets"]:
if asset["name"] == name:
return cls.set_header(asset)
raise ObjectNotFoundError(path=spec["full_path"])
@classmethod
def get_url(cls, client, spec):
"""
Get asset URL.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
str: Object URL.
"""
if "tag" not in spec:
spec["tag"] = cls._parent_release(client, spec)["tag_name"]
return cls.GET.format(**spec)
@classmethod
def _parent_release(cls, client, spec):
"""
Get the parent release
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
dict: Release raw headers.
"""
return client.get(Release.HEAD.format(**spec))[0]
class ReleaseArchive(Archive):
"""GitHub release archive"""
HEAD_FROM = {"pushed_at": Tag, "sha": Tag} # type: ignore
@classmethod
def list(cls, client, spec, first_level=False):
"""
List archives for all releases. Uses a generic unversioned archive name to avoid
having to know the "latest" tag to get its archive.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
first_level (bool): If True, returns only first level objects.
Returns:
generator of tuple: object name str, object header dict, has content bool
"""
cls._raise_if_not_dir(not spec.get("archive"), spec, client)
for ext in (".tar.gz", ".zip"):
name = f"source_code{ext}"
yield name, cls(client, spec, name=name), False
@classmethod
def head_obj(cls, client, spec):
"""
Get archive headers.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
dict: Object headers.
"""
cls._set_archive_tag(client, spec)
return Archive.head_obj(client, spec)
def _update_spec_parent_ref(self, parent_key):
"""
Update the spec with the parent reference.
Args:
parent_key (str): The parent key (parent_class.KEY).
"""
self._set_archive_tag(self._client, self._spec)
@staticmethod
def _set_archive_tag(client, spec):
"""
Get the tag and archive exact name.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
"""
if "tag" not in spec:
spec["tag"] = LatestRelease.get_tag(client, spec)
if spec["archive"].startswith("source_code"):
spec["archive"] = spec["archive"].replace("source_code", spec["tag"])
class Release(GithubObject):
"""GitHub release"""
KEY = "tag"
CTIME = "created_at"
LIST = "/repos/{owner}/{repo}/releases"
LIST_KEY = "tag_name"
HEAD = "/repos/{owner}/{repo}/releases/tags/{tag}"
HEAD_KEYS = {"prerelease", "created_at", "published_at", "name"}
HEAD_FROM = {"sha": Tag, "tree_sha": Commit}
HEAD_EXTRA = (("tag", ("tag_name",)),)
STRUCT = {
"assets": ReleaseAsset,
"tree": Tree,
"archive": ReleaseArchive,
}
class LatestRelease(Release):
"""Latest GitHub release, with fallback to HEAD"""
KEY = None # type: ignore
HEAD = "/repos/{owner}/{repo}/releases/latest"
SYMLINK = "https://github.com/{owner}/{repo}/releases/tag/{tag}"
@classmethod
def get_tag(cls, client, spec):
"""
Get the tag matching the latest release.
Args:
client (airfs.storage.github._api.ApiV3): Client.
spec (dict): Item spec.
Returns:
str: Tag.
"""
return client.get(cls.HEAD.format(**spec))[0]["tag_name"]
class ReleaseDownload(GithubObject):
"""
GitHub release downloads only
To handle "https://github.com/:owner/:repo/releases/download/:tag/:asset_name"
"""
KEY = "tag"
CTIME = "created_at"
LIST = "/repos/{owner}/{repo}/releases"
LIST_KEY = "tag_name"
HEAD = "/repos/{owner}/{repo}/releases/tags/{tag}"
HEAD_KEYS = {"prerelease", "created_at", "published_at", "name"}
HEAD_FROM = {"sha": Tag, "tree_sha": Commit}
STRUCT = ReleaseAsset
```
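`ReleaseArchive._set_archive_tag` above maps the generic `source_code.*` archive names to tag-specific names once the release tag is known. A small standalone illustration (the tag value is an example):

```python
# Standalone sketch: resolve a generic release archive name to its tagged name.
def resolve_archive_name(archive, tag):
    """Replace the "source_code" prefix by the release tag."""
    if archive.startswith("source_code"):
        return archive.replace("source_code", tag)
    return archive

assert resolve_archive_name("source_code.tar.gz", "1.4.0") == "1.4.0.tar.gz"
assert resolve_archive_name("1.3.0.zip", "1.4.0") == "1.3.0.zip"
```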
#### File: rfs/tests/test_core_exceptions.py
```python
import pytest
def test_handle_os_exceptions():
"""Tests airfs._core.exceptions.handle_os_exceptions"""
from airfs._core.exceptions import (
handle_os_exceptions,
ObjectNotFoundError,
ObjectPermissionError,
ObjectNotADirectoryError,
ObjectExistsError,
)
with pytest.raises(FileNotFoundError):
with handle_os_exceptions():
raise ObjectNotFoundError
with pytest.raises(PermissionError):
with handle_os_exceptions():
raise ObjectPermissionError
with pytest.raises(FileExistsError):
with handle_os_exceptions():
raise ObjectExistsError
with pytest.raises(NotADirectoryError):
with handle_os_exceptions():
raise ObjectNotADirectoryError
with pytest.raises(FileExistsError):
with handle_os_exceptions():
raise FileExistsError()
def test_full_traceback():
"""Ensure full traceback mode is enabled in tests"""
from airfs._core.exceptions import _FULLTRACEBACK
assert _FULLTRACEBACK
```
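The test above exercises `handle_os_exceptions`, which surfaces airfs exceptions as the matching built-in `OSError` subclasses. The context manager below is an illustrative reimplementation of that idea, not the airfs implementation; the exception class and mapping are made up.

```python
# Standalone sketch: translate library exceptions into OSError subclasses.
from contextlib import contextmanager

class NotFoundError(Exception):
    """Example library exception."""

_TRANSLATION = {NotFoundError: FileNotFoundError}

@contextmanager
def translate_exceptions():
    """Re-raise known library exceptions as their OSError equivalents."""
    try:
        yield
    except tuple(_TRANSLATION) as exception:
        raise _TRANSLATION[type(exception)](str(exception)) from exception

try:
    with translate_exceptions():
        raise NotFoundError("missing object")
except FileNotFoundError as error:
    print(f"Translated: {error}")
```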
#### File: rfs/tests/test_core_io_base.py
```python
import pytest
def test_object_base_io():
"""Tests airfs._core.io_base.ObjectIOBase"""
from airfs._core.io_base import ObjectIOBase
name = "name"
object_io = ObjectIOBase(name)
assert object_io.name == name
assert object_io.mode == "r"
assert object_io.readable()
assert object_io.seekable()
assert not object_io.writable()
assert name in str(object_io)
object_io = ObjectIOBase(name, mode="w")
assert object_io.mode == "w"
assert not object_io.readable()
assert object_io.seekable()
assert object_io.writable()
object_io = ObjectIOBase(name, mode="a")
assert object_io.mode == "a"
assert not object_io.readable()
assert object_io.seekable()
assert object_io.writable()
with pytest.raises(ValueError):
ObjectIOBase(name, mode="z")
def test_memoizedmethod():
"""Tests airfs._core.utilities.memoizedmethod"""
from airfs._core.io_base import memoizedmethod
class Dummy:
"""Dummy class"""
def __init__(self):
self._cache = {}
@memoizedmethod
def to_memoize(self, arg):
"""Fake method"""
return arg
dummy = Dummy()
assert not dummy._cache
value = "value"
assert dummy.to_memoize(value) == value
assert dummy._cache == {"to_memoize": value}
assert dummy.to_memoize(value) == value
```
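`memoizedmethod`, tested above, caches a method's result in the instance `_cache` dict under the method name. The following is an illustrative reimplementation consistent with the test's assertions; the real airfs decorator may differ in details.

```python
# Standalone sketch: cache a method result in "self._cache" under the method name.
from functools import wraps

def memoizedmethod(method):
    """Return a wrapper that memoizes "method" in "self._cache"."""
    name = method.__name__

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            return self._cache[name]
        except KeyError:
            result = self._cache[name] = method(self, *args, **kwargs)
            return result

    return wrapper

class Dummy:
    """Dummy class with a cache dict."""

    def __init__(self):
        self._cache = {}

    @memoizedmethod
    def to_memoize(self, arg):
        """Return the argument (only computed on the first call)."""
        return arg

dummy = Dummy()
assert dummy.to_memoize("value") == "value"
assert dummy._cache == {"to_memoize": "value"}
```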
#### File: rfs/tests/test_core_io_buffered.py
```python
import os
import time
def test_object_buffered_base_io():
"""Tests airfs._core.io_buffered.ObjectBufferedIOBase"""
from airfs._core.io_base_raw import ObjectRawIOBase
from airfs._core.io_base_buffered import ObjectBufferedIOBase
from airfs._core.io_random_write import (
ObjectRawIORandomWriteBase,
ObjectBufferedIORandomWriteBase,
)
# Mock sub class
name = "name"
size = 10000
flushed = bytearray()
raw_flushed = bytearray()
buffer_size = 100
flush_sleep = 0
def flush(data):
"""Dummy flush"""
flushed.extend(data)
time.sleep(flush_sleep)
class DummySystem:
"""Dummy system"""
client = None
def __init__(self, **_):
"""Do nothing"""
@staticmethod
def getsize(*_, **__):
"""Returns fake result"""
return size
@staticmethod
def head(*_, **__):
"""Returns fake result"""
return {}
@staticmethod
def relpath(path):
"""Returns fake result"""
return path
@staticmethod
def get_client_kwargs(*_, **__):
"""Returns fake result"""
return {}
class DummyRawIO(ObjectRawIOBase):
"""Dummy IO"""
_SYSTEM_CLASS = DummySystem
def _flush(self, buffer):
"""Do nothing"""
raw_flushed.extend(buffer)
def _read_range(self, start, end=0):
"""Read fake bytes"""
return ((size if end > size else end) - start) * b"0"
class DummyBufferedIO(ObjectBufferedIOBase):
"""Dummy buffered IO"""
_RAW_CLASS = DummyRawIO
DEFAULT_BUFFER_SIZE = buffer_size
MINIMUM_BUFFER_SIZE = 10
MAXIMUM_BUFFER_SIZE = 10000
def ensure_ready(self):
"""Ensure flush is complete"""
while any(1 for future in self._write_futures if not future.done()):
time.sleep(0.01)
def __init__(self, *arg, **kwargs):
ObjectBufferedIOBase.__init__(self, *arg, **kwargs)
self.close_called = False
def _close_writable(self):
"""Checks called"""
self.close_called = True
self.ensure_ready()
def _flush(self):
"""Flush"""
self._write_futures.append(
self._workers.submit(flush, self._write_buffer[: self._buffer_seek])
)
class DummyRawIOPartFlush(DummyRawIO, ObjectRawIORandomWriteBase):
"""Dummy IO with part flush support"""
_size = 20
def _flush(self, buffer, start, *_):
"""Do nothing"""
if start == 50:
# Simulate buffer that need to wait previous one
time.sleep(0.1)
raw_flushed.extend(buffer)
class DummyBufferedIOPartFlush(ObjectBufferedIORandomWriteBase):
"""Dummy buffered IO with part flush support"""
_RAW_CLASS = DummyRawIOPartFlush
# Tests: Read until end
object_io = DummyBufferedIO(name)
assert object_io.read() == size * b"0"
# Tests: Read when already at end
assert object_io.read() == b""
# Tests: Read, max buffer
object_io = DummyBufferedIO(name)
assert object_io._max_buffers == size // buffer_size
object_io = DummyBufferedIO(name, max_buffers=5)
assert object_io.read(100) == 100 * b"0"
# Tests: Read by parts
assert sorted(object_io._read_queue) == list(
range(100, 100 + buffer_size * 5, buffer_size)
)
assert object_io._seek == 100
assert object_io.read(150) == 150 * b"0"
assert sorted(object_io._read_queue) == list(
range(200, 200 + buffer_size * 5, buffer_size)
)
assert object_io._seek == 250
assert object_io.read(50) == 50 * b"0"
assert sorted(object_io._read_queue) == list(
range(300, 300 + buffer_size * 5, buffer_size)
)
assert object_io._seek == 300
assert object_io.read() == (size - 300) * b"0"
assert not object_io._read_queue
# Tests: Read small parts
part = buffer_size // 10
object_io.seek(0)
for index in range(1, 15):
assert object_io.read(part) == part * b"0"
assert object_io._seek == part * index
# Tests: Read, change seek
object_io.seek(450)
assert sorted(object_io._read_queue) == list(
range(450, 450 + buffer_size * 5, buffer_size)
)
object_io.seek(700)
assert sorted(object_io._read_queue) == list(
range(700, 700 + buffer_size * 5, buffer_size)
)
# Tests: Read buffer size (No copy mode)
object_io.seek(0)
assert object_io.read(buffer_size) == buffer_size * b"0"
object_io.seek(size - buffer_size // 2)
assert object_io.read(buffer_size) == b"0" * (buffer_size // 2)
object_io._seek = size
# Tests: Read, EOF before theoretical EOF
def read_range(*_, **__):
"""Returns empty bytes"""
return b""
object_io = DummyBufferedIO(name, max_buffers=5)
object_io._read_range = read_range
assert object_io.read() == b""
# Tests write (with auto flush)
assert bytes(flushed) == b""
object_io = DummyBufferedIO(name, mode="w")
assert object_io.write(250 * b"0") == 250
object_io.ensure_ready()
assert object_io._buffer_seek == 50
assert bytes(object_io._write_buffer) == 50 * b"0" + 50 * b"\0"
assert object_io._get_buffer().tobytes() == 50 * b"0"
assert object_io._seek == 2
assert len(flushed) == 200
assert bytes(flushed) == 200 * b"0"
# Tests manual flush
object_io.flush()
object_io.ensure_ready()
assert object_io._seek == 3
assert bytes(flushed) == 250 * b"0"
assert object_io._buffer_seek == 0
# Tests write, only buffered should flush
flushed = bytearray()
raw_flushed = bytearray()
assert bytes(flushed) == b""
assert bytes(raw_flushed) == b""
with DummyBufferedIO(name, mode="w") as object_io:
assert object_io.write(150 * b"0") == 150
object_io.ensure_ready()
assert len(flushed) == 100
assert object_io._buffer_seek == 50
assert len(object_io._get_buffer()) == 50
object_io.raw._write_buffer = object_io._get_buffer()
assert len(object_io.raw._get_buffer()) == 50
assert len(flushed) == 150
assert not len(raw_flushed)
# Tests write small data flushed by raw
object_io = DummyBufferedIO(name, mode="w")
assert object_io.write(10 * b"0") == 10
object_io.close()
assert bytes(raw_flushed) == 10 * b"0"
# Test max buffer
object_io = DummyBufferedIO(name, mode="w", max_buffers=2)
flush_sleep = object_io._FLUSH_WAIT
assert object_io.write(1000 * b"0") == 1000
flush_sleep = 0
# Test default implementation with part flush support
raw_flushed[:] = b""
content = os.urandom(100)
with DummyBufferedIOPartFlush(name, mode="w", buffer_size=10) as object_io:
object_io.write(content)
assert raw_flushed == content
```
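The write assertions above follow from simple buffer arithmetic: with a 100-byte buffer, writing 250 bytes flushes two full buffers (200 bytes) and leaves 50 bytes pending until `flush()` or `close()`. A standalone sketch of that chunking, reusing the sizes from the test:

```python
# Standalone sketch: split written data into full buffers plus a pending remainder.
def split_write(data, buffer_size):
    """Return (full buffers to flush immediately, remaining bytes kept buffered)."""
    cut = len(data) - len(data) % buffer_size
    full = [data[i:i + buffer_size] for i in range(0, cut, buffer_size)]
    return full, data[cut:]

full_buffers, pending = split_write(250 * b"0", 100)
assert len(full_buffers) == 2 and all(len(buf) == 100 for buf in full_buffers)
assert len(pending) == 50  # Flushed later by flush() or close()
```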
#### File: rfs/tests/test_core_io_raw.py
```python
def test_object_raw_base_io_http_range():
"""Tests airfs._core.io_raw.ObjectRawIOBase._http_range"""
from airfs._core.io_base_raw import ObjectRawIOBase
assert ObjectRawIOBase._http_range(10, 50) == "bytes=10-49"
assert ObjectRawIOBase._http_range(10) == "bytes=10-"
```
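`_http_range`, tested above, formats an HTTP `Range` header where the `end` argument is exclusive while the header's end byte is inclusive. A standalone equivalent matching the two assertions:

```python
# Standalone sketch: format an HTTP "Range" header from a [start, end) byte range.
def http_range(start=0, end=0):
    """Return a "Range" header value; "end" is exclusive, 0 means open-ended."""
    if end:
        return f"bytes={start}-{end - 1}"
    return f"bytes={start}-"

assert http_range(10, 50) == "bytes=10-49"
assert http_range(10) == "bytes=10-"
```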
#### File: rfs/tests/test_storage_azure.py
```python
from datetime import datetime
from time import time
import pytest
pytest.importorskip("azure.storage.blob")
pytest.importorskip("azure.storage.file")
def test_handle_azure_exception():
"""Test airfs.storage.azure._handle_azure_exception"""
from airfs.storage.azure import _handle_azure_exception
from azure.common import AzureHttpError # type: ignore
from airfs._core.exceptions import ObjectNotFoundError, ObjectPermissionError
# Any error
with pytest.raises(AzureHttpError):
with _handle_azure_exception():
raise AzureHttpError(message="", status_code=400)
# 404 error
with pytest.raises(ObjectNotFoundError):
with _handle_azure_exception():
raise AzureHttpError(message="", status_code=404)
# 403 error
with pytest.raises(ObjectPermissionError):
with _handle_azure_exception():
raise AzureHttpError(message="", status_code=403)
def test_mount_redirect():
"""Test airfs.storage.azure.MOUNT_REDIRECT"""
from collections import OrderedDict
import airfs._core.storage_manager as manager
from airfs import MountException
# Mocks mounted
manager_mounted = manager.MOUNTED
manager.MOUNTED = OrderedDict()
account_name = "account_name"
endpoint_suffix = "endpoint_suffix"
# Tests
try:
# Auto mount of all Azure services
result = manager.mount(
storage="azure",
storage_parameters=dict(
account_name=account_name, endpoint_suffix=endpoint_suffix
),
)
assert "azure_blob" in result
assert "azure_file" in result
# Incompatible extra root argument
with pytest.raises(MountException):
manager.mount(
storage="azure",
extra_root="azure://",
storage_parameters=dict(
account_name=account_name, endpoint_suffix=endpoint_suffix
),
)
# Mandatory arguments
manager.MOUNTED = OrderedDict()
with pytest.raises(ValueError):
manager.mount(storage="azure_blob")
# Restore Mounted
finally:
manager.MOUNTED = manager_mounted
def test_update_listing_client_kwargs():
"""
Test airfs.storage.azure._AzureBaseSystem._update_listing_client_kwargs
"""
from airfs.storage.azure import _AzureBaseSystem
params = dict(arg=1)
assert _AzureBaseSystem._update_listing_client_kwargs(params, 10) == dict(
num_results=10, arg=1
)
assert _AzureBaseSystem._update_listing_client_kwargs(params, 0) == dict(arg=1)
def test_model_to_dict():
"""Test airfs.storage.azure._AzureBaseSystem._model_to_dict"""
from airfs.storage.azure import _AzureBaseSystem
from azure.storage.file import models # type: ignore
last_modified = datetime.now()
props = models.FileProperties()
props.etag = "etag"
props.last_modified = last_modified
file = models.File(props=props, metadata=dict(metadata1=0))
assert _AzureBaseSystem._model_to_dict(file) == dict(
etag="etag", last_modified=last_modified, metadata=dict(metadata1=0)
)
def test_get_time():
"""Test airfs.storage.azure._AzureBaseSystem._get_time"""
from airfs.storage.azure import _AzureBaseSystem
from airfs._core.exceptions import ObjectUnsupportedOperation
m_time = time()
last_modified = datetime.fromtimestamp(m_time)
assert _AzureBaseSystem._get_time(
{"last_modified": last_modified}, ("last_modified",), "gettime"
) == pytest.approx(m_time, 1)
with pytest.raises(ObjectUnsupportedOperation):
_AzureBaseSystem._get_time({}, ("last_modified",), "gettime")
def get_storage_mock():
"""
Return storage mock configured for Azure.
Returns:
tests.storage_mock.ObjectStorageMock: Mocked storage
"""
from azure.common import AzureHttpError
from tests.storage_mock import ObjectStorageMock
def raise_404():
"""Raise 404 error"""
raise AzureHttpError(message="", status_code=404)
def raise_416():
"""Raise 416 error"""
raise AzureHttpError(message="", status_code=416)
def raise_500():
"""Raise 500 error"""
raise AzureHttpError(message="", status_code=500)
return ObjectStorageMock(
raise_404, raise_416, raise_500, format_date=datetime.fromtimestamp
)
```
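The `_get_time` test above converts the `last_modified` datetime from Azure metadata into a POSIX timestamp and fails when the key is missing. Below is an illustrative standalone version of that behaviour (not the airfs implementation; it raises `KeyError` instead of `ObjectUnsupportedOperation`).

```python
# Standalone sketch: read a datetime key from a header dict and return a timestamp.
from datetime import datetime

def get_time(header, keys):
    """Return the first available key's datetime as a POSIX timestamp."""
    for key in keys:
        try:
            return header[key].timestamp()
        except KeyError:
            continue
    raise KeyError(f"None of {keys} found in header")

header = {"last_modified": datetime.now()}
print(get_time(header, ("last_modified",)))
```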
#### File: rfs/tests/test_storage_github.py
```python
import json
import pickle
from os.path import realpath, join
import requests
import pytest
UNSUPPORTED_OPERATIONS = (
"copy",
"mkdir",
"remove",
"write",
"shareable_url",
"list_locator",
)
#: Set to True and run tests to update mock test responses with real test responses
UPDATE_MOCK = False
#: Directory where are saved cached responses from GitHub API to use with mock
MOCK_DIR = realpath(join(__file__, "../resources/github_mock_responses"))
class MockResponse:
"""Mocked request.Response"""
def __init__(self, url, headers, status_code, content, reason):
self.headers = headers
self.status_code = status_code
self.content = content
self.url = url
self.reason = reason
def json(self):
"""Mocked Json result"""
return json.loads(self.content)
@property
def text(self):
"""Mocked Text result"""
return self.content.decode()
def raise_for_status(self):
"""Mocked exception"""
if self.status_code >= 400:
raise requests.HTTPError(
f"{self.status_code} Error: {self.reason} for: {self.url}",
response=self,
)
def test_mocked_storage():
"""Tests airfs.github with a mock"""
pytest.skip(
"Unable to test using the generic test scenario due to "
"fixed virtual filesystem tree."
)
def test_github_storage(tmpdir):
"""Tests airfs.github specificities"""
from airfs._core.storage_manager import _DEFAULTS
try:
assert _DEFAULTS["github"]["storage_parameters"]["token"]
except (KeyError, AssertionError):
pytest.skip("GitHub test with real API require a configured API token.")
if UPDATE_MOCK:
# Save all requests response to use them with mock
from os import remove, listdir
from airfs._core import cache
from airfs._core.storage_manager import get_instance
for file in listdir(MOCK_DIR):
remove(join(MOCK_DIR, file))
system = get_instance("https://github.com")
request = system.client._request
def request_save(method, url, *args, params=None, **kwargs):
"""Performs requests and save result"""
resp = request(method, url, *args, params=params, **kwargs)
resp_dict = dict(
url=resp.url,
headers=resp.headers,
status_code=resp.status_code,
content=resp.content,
reason=resp.reason,
)
with open(
join(MOCK_DIR, cache._hash_name(url + json.dumps(params or dict()))),
"wb",
) as resp_cache:
pickle.dump(resp_dict, resp_cache)
return MockResponse(**resp_dict)
cache_dir = cache.CACHE_DIR
cache.CACHE_DIR = str(tmpdir.ensure_dir("cache"))
system.client._request = request_save
system.client.session.request = request_save
try:
github_storage_scenario()
finally:
if UPDATE_MOCK:
system.client.session.request = request
system.client._request = request
cache.CACHE_DIR = cache_dir
def test_github_mocked_storage(tmpdir):
"""Tests airfs.github specificities with a mock"""
if UPDATE_MOCK:
pytest.skip("Mock is updating...")
from collections import OrderedDict
import airfs._core.storage_manager as storage_manager
from airfs._core import cache
cache_dir = cache.CACHE_DIR
cache.CACHE_DIR = str(tmpdir.ensure_dir("cache"))
mounted = storage_manager.MOUNTED
storage_manager.MOUNTED = OrderedDict()
def request_load(_, url, *__, params=None, **___):
"""Loads request result"""
try:
with open(
join(MOCK_DIR, cache._hash_name(url + json.dumps(params or dict()))),
"rb",
) as resp_cache:
return MockResponse(**pickle.load(resp_cache))
except FileNotFoundError:
pytest.fail("Please, update mock responses (see UPDATE_MOCK)")
try:
# Loads requests responses from previously cached responses
storage = storage_manager.mount(storage="github", name="github_test")
client = storage["github"]["system_cached"].client
client._request = request_load
client.session.request = request_load
# Tests
github_storage_scenario()
finally:
storage_manager.MOUNTED = mounted
cache.CACHE_DIR = cache_dir
def github_storage_scenario():
"""
Test scenario. Called from both mocked and non-mocked tests.
"""
exists_scenario()
listdir_scenario()
stat_scenario()
symlink_scenario()
get_scenario()
def listdir_scenario():
"""
Tests listing
"""
from io import UnsupportedOperation
import airfs
# Users
with pytest.raises(UnsupportedOperation):
airfs.listdir("https://github.com/")
# Repos
assert "airfs" in airfs.listdir("https://github.com/jgoutin"), "List repos"
assert sorted(airfs.listdir("https://github.com/jgoutin/airfs")) == [
"HEAD",
"archive",
"blob",
"branches",
"commits",
"refs",
"releases",
"tags",
"tree",
], "List repo content"
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/not_exists")
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/not_exists")
assert sorted(airfs.listdir("https://github.com/jgoutin/airfs/refs")) == [
"heads",
"tags",
], "List refs"
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/refs/not_exists")
# HEAD
assert "LICENSE" in airfs.listdir(
"https://github.com/jgoutin/airfs/HEAD"
), "List HEAD"
with pytest.raises(NotADirectoryError):
airfs.listdir("https://github.com/jgoutin/airfs/HEAD/LICENSE")
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/HEAD/not_exists")
assert "_core" in airfs.listdir(
"https://github.com/jgoutin/airfs/HEAD/airfs"
), "List HEAD subdirectory"
# Branches
assert "master" in airfs.listdir(
"https://github.com/jgoutin/airfs/branches"
), "List branches"
assert "master" in airfs.listdir(
"https://github.com/jgoutin/airfs/refs/heads"
), "List branches in refs"
assert "LICENSE" in airfs.listdir(
"https://github.com/jgoutin/airfs/branches/master"
), "List branch content"
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/branches/not_exists")
# Commits
commit_id = airfs.listdir("https://github.com/jgoutin/airfs/commits")[0]
assert len(commit_id) == 40, "List commits"
assert "LICENSE" in airfs.listdir(
f"https://github.com/jgoutin/airfs/commits/{commit_id}"
), "List commit content"
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/commits/not_exists")
# Tags
assert "1.4.0" in airfs.listdir(
"https://github.com/jgoutin/airfs/tags"
), "List tags"
assert "1.4.0" in airfs.listdir(
"https://github.com/jgoutin/airfs/refs/tags"
), "List tags in refs"
assert "LICENSE" in airfs.listdir(
"https://github.com/jgoutin/airfs/tags/1.4.0"
), "List tag content"
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/tags/not_exists")
# Archives
assert "1.4.0.tar.gz" in airfs.listdir(
"https://github.com/jgoutin/airfs/archive"
), "List tar.gz archives"
assert "1.4.0.zip" in airfs.listdir(
"https://github.com/jgoutin/airfs/archive"
), "List zip archives"
with pytest.raises(NotADirectoryError):
airfs.listdir("https://github.com/jgoutin/airfs/archive/1.4.0.tar.gz")
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/archive/1.4.0.tar.xz")
# Releases
assert "latest" in airfs.listdir(
"https://github.com/jgoutin/airfs/releases"
), "List releases"
assert "1.4.0" in airfs.listdir(
"https://github.com/jgoutin/airfs/releases/tag"
), "List release tags"
assert sorted(
airfs.listdir("https://github.com/jgoutin/airfs/releases/tag/1.4.0")
) == ["archive", "assets", "tree"], "List release content"
with pytest.raises(FileNotFoundError):
airfs.listdir("https://github.com/jgoutin/airfs/releases/tag/not_exists")
assert sorted(
airfs.listdir("https://github.com/jgoutin/airfs/releases/latest")
) == ["archive", "assets", "tree"], "List latest release content"
assert sorted(
airfs.listdir("https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive")
) == ["source_code.tar.gz", "source_code.zip"], "List release archive"
with pytest.raises(FileNotFoundError):
airfs.listdir(
"https://github.com/jgoutin/airfs/releases/tag/not_exists/archive"
)
with pytest.raises(NotADirectoryError):
airfs.listdir(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive/"
"source_code.tar.gz"
)
assert sorted(
airfs.listdir("https://github.com/jgoutin/airfs/releases/latest/archive")
) == ["source_code.tar.gz", "source_code.zip"], "List latest release archive"
with pytest.raises(NotADirectoryError):
airfs.listdir(
"https://github.com/jgoutin/airfs/releases/latest/archive/"
"source_code.tar.gz"
)
assert sorted(
airfs.listdir("https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets")
) == ["airfs-1.4.0-py3-none-any.whl"], "List release assets"
with pytest.raises(NotADirectoryError):
airfs.listdir(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets/"
"airfs-1.4.0-py3-none-any.whl"
)
with pytest.raises(FileNotFoundError):
airfs.listdir(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets/not_exists"
)
def symlink_scenario():
"""
Tests symbolic links
"""
from io import UnsupportedOperation
import airfs
# Git tree
assert airfs.islink("https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink")
assert airfs.exists("https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink")
assert airfs.lexists(
"https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink"
)
assert not airfs.isdir(
"https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink"
)
assert airfs.isfile("https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink")
assert not airfs.islink("https://github.com/jgoutin/airfs/HEAD/LICENSE")
assert not airfs.islink("https://github.com/jgoutin/airfs/HEAD/tests")
assert (
airfs.readlink("https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink")
== "../../airfs/_core/exceptions.py"
)
assert (
airfs.realpath("https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink")
== "https://github.com/jgoutin/airfs/HEAD/airfs/_core/exceptions.py"
)
assert (
airfs.realpath(
"https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink_to_symlink"
)
== "https://github.com/jgoutin/airfs/HEAD/airfs/_core/exceptions.py"
)
with pytest.raises(UnsupportedOperation):
airfs.symlink(
"https://github.com/jgoutin/airfs/HEAD/airfs/_core/exceptions.py",
"https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink_2",
)
with pytest.raises(OSError):
airfs.readlink("https://github.com/jgoutin/airfs/HEAD/LICENSE")
# HEAD
assert airfs.islink("https://github.com/jgoutin/airfs/HEAD")
assert (
airfs.readlink("https://github.com/jgoutin/airfs/HEAD")
== "https://github.com/jgoutin/airfs/branches/master"
)
assert airfs.realpath("https://github.com/jgoutin/airfs/HEAD").startswith(
"https://github.com/jgoutin/airfs/commits/"
)
# Branches
assert airfs.readlink(
"https://github.com/jgoutin/airfs/branches/master"
).startswith("https://github.com/jgoutin/airfs/commits/")
assert airfs.realpath(
"https://github.com/jgoutin/airfs/branches/master"
).startswith("https://github.com/jgoutin/airfs/commits/")
assert airfs.readlink(
"https://github.com/jgoutin/airfs/refs/heads/master"
).startswith("https://github.com/jgoutin/airfs/commits/")
assert airfs.readlink("https://github.com/jgoutin/airfs/blob/master").startswith(
"https://github.com/jgoutin/airfs/commits/"
)
assert airfs.readlink("https://github.com/jgoutin/airfs/tree/master").startswith(
"https://github.com/jgoutin/airfs/commits/"
)
# Tags
assert airfs.readlink("https://github.com/jgoutin/airfs/tags/1.4.0").startswith(
"https://github.com/jgoutin/airfs/commits/"
)
assert airfs.realpath("https://github.com/jgoutin/airfs/tags/1.4.0").startswith(
"https://github.com/jgoutin/airfs/commits/"
)
assert airfs.readlink(
"https://github.com/jgoutin/airfs/refs/tags/1.4.0"
).startswith("https://github.com/jgoutin/airfs/commits/")
assert airfs.readlink("https://github.com/jgoutin/airfs/blob/1.4.0").startswith(
"https://github.com/jgoutin/airfs/commits/"
)
assert airfs.readlink("https://github.com/jgoutin/airfs/tree/1.4.0").startswith(
"https://github.com/jgoutin/airfs/commits/"
)
# Releases
assert airfs.readlink(
"https://github.com/jgoutin/airfs/releases/latest"
).startswith("https://github.com/jgoutin/airfs/releases/tag/")
def exists_scenario():
"""
Tests exists, isdir, isfile
"""
import airfs
# Root
assert airfs.exists("https://github.com")
assert airfs.isdir("https://github.com")
assert not airfs.isfile("https://github.com")
# User
assert airfs.exists("https://github.com/jgoutin")
assert airfs.isdir("https://github.com/jgoutin")
assert not airfs.isfile("https://github.com/jgoutin")
# Repos
assert airfs.exists("https://github.com/jgoutin/airfs")
assert airfs.isdir("https://github.com/jgoutin/airfs")
assert not airfs.isfile("https://github.com/jgoutin/airfs")
assert not airfs.exists("https://github.com/jgoutin/not_exists")
assert not airfs.isdir("https://github.com/jgoutin/not_exists")
assert not airfs.isfile("https://github.com/jgoutin/not_exists")
assert not airfs.exists("https://github.com/jgoutin/airfs/refs/not_exists")
assert not airfs.isdir("https://github.com/jgoutin/airfs/refs/not_exists")
assert not airfs.isfile("https://github.com/jgoutin/airfs/refs/not_exists")
assert airfs.exists("https://raw.githubusercontent.com/jgoutin/airfs")
assert airfs.isdir("https://raw.githubusercontent.com/jgoutin/airfs")
assert not airfs.isfile("https://raw.githubusercontent.com/jgoutin/airfs")
# HEAD
assert airfs.exists("https://github.com/jgoutin/airfs/HEAD")
assert airfs.isdir("https://github.com/jgoutin/airfs/HEAD")
assert not airfs.isfile("https://github.com/jgoutin/airfs/HEAD")
assert airfs.exists("https://github.com/jgoutin/airfs/tree/HEAD")
assert airfs.isdir("https://github.com/jgoutin/airfs/tree/HEAD")
assert not airfs.isfile("https://github.com/jgoutin/tree/HEAD")
assert airfs.exists("https://github.com/jgoutin/airfs/blob/HEAD")
assert airfs.isdir("https://github.com/jgoutin/airfs/blob/HEAD")
assert not airfs.isfile("https://github.com/jgoutin/blob/HEAD")
assert airfs.exists("https://raw.githubusercontent.com/jgoutin/airfs/HEAD")
assert airfs.isdir("https://raw.githubusercontent.com/jgoutin/airfs/HEAD")
assert not airfs.isfile("https://raw.githubusercontent.com/jgoutin/airfs/HEAD")
# Branches
assert airfs.exists("https://github.com/jgoutin/airfs/branches")
assert airfs.isdir("https://github.com/jgoutin/airfs/branches")
assert not airfs.isfile("https://github.com/jgoutin/airfs/branches")
assert airfs.exists("https://github.com/jgoutin/airfs/branches/master")
assert airfs.isdir("https://github.com/jgoutin/airfs/branches/master")
assert not airfs.isfile("https://github.com/jgoutin/airfs/branches/master")
assert airfs.exists("https://github.com/jgoutin/airfs/refs/heads/master")
assert airfs.isdir("https://github.com/jgoutin/airfs/refs/heads/master")
assert not airfs.isfile("https://github.com/jgoutin/airfs/refs/heads/master")
assert airfs.exists("https://github.com/jgoutin/airfs/tree/master")
assert airfs.isdir("https://github.com/jgoutin/airfs/tree/master")
assert not airfs.isfile("https://github.com/jgoutin/tree/master")
assert airfs.exists("https://github.com/jgoutin/airfs/blob/master")
assert airfs.isdir("https://github.com/jgoutin/airfs/blob/master")
assert not airfs.isfile("https://github.com/jgoutin/blob/master")
assert airfs.exists("https://raw.githubusercontent.com/jgoutin/airfs/master")
assert airfs.isdir("https://raw.githubusercontent.com/jgoutin/airfs/master")
assert not airfs.isfile("https://raw.githubusercontent.com/jgoutin/airfs/master")
assert not airfs.exists("https://github.com/jgoutin/airfs/branches/not_exists")
assert not airfs.isdir("https://github.com/jgoutin/airfs/branches/not_exists")
assert not airfs.isfile("https://github.com/jgoutin/airfs/branches/not_exists")
# Tags
assert airfs.exists("https://github.com/jgoutin/airfs/tags")
assert airfs.isdir("https://github.com/jgoutin/airfs/tags")
assert not airfs.isfile("https://github.com/jgoutin/airfs/tags")
assert airfs.exists("https://github.com/jgoutin/airfs/tags/1.4.0")
assert airfs.isdir("https://github.com/jgoutin/airfs/tags/1.4.0")
assert not airfs.isfile("https://github.com/jgoutin/airfs/tags/1.4.0")
assert airfs.exists("https://github.com/jgoutin/airfs/refs/tags/1.4.0")
assert airfs.isdir("https://github.com/jgoutin/airfs/refs/tags/1.4.0")
assert not airfs.isfile("https://github.com/jgoutin/airfs/refs/tags/1.4.0")
assert airfs.exists("https://github.com/jgoutin/airfs/tree/1.4.0")
assert airfs.isdir("https://github.com/jgoutin/airfs/tree/1.4.0")
assert not airfs.isfile("https://github.com/jgoutin/tree/1.4.0")
assert airfs.exists("https://github.com/jgoutin/airfs/blob/1.4.0")
assert airfs.isdir("https://github.com/jgoutin/airfs/blob/1.4.0")
assert not airfs.isfile("https://github.com/jgoutin/blob/1.4.0")
assert airfs.exists("https://raw.githubusercontent.com/jgoutin/airfs/1.4.0")
assert airfs.isdir("https://raw.githubusercontent.com/jgoutin/airfs/1.4.0")
assert not airfs.isfile("https://raw.githubusercontent.com/jgoutin/airfs/1.4.0")
assert not airfs.exists("https://github.com/jgoutin/airfs/tags/not_exists")
assert not airfs.isdir("https://github.com/jgoutin/airfs/tags/not_exists")
assert not airfs.isfile("https://github.com/jgoutin/airfs/tags/not_exists")
# Commits
assert airfs.exists("https://github.com/jgoutin/airfs/commits")
assert airfs.isdir("https://github.com/jgoutin/airfs/commits")
assert not airfs.isfile("https://github.com/jgoutin/airfs/commits")
commit_id = airfs.listdir("https://github.com/jgoutin/airfs/commits")[0]
assert airfs.exists(f"https://github.com/jgoutin/airfs/commits/{commit_id}")
assert airfs.isdir(f"https://github.com/jgoutin/airfs/commits/{commit_id}")
assert not airfs.isfile(f"https://github.com/jgoutin/airfs/commits/{commit_id}")
assert airfs.exists(f"https://github.com/jgoutin/airfs/tree/{commit_id}")
assert airfs.isdir(f"https://github.com/jgoutin/airfs/tree/{commit_id}")
assert not airfs.isfile(f"https://github.com/jgoutin/tree/{commit_id}")
assert airfs.exists(f"https://github.com/jgoutin/airfs/blob/{commit_id}")
assert airfs.isdir(f"https://github.com/jgoutin/airfs/blob/{commit_id}")
assert not airfs.isfile(f"https://github.com/jgoutin/blob/{commit_id}")
assert airfs.exists(f"https://raw.githubusercontent.com/jgoutin/airfs/{commit_id}")
assert airfs.isdir(f"https://raw.githubusercontent.com/jgoutin/airfs/{commit_id}")
assert not airfs.isfile(
f"https://raw.githubusercontent.com/jgoutin/airfs/{commit_id}"
)
assert not airfs.exists("https://github.com/jgoutin/airfs/commits/not_exists")
assert not airfs.isdir("https://github.com/jgoutin/airfs/commits/not_exists")
assert not airfs.isfile("https://github.com/jgoutin/airfs/commits/not_exists")
# Git Tree
assert airfs.exists("https://github.com/jgoutin/airfs/HEAD/tests")
assert airfs.isdir("https://github.com/jgoutin/airfs/HEAD/tests")
assert not airfs.isfile("https://github.com/jgoutin/airfs/HEAD/tests")
assert airfs.exists("https://github.com/jgoutin/airfs/HEAD/LICENSE")
assert not airfs.isdir("https://github.com/jgoutin/airfs/HEAD/LICENSE")
assert airfs.isfile("https://github.com/jgoutin/airfs/HEAD/LICENSE")
assert airfs.exists("https://raw.githubusercontent.com/jgoutin/airfs/HEAD/LICENSE")
assert not airfs.isdir(
"https://raw.githubusercontent.com/jgoutin/airfs/HEAD/LICENSE"
)
assert airfs.isfile("https://raw.githubusercontent.com/jgoutin/airfs/HEAD/LICENSE")
assert not airfs.exists("https://github.com/jgoutin/airfs/HEAD/not_exists")
assert not airfs.isdir("https://github.com/jgoutin/airfs/HEAD/not_exists")
assert not airfs.isfile("https://github.com/jgoutin/airfs/HEAD/not_exists")
# Archives
assert airfs.exists("https://github.com/jgoutin/airfs/archive")
assert airfs.isdir("https://github.com/jgoutin/airfs/archive")
assert not airfs.isfile("https://github.com/jgoutin/airfs/archive")
assert airfs.exists("https://github.com/jgoutin/airfs/archive/1.4.0.tar.gz")
assert not airfs.isdir("https://github.com/jgoutin/airfs/archive/1.4.0.tar.gz")
assert airfs.isfile("https://github.com/jgoutin/airfs/archive/1.4.0.tar.gz")
assert not airfs.exists("https://github.com/jgoutin/airfs/archive/not_exists")
assert not airfs.isdir("https://github.com/jgoutin/airfs/archive/not_exists")
assert not airfs.isfile("https://github.com/jgoutin/airfs/archive/not_exists")
# Releases
assert airfs.exists("https://github.com/jgoutin/airfs/releases")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases")
assert not airfs.isfile("https://github.com/jgoutin/airfs/releases")
assert airfs.exists("https://github.com/jgoutin/airfs/releases/tag")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases/tag")
assert not airfs.isfile("https://github.com/jgoutin/airfs/releases/tag")
assert airfs.exists("https://github.com/jgoutin/airfs/releases/tag/1.4.0")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases/tag/1.4.0")
assert not airfs.isfile("https://github.com/jgoutin/airfs/releases/tag/1.4.0")
assert airfs.exists("https://github.com/jgoutin/airfs/releases/latest")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases/latest")
assert not airfs.isfile("https://github.com/jgoutin/airfs/releases/latest")
assert airfs.exists("https://github.com/jgoutin/airfs/releases/latest/assets")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases/latest/assets")
assert not airfs.isfile("https://github.com/jgoutin/airfs/releases/latest/assets")
assert airfs.exists("https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets")
assert not airfs.isfile(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets"
)
assert airfs.exists(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets/"
"airfs-1.4.0-py3-none-any.whl"
)
assert not airfs.isdir(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets/"
"airfs-1.4.0-py3-none-any.whl"
)
assert airfs.isfile(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets/"
"airfs-1.4.0-py3-none-any.whl"
)
assert airfs.exists("https://github.com/jgoutin/airfs/releases/latest/archive")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases/latest/archive")
assert not airfs.isfile("https://github.com/jgoutin/airfs/releases/latest/archive")
assert airfs.exists("https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive")
assert airfs.isdir("https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive")
assert not airfs.isfile(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive"
)
assert airfs.exists(
"https://github.com/jgoutin/airfs/releases/latest/archive/source_code.tar.gz"
)
assert not airfs.isdir(
"https://github.com/jgoutin/airfs/releases/latest/archive/source_code.tar.gz"
)
assert airfs.isfile(
"https://github.com/jgoutin/airfs/releases/latest/archive/source_code.tar.gz"
)
assert airfs.exists(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive/source_code.tar.gz"
)
assert not airfs.isdir(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive/source_code.tar.gz"
)
assert airfs.isfile(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive/source_code.tar.gz"
)
def stat_scenario():
"""
Test stat.
"""
import airfs
from stat import S_IFDIR, S_IFREG, S_IFLNK
file = S_IFREG + 0o644
file_exec = S_IFREG + 0o755
directory = S_IFDIR + 0o644
link = S_IFLNK + 0o644
# User
stat = airfs.stat("https://github.com/jgoutin")
assert stat.st_mode == directory
assert stat.st_mtime > 0
assert stat.st_ctime > 0
# Repos
stat = airfs.stat("https://github.com/jgoutin/airfs")
assert stat.st_mode == directory
assert stat.st_mtime > 0
assert stat.st_ctime > 0
stat = airfs.stat("https://github.com/jgoutin/airfs/refs")
assert stat.st_mode == directory
stat = airfs.stat("https://github.com/jgoutin/airfs/refs/heads")
assert stat.st_mode == directory
stat = airfs.stat("https://github.com/jgoutin/airfs/refs/tags")
assert stat.st_mode == directory
with pytest.raises(FileNotFoundError):
airfs.stat("https://github.com/jgoutin/not_exists")
with pytest.raises(FileNotFoundError):
airfs.stat("https://github.com/jgoutin/airfs/refs/not_exists")
# HEAD
stat = airfs.lstat("https://github.com/jgoutin/airfs/HEAD")
assert stat.st_mode == link
assert stat.st_mtime > 0
assert stat.sha # noqa
# Branches
stat = airfs.stat("https://github.com/jgoutin/airfs/branches")
assert stat.st_mode == directory
stat = airfs.lstat("https://github.com/jgoutin/airfs/branches/master")
assert stat.st_mode == link
assert stat.st_mtime > 0
assert stat.sha # noqa
stat = airfs.stat("https://github.com/jgoutin/airfs/branches/master")
assert stat.st_mode == directory
assert stat.st_mtime > 0
assert stat.sha # noqa
stat = airfs.lstat("https://github.com/jgoutin/airfs/refs/heads/master")
assert stat.st_mode == link
assert stat.st_mtime > 0
assert stat.sha # noqa
# Tags
stat = airfs.stat("https://github.com/jgoutin/airfs/tags")
assert stat.st_mode == directory
stat = airfs.lstat("https://github.com/jgoutin/airfs/tags/1.4.0")
assert stat.st_mode == link
assert stat.st_mtime > 0
assert stat.sha # noqa
stat = airfs.lstat("https://github.com/jgoutin/airfs/refs/tags/1.4.0")
assert stat.st_mode == link
assert stat.st_mtime > 0
assert stat.sha # noqa
# Commits
stat = airfs.stat("https://github.com/jgoutin/airfs/commits")
assert stat.st_mode == directory
commit_id = airfs.listdir("https://github.com/jgoutin/airfs/commits")[0]
stat = airfs.stat(f"https://github.com/jgoutin/airfs/commits/{commit_id}")
assert stat.st_mode == directory
assert stat.st_mtime > 0
assert stat.sha # noqa
# Git Tree
stat = airfs.stat("https://github.com/jgoutin/airfs/HEAD/tests")
assert stat.st_mode == directory
assert stat.st_mtime > 0
assert stat.st_size == 0
assert stat.sha # noqa
stat = airfs.stat("https://github.com/jgoutin/airfs/HEAD/LICENSE")
assert stat.st_mode == file
assert stat.st_mtime > 0
assert stat.st_size > 0
assert stat.sha # noqa
stat = airfs.stat("https://github.com/jgoutin/airfs/HEAD/setup.py")
assert stat.st_mode == file_exec
assert stat.st_mtime > 0
assert stat.st_size > 0
assert stat.sha # noqa
symlink_stat = airfs.lstat(
"https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink"
)
assert symlink_stat.st_mode == link
assert symlink_stat.st_mtime > 0
assert symlink_stat.st_size > 0
assert symlink_stat.sha # noqa
stat = airfs.stat("https://github.com/jgoutin/airfs/HEAD/tests/resources/symlink")
assert stat.st_mode == file
assert stat.st_mtime > 0
assert stat.st_size > 0
assert stat.st_size > symlink_stat.st_size
assert stat.sha # noqa
with pytest.raises(FileNotFoundError):
airfs.stat("https://github.com/jgoutin/airfs/HEAD/not_exists")
# Releases
stat = airfs.stat(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive/source_code.tar.gz"
)
assert stat.st_mode == file
assert stat.st_size > 0
assert stat.st_mtime > 0
assert stat.sha # noqa
stat = airfs.stat(
"https://github.com/jgoutin/airfs/releases/latest/archive/source_code.tar.gz"
)
assert stat.st_mode == file
assert stat.st_size > 0
assert stat.st_mtime > 0
assert stat.sha # noqa
stat = airfs.stat(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets/"
"airfs-1.4.0-py3-none-any.whl"
)
assert stat.st_mode == file
assert stat.st_size > 0
assert stat.st_mtime > 0
assert stat.sha # noqa
def get_scenario():
"""
Test get files.
"""
import airfs
from airfs.storage.github import GithubBufferedIO, GithubRawIO
with airfs.open(
"https://github.com/jgoutin/airfs/releases/latest/archive/source_code.tar.gz",
buffering=0,
) as file:
assert isinstance(file, GithubRawIO)
assert file.read()
with airfs.open(
"https://github.com/jgoutin/airfs/releases/latest/archive/source_code.tar.gz"
) as file:
assert isinstance(file, GithubBufferedIO)
assert file.read()
with airfs.open(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/archive/"
"source_code.tar.gz",
buffering=0,
) as file:
assert file.read()
with airfs.open(
"https://github.com/jgoutin/airfs/releases/tag/1.4.0/assets/"
"airfs-1.4.0-py3-none-any.whl",
buffering=0,
) as file:
assert file.read()
with airfs.open(
"https://github.com/jgoutin/airfs/HEAD/LICENSE",
buffering=0,
) as file:
assert file.read()
with airfs.open(
"https://raw.githubusercontent.com/jgoutin/airfs/HEAD/LICENSE",
buffering=0,
) as file:
assert file.read()
```
#### File: rfs/tests/test_storage_s3.py
```python
import pytest
pytest.importorskip("boto3")
UNSUPPORTED_OPERATIONS = (
"symlink",
# Not supported on some objects
"getctime",
)
def test_handle_client_error():
"""Test airfs.s3._handle_client_error"""
from airfs.storage.s3 import _handle_client_error
from botocore.exceptions import ClientError # type: ignore
from airfs._core.exceptions import ObjectNotFoundError, ObjectPermissionError
response = {"Error": {"Code": "ErrorCode", "Message": "Error"}}
# Any error
with pytest.raises(ClientError):
with _handle_client_error():
raise ClientError(response, "testing")
# 404 error
response["Error"]["Code"] = "404"
with pytest.raises(ObjectNotFoundError):
with _handle_client_error():
raise ClientError(response, "testing")
# 403 error
response["Error"]["Code"] = "403"
with pytest.raises(ObjectPermissionError):
with _handle_client_error():
raise ClientError(response, "testing")
def test_mocked_storage():
"""Tests airfs.s3 with a mock"""
from datetime import datetime
from io import BytesIO, UnsupportedOperation
from airfs.storage.s3 import S3RawIO, _S3System, S3BufferedIO
from botocore.exceptions import ClientError # type: ignore
import boto3 # type: ignore
from tests.test_storage import StorageTester
from tests.storage_mock import ObjectStorageMock
# Mocks client
def raise_404():
"""Raise 404 error"""
raise ClientError({"Error": {"Code": "404", "Message": "Error"}}, "Error")
def raise_416():
"""Raise 416 error"""
raise ClientError(
{"Error": {"Code": "InvalidRange", "Message": "Error"}}, "Error"
)
def raise_500():
"""Raise 500 error"""
raise ClientError({"Error": {"Code": "Error", "Message": "Error"}}, "Error")
storage_mock = ObjectStorageMock(
raise_404, raise_416, raise_500, format_date=datetime.fromtimestamp
)
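    # Flag toggled later in this test so that the mocked "head_object" returns an
    # empty dict, simulating responses with missing header values.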
no_head = False
class Client:
"""boto3.client"""
def __init__(self, *_, **kwargs):
"""boto3.client.__init__"""
self.kwargs = kwargs
@staticmethod
def get_object(Bucket=None, Key=None, Range=None, **_):
"""boto3.client.get_object"""
return dict(
Body=BytesIO(
storage_mock.get_object(Bucket, Key, header=dict(Range=Range))
)
)
@staticmethod
def head_object(Bucket=None, Key=None, **_):
"""boto3.client.head_object"""
if no_head:
return dict()
return storage_mock.head_object(Bucket, Key)
@staticmethod
def put_object(Bucket=None, Key=None, Body=None, **_):
"""boto3.client.put_object"""
storage_mock.put_object(Bucket, Key, Body, new_file=True)
@staticmethod
def delete_object(Bucket=None, Key=None, **_):
"""boto3.client.delete_object"""
storage_mock.delete_object(Bucket, Key)
@staticmethod
def head_bucket(Bucket=None, **_):
"""boto3.client.head_bucket"""
return storage_mock.head_locator(Bucket)
@staticmethod
def create_bucket(Bucket=None, **_):
"""boto3.client.create_bucket"""
storage_mock.put_locator(Bucket)
@staticmethod
def copy_object(Bucket=None, Key=None, CopySource=None, **_):
"""boto3.client.copy_object"""
storage_mock.copy_object(
CopySource["Key"],
Key,
dst_locator=Bucket,
src_locator=CopySource["Bucket"],
)
@staticmethod
def delete_bucket(Bucket=None, **_):
"""boto3.client.delete_bucket"""
storage_mock.delete_locator(Bucket)
@staticmethod
def list_objects_v2(Bucket=None, Prefix=None, MaxKeys=None, **_):
"""boto3.client.list_objects_v2"""
objects = []
for name, header in storage_mock.get_locator(
Bucket, prefix=Prefix, limit=MaxKeys, raise_404_if_empty=False
).items():
header["Key"] = name
objects.append(header)
if not objects:
return dict()
return dict(Contents=objects)
@staticmethod
def list_buckets(**__):
"""boto3.client.list_buckets"""
objects = []
for name, header in storage_mock.get_locators().items():
header["Name"] = name
objects.append(header)
return dict(Buckets=objects)
@staticmethod
def create_multipart_upload(**_):
"""boto3.client.create_multipart_upload"""
return dict(UploadId=123)
@staticmethod
def complete_multipart_upload(
Bucket=None, Key=None, MultipartUpload=None, UploadId=None, **_
):
"""boto3.client.complete_multipart_upload"""
uploaded_parts = MultipartUpload["Parts"]
assert UploadId == 123
parts = []
for part in uploaded_parts:
parts.append(Key + str(part["PartNumber"]))
assert part["ETag"]
storage_mock.concat_objects(Bucket, Key, parts)
@staticmethod
def upload_part(
Bucket=None, Key=None, PartNumber=None, Body=None, UploadId=None, **_
):
"""boto3.client.upload_part"""
assert UploadId == 123
return storage_mock.put_object(Bucket, Key + str(PartNumber), Body)
@staticmethod
def generate_presigned_url(ClientMethod, Params=None, **_):
"""boto3.client.generate_presigned_url"""
assert ClientMethod == "get_object", "get_object Client method"
return f"https://{Params['Bucket']}/{Params['Key']}#token=123456"
class Session:
"""boto3.session.Session"""
client = Client
region_name = ""
def __init__(self, *_, **__):
"""boto3.session.Session.__init__"""
boto3_client = boto3.client
boto3_session_session = boto3.session.Session
boto3.client = Client
boto3.session.Session = Session
# Tests
try:
# Init mocked system
system = _S3System()
storage_mock.attach_io_system(system)
# Tests
with StorageTester(
system,
S3RawIO,
S3BufferedIO,
storage_mock,
unsupported_operations=UNSUPPORTED_OPERATIONS,
) as tester:
# Common tests
tester.test_common()
# Test: Unsecure mode
file_path = tester.base_dir_path + "file0.dat"
with S3RawIO(file_path, unsecure=True) as file:
assert file._client.kwargs["use_ssl"] is False
# Test: Header values may be missing
no_head = True
with pytest.raises(UnsupportedOperation):
system.getctime(file_path)
with pytest.raises(UnsupportedOperation):
system.getmtime(file_path)
with pytest.raises(UnsupportedOperation):
system.getsize(file_path)
no_head = False
# Restore mocked functions
finally:
boto3.client = boto3_client
boto3.session.Session = boto3_session_session
```
|
{
"source": "JGoutin/skio",
"score": 4
}
|
#### File: skio/skio/system.py
```python
import os.path
import re
def validfilename(filename, fullpath=False, posixchars=False, iso9660=False,
posixlenght=False, msdoslenght=False, lenghterror=False):
r"""
Remove all invalid characters from a file or folder name and check its
validity on Linux, Microsoft Windows, Microsoft MS-DOS and Apple Macintosh.
Remove:
- All characters <= 31 on ASCII table (Linux, Windows, Macintosh).\n
- Following special characters: "\", "/", ":", "*", "?", '"', ">", "<" and
"|" (Windows).
- " " on start and end of names.
- "." on end of names (Windows).
- "-" on start of names (Linux).
Check also for Windows/MS-DOS reserved names:
"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4","COM5", "COM6",
"COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6",
"LPT7", "LPT8", "LPT9".
Parameters
----------
filename : str
File or folder name or path (see "fullpath" parameter).
fullpath : bool, optional
Set to "True" if "filename" contain full path. Set to "False"
if "filename" contain only file or folder name to check.
posixchars : bool, optional
If "True", remove all unauthorized characters with POSIX specification.
With this, only alphanumeric, ".", "-" and "_" are authorized.
iso9660 : bool, optional
If "True", remove all "-" that are incompatible with ISO9660 level 1
optic disk formatting.
posixlenght : bool, optional
If "True", check if length is greater than 14.
msdoslenght : bool, optional
If "True", check if length is greater than 8 for name and 3 for
extension.
lenghterror : bool, optional
If "True", raise error if length is invalid, else, truncate filename.
Return
-------
out : str
Fixed filename.
"""
# Split directory and name
if fullpath:
directory, filename = os.path.split(filename)
else:
directory = ""
# Remove invalid characters
if posixchars:
# Remove POSIX invalid characters
validname = re.sub("[^a-zA-Z0-9_.-]", "", filename)
else:
# Remove Windows and ASCII<31 invalid characters
validname = ""
for char in filename:
            if char not in '\\/:*?"><|' and ord(char) > 31:
validname += char
if iso9660:
# Remove '-' for ISO9660
validname = re.sub("[-]", "", validname)
# Remove ending and starting characters that can generate OS errors
def checkendstart(string):
"""- ' ', '.' on end, '-' on start"""
prevlen = 0
while len(string) != prevlen:
prevlen = len(string)
# Remove spaces on start and end
string = string.strip()
# Remove '.' on end
string = string.rstrip('.')
# Remove '-' on start
string = string.lstrip('-')
return string
validname = checkendstart(validname)
# Check if filename is not empty
if not validname:
raise ValueError('All characters in filename are invalid')
    # Check MS-DOS length (8 characters for the base name, 3 for the extension)
    if msdoslenght:
        base, ext = os.path.splitext(validname)
        if len(base) > 8:
            if lenghterror:
                raise ValueError('Filename too long for MS-DOS (8 characters)')
            # Truncate basename
            base = base[:8]
        if len(ext) > 4:
            if lenghterror:
                raise ValueError('Extension too long for MS-DOS '
                                 '(3 characters)')
            # Truncate extension ('.' plus 3 characters)
            ext = ext[:4]
        validname = checkendstart(base + ext)
# Check POSIX length
if posixlenght and len(validname) > 14:
if lenghterror:
# Raise error
raise ValueError('Filename too long for POSIX (14 characters)')
else:
# Truncate name
validname = checkendstart(validname[:14])
# Check Windows/MS-DOS reserved name:
if validname in ('CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3',
'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9', 'LPT1',
'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8',
'LPT9'):
raise ValueError("Filename is a Windows/MS-DOS reserved name")
# Return valid filename
if directory:
validname = os.path.join(directory, validname)
return validname
```
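For reference, a minimal usage sketch of `validfilename` (the `skio.system` import path is assumed from the file header above; inputs and expected outputs are illustrative):
```python
from skio.system import validfilename  # assumed import path based on the file header above
print(validfilename('my:file?.txt '))  # -> 'myfile.txt' (invalid characters and trailing space removed)
print(validfilename('averylongreport.data', msdoslenght=True))  # -> 'averylon.dat' (8.3 truncation)
try:
    validfilename('CON')  # Windows/MS-DOS reserved name
except ValueError as error:
    print(error)
```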
#### File: skio/tests/test_codec.py
```python
from skio import intdecode, intencode
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
IDAT = np.array(((0, 1, 2), (3, 4, 5))) # Integer data
FDAT = np.array(((0.0, 0.5, 1.0), (1.5, 2.0, 2.5))) # Float data
FDAT_NEG = np.array(((-2.0, -1.0, 0.0), (1.0, 2.0, 3.0))) # Data with value <0
FDAT_NAN = np.array(((0.0, 0.5, 1.0), (np.nan, 2.0, 2.5))) # Data with nan
MDAT = np.array(((False, False, False), (True, False, False))) # Nan Mask
def test_intdecode_invalid():
"""'intdecode' function: 'invalid' argument"""
# None
assert_equal(intdecode(IDAT, 0.5, invalid=None), FDAT)
# None: Test mask
assert not intdecode(IDAT, 0.5, invalid=None, masked=True).mask.any()
# Single
assert_equal(intdecode(IDAT, 0.5, invalid=3), FDAT_NAN)
# Single: Test mask
assert_equal(intdecode(IDAT, 0.5, invalid=3, masked=True).mask, MDAT)
# Tuple
assert_equal(intdecode(IDAT, 0.5, invalid=(1, 4)),
np.array(((np.nan, np.nan, 1.0), (1.5, np.nan, np.nan))))
# Tuple: Test mask
assert_equal(intdecode(IDAT, 0.5, invalid=(1, 4), masked=True).mask,
np.array(((True, True, False), (False, True, True))))
def test_intdecode_dtype():
"""'intdecode' function: 'dtype' argument"""
# set dtype
assert intdecode(IDAT, 0.5, dtype=np.float32).dtype == np.float32
# Not a floating type
with pytest.raises(ValueError) as excinfo:
intdecode(IDAT, 0.5, dtype=np.int32)
assert 'dtype must be a floating data type' in str(excinfo.value)
def test_intencode_invalidvalue():
"""'intencode' function: 'invalidvalue' argument"""
# False : With no invalid data
data, factor = intencode(FDAT, np.int16, invalidvalue=False)
assert_equal(data, np.array(((0, 6553, 13107), (19660, 26214, 32767))))
assert_almost_equal(factor, 7.6296273689992981e-05)
# False : With invalid data
data = intencode(FDAT_NAN, np.int16, invalidvalue=False)[0]
assert_equal(data.mask, MDAT)
assert_equal(data.data, np.array(((0, 6553, 13107), (0, 26214, 32767))))
# None : With signed int
assert_equal(intencode(FDAT_NAN, np.int16, invalidvalue=None)[0],
np.array(((0, 6553, 13107), (-32768, 26214, 32767))))
# None : With unsigned int
assert_equal(intencode(FDAT_NAN, np.uint16, invalidvalue=None)[0],
np.array(((0, 13107, 26214), (65535, 52427, 65534))))
# Specified value
assert_equal(intencode(FDAT_NAN, np.int16, invalidvalue=-1)[0],
np.array(((0, 6553, 13107), (-1, 26214, 32767))))
def test_intencode_rangeminmax():
"""'intencode' function: 'rangemin' & 'rangemax' arguments"""
# Negative and positive min and max
assert_equal(intencode(FDAT_NEG, np.int16, rangemin=-100,
rangemax=100)[0],
np.array(((-67, -33, 0), (33, 67, 100))))
# Negative and positive min and max with inverted data
assert_equal(intencode(FDAT_NEG * -1, np.int16, rangemin=-100,
rangemax=100)[0], np.array(((67, 33, 0),
(-33, -67, -100))))
# Positive min and max
assert_equal(intencode(FDAT_NEG, np.int16, rangemin=100, rangemax=200)[0],
np.array(((100, 120, 140), (160, 180, 200))))
# Negative min and max
assert_equal(intencode(FDAT_NEG, np.int16, rangemin=-200,
rangemax=-100)[0],
np.array(((-200, -180, -160), (-140, -120, -100))))
    # Too large values
assert_equal(intencode(FDAT_NEG, np.int8, rangemin=-256, rangemax=256)[0],
np.array(((-85, -42, 0), (42, 85, 127))))
def test_intencode_keepsign():
"""'intencode' function: 'keepsign' argument"""
# Keep
assert_equal(intencode(FDAT_NEG, np.int16, keepsign=True)[0],
np.array(((-21845, -10922, 0), (10922, 21845, 32767))))
# Don't keep
assert_equal(intencode(FDAT_NEG, np.int16, keepsign=False)[0],
np.array(((0, 6553, 13107), (19660, 26214, 32767))))
# Keep but unsigned
assert_equal(intencode(FDAT_NEG, np.uint16, keepsign=True)[0],
np.array(((0, 13107, 26214), (39321, 52428, 65535))))
def test_intencode_int_inv_max():
"""'intencode' function: 'intfactor', 'maxfactor', 'invfactor' arguments"""
# Float
assert intencode(FDAT, np.uint8, rangemax=1, intfactor=False)[1] == 2.5
# Float inverted
assert_almost_equal(intencode(FDAT, np.uint8, rangemax=1, invfactor=True,
intfactor=False)[1], 0.4)
# Float inverted maxed
assert intencode(FDAT, np.uint8, maxfactor=10, invfactor=True,
intfactor=False)[1] == 10
# Float inverted maxed with max = int max
assert intencode(FDAT * 1e-5, np.uint8, maxfactor=-1, invfactor=True,
intfactor=False)[1] == 255
# Float maxed
assert_almost_equal(intencode(FDAT, np.uint8, maxfactor=1e-5,
intfactor=False)[1], 1e-5)
# Integer
assert intencode(FDAT, np.uint8, rangemax=1, intfactor=True)[1] == 3
# Integer inverted
assert intencode(FDAT, np.uint8, rangemax=1, invfactor=True,
intfactor=True)[1] == 1
# Integer inverted maxed
assert intencode(FDAT, np.uint8, maxfactor=10, invfactor=True,
intfactor=True)[1] == 10
# Integer maxed
assert intencode(FDAT, np.uint8, maxfactor=1, intfactor=True)[1] == 1
def test_intencode_forcefactor():
"""'intencode' function: 'forcefactor' argument"""
assert intencode(FDAT, np.uint8, forcefactor=42)[1] == 42
def test_intencode_intround():
"""'intencode' function: 'intround' argument"""
# Round
assert_equal(intencode(FDAT_NEG, np.int16, intround=True)[0],
np.array(((-21845, -10922, 0), (10922, 21845, 32767))))
# Don't round
assert_equal(intencode(FDAT_NEG, np.int16, intround=False)[0],
np.array(((-21844, -10922, 0), (10922, 21844, 32767))))
def test_intencode_inttype():
"""'intencode' function: character code as int dtype"""
# Character code
assert_equal(intencode(FDAT, 'h')[0],
np.array(((0, 6553, 13107), (19660, 26214, 32767))))
```
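For context, a minimal encode/decode round-trip sketch based on the behaviour exercised above (assuming `intencode` and `intdecode` are importable from `skio` as in the test imports; the tolerance is illustrative):
```python
import numpy as np
from skio import intdecode, intencode
data = np.array([[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]])
encoded, factor = intencode(data, np.int16, invalidvalue=False)  # scale floats onto the int16 range
decoded = intdecode(encoded, factor, invalid=None)  # multiply back into float units
assert np.allclose(decoded, data, atol=1e-3)  # equal up to quantization error
```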
|
{
"source": "jgp0000/atlas-subnational-api",
"score": 3
}
|
#### File: atlas-subnational-api/colombia/datasets.py
```python
import pandas as pd
import copy
import os.path
import re
from flask import current_app
from linnaeus import classification
product_classification = classification.load("product/HS/Colombia_Prospedia/out/products_colombia_prospedia.csv")
location_classification = classification.load("location/Colombia/Prospedia/out/locations_colombia_prosperia.csv")
industry_classification = classification.load("industry/ISIC/Colombia_Prosperia/out/industries_colombia_isic_prosperia.csv")
country_classification = classification.load("location/International/DANE/out/locations_international_dane.csv")
occupation_classification = classification.load("occupation/SOC/Colombia/out/occupations_soc_2010.csv")
livestock_classification = classification.load("product/Datlas/Rural/out/livestock.csv")
agproduct_classification = classification.load("product/Datlas/Rural/out/agricultural_products_expanded.csv")
nonagric_classification = classification.load("product/Datlas/Rural/out/nonagricultural_activities.csv")
land_use_classification = classification.load("product/Datlas/Rural/out/land_use.csv")
farmtype_classification = classification.load("product/Datlas/Rural/out/farm_type.csv")
farmsize_classification = classification.load("product/Datlas/Rural/out/farm_size.csv")
country_classification.table.code = country_classification.table.code.astype(str).str.zfill(3)
def first(x):
"""Return first element of a group in a pandas GroupBy object"""
return x.nth(0)
def sum_group(x):
"""Get the sum for a pandas group by"""
return x.sum()
def null(x):
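    """Return an all-NaN result shaped like the first rows of a pandas group."""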
sample = x.first()
return pd.DataFrame(index=sample.index, columns=sample.columns)
def slugify(s):
"""Get a string like 'Foo Bar' and convert to foo_bar. Usually good for
creating codes from names, especially for languages with special
characters."""
return re.sub(r'[^a-zA-Z0-9\_]', '', s.replace(" ", "_").lower())
DATASET_ROOT = current_app.config["DATASET_ROOT"]
YEAR_MIN_TRADE = current_app.config["YEAR_MIN_TRADE"]
YEAR_MAX_TRADE = current_app.config["YEAR_MAX_TRADE"]
YEAR_MIN_INDUSTRY = current_app.config["YEAR_MIN_INDUSTRY"]
YEAR_MAX_INDUSTRY = current_app.config["YEAR_MAX_INDUSTRY"]
YEAR_MIN_AGPRODUCT = current_app.config["YEAR_MIN_AGPRODUCT"]
YEAR_MAX_AGPRODUCT = current_app.config["YEAR_MAX_AGPRODUCT"]
# These are MSAs (Metropolitan Statistical Areas) that have a single
# municipality associated with them - they're mostly "cities", i.e. munis with
# a population greater than a certain number (100k?). Alternatively, the way we
# generated MSAs (looking at commute patterns between cities) could have
# produced an MSA that has only one city, but I don't think this is the case.
# These values are from Moncho's Trade dataset, in
# Keys/Colombia_cities_of_onemuni_key.dta
SINGLE_MUNI_MSAS = ['73001', '47001', '23001', '20001', '76109', '41001', '76520',
'19001', '70001', '44001', '68081', '52835', '18001', '05045',
'44430', '05837', '76147', '85001', '13430', '25290', '76111',
'27001', '23417', '41551', '47189', '05154', '54498', '20011',
'23162', '19698', '81001', '73268', '17380', '23466', '13244',
'88001', '05172', '50006', '15176', '70215', '47288', '50313',
'54518']
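# Note: these 5-digit municipality codes are promoted to MSA codes elsewhere in
# this module by appending a trailing "0" (e.g. "73001" -> "730010"); see
# load_trade4digit_msa().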
def prefix_path(to_prefix):
return os.path.join(DATASET_ROOT, to_prefix)
def load_trade4digit_country():
prescriptives = pd.read_stata(prefix_path("Trade/exp_ecomplexity_rc.dta"))
exports = pd.read_stata(prefix_path("Trade/exp_rpy_rc_p4.dta"))
exports = exports.rename(columns={"X_rpy_d": "export_value",
"NP_rpy": "export_num_plants"})
imports = pd.read_stata(prefix_path("Trade/imp_rpy_rc_p4.dta"))
imports = imports.rename(columns={"X_rpy_d": "import_value",
"NP_rpy": "import_num_plants"})
descriptives = exports.merge(imports, on=["yr", "r", "p"], how="outer")
descriptives = descriptives.fillna({
"export_value": 0,
"export_num_plants": 0,
"import_value": 0,
"import_num_plants": 0,
})
combo = prescriptives.merge(descriptives,
left_on=["yr", "r", "p4"],
right_on=["yr", "r", "p"])
combo = combo[combo.yr.between(YEAR_MIN_TRADE, YEAR_MAX_TRADE)]
combo["r"] = "COL"
return combo
trade4digit_country = {
"read_function": load_trade4digit_country,
"field_mapping": {
"r": "location",
"p4": "product",
"yr": "year",
"export_value": "export_value",
"export_num_plants": "export_num_plants",
"import_value": "import_value",
"import_num_plants": "import_num_plants",
"density_intl": "density",
"eci_intl": "eci",
"pci": "pci",
"coi_intl": "coi",
"cog_intl": "cog",
"RCA_intl": "export_rca"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "country"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
},
"digit_padding": {
"location": 1,
"product": 4
},
"facet_fields": ["location", "product", "year"],
"facets": {
("location_id", "year"): {
"eci": first,
"coi": first,
},
("product_id", "year"): {
"pci": first,
},
("location_id", "product_id", "year"): {
"export_value": first,
"import_value": first,
"export_num_plants": first,
"import_num_plants": first,
"export_rca": first,
"density": first,
"cog": first,
}
}
}
def load_trade4digit_department():
prescriptives = pd.read_stata(prefix_path("Trade/exp_ecomplexity_r2.dta"))
exports = pd.read_stata(prefix_path("Trade/exp_rpy_r2_p4.dta"))
exports = exports.rename(columns={"X_rpy_d": "export_value",
"NP_rpy": "export_num_plants"})
imports = pd.read_stata(prefix_path("Trade/imp_rpy_r2_p4.dta"))
imports = imports.rename(columns={"X_rpy_d": "import_value",
"NP_rpy": "import_num_plants"})
descriptives = exports.merge(imports, on=["yr", "r", "p"], how="outer")
descriptives = descriptives.fillna({
"export_value": 0,
"export_num_plants": 0,
"import_value": 0,
"import_num_plants": 0,
})
combo = prescriptives.merge(descriptives,
left_on=["yr", "r", "p4"],
right_on=["yr", "r", "p"])
combo = combo[combo.yr.between(YEAR_MIN_TRADE, YEAR_MAX_TRADE)]
return combo
trade4digit_department = {
"read_function": load_trade4digit_department,
"field_mapping": {
"r": "location",
"p4": "product",
"yr": "year",
"export_value": "export_value",
"export_num_plants": "export_num_plants",
"import_value": "import_value",
"import_num_plants": "import_num_plants",
"density_intl": "density",
"eci_intl": "eci",
"pci": "pci",
"coi_intl": "coi",
"cog_intl": "cog",
"RCA_intl": "export_rca"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "department"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
},
"digit_padding": {
"location": 2,
"product": 4
},
"facet_fields": ["location", "product", "year"],
"facets": {
("location_id", "year"): {
"eci": first,
"coi": first,
},
("product_id", "year"): {
"pci": first,
"export_value": sum_group,
"import_value": sum_group,
"export_num_plants": sum_group,
"import_num_plants": sum_group
},
("location_id", "product_id", "year"): {
"export_value": first,
"import_value": first,
"export_num_plants": first,
"import_num_plants": first,
"export_rca": first,
"density": first,
"cog": first,
}
}
}
def load_trade4digit_msa():
prescriptives = pd.read_stata(prefix_path("Trade/exp_ecomplexity_rcity.dta"))
# Fix certain muni codes to msa codes, see MEX-148
is_single_muni_msa = prescriptives.r.isin(SINGLE_MUNI_MSAS)
prescriptives.loc[is_single_muni_msa, "r"] = prescriptives.loc[is_single_muni_msa, "r"].map(lambda x: x + "0")
exports = pd.read_stata(prefix_path("Trade/exp_rpy_ra_p4.dta"))
# Add missing exports from single muni MSAs. See MEX-148
muni_exports = pd.read_stata(prefix_path("Trade/exp_rpy_r5_p4.dta"))
muni_exports = muni_exports[muni_exports.r.isin(SINGLE_MUNI_MSAS)]
muni_exports.r = muni_exports.r.map(lambda x: x + "0")
exports = pd.concat([exports, muni_exports]).reset_index(drop=True)
exports = exports.rename(columns={"X_rpy_d": "export_value",
"NP_rpy": "export_num_plants"})
imports = pd.read_stata(prefix_path("Trade/imp_rpy_ra_p4.dta"))
# Add missing imports from single muni MSAs. See MEX-148
muni_imports = pd.read_stata(prefix_path("Trade/imp_rpy_r5_p4.dta"))
muni_imports = muni_imports[muni_imports.r.isin(SINGLE_MUNI_MSAS)]
muni_imports.r = muni_imports.r.map(lambda x: x + "0")
imports = pd.concat([imports, muni_imports]).reset_index(drop=True)
imports = imports.rename(columns={"X_rpy_d": "import_value",
"NP_rpy": "import_num_plants"})
descriptives = exports.merge(imports, on=["yr", "r", "p"], how="outer")
descriptives = descriptives.fillna({
"export_value": 0,
"export_num_plants": 0,
"import_value": 0,
"import_num_plants": 0,
})
combo = prescriptives.merge(descriptives,
left_on=["yr", "r", "p4"],
right_on=["yr", "r", "p"])
combo = combo[combo.yr.between(YEAR_MIN_TRADE, YEAR_MAX_TRADE)]
return combo
trade4digit_msa = {
"read_function": load_trade4digit_msa,
"field_mapping": {
"r": "location",
"p": "product",
"yr": "year",
"export_value": "export_value",
"export_num_plants": "export_num_plants",
"import_value": "import_value",
"import_num_plants": "import_num_plants",
"density_intl": "density",
"eci_intl": "eci",
"pci": "pci",
"coi_intl": "coi",
"cog_intl": "cog",
"RCA_intl": "export_rca"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "msa"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
},
"digit_padding": {
"product": 4
},
"facet_fields": ["location", "product", "year"],
"facets": {
("location_id", "year"): {
"eci": first,
"coi": first
},
("product_id", "year"): {
"pci": first,
},
("location_id", "product_id", "year"): {
"export_value": first,
"import_value": first,
"export_num_plants": first,
"import_num_plants": first,
"export_rca": first,
"density": first,
"cog": first,
}
}
}
def load_trade4digit_municipality():
exports = pd.read_stata(prefix_path("Trade/exp_rpy_r5_p4.dta"))
exports = exports.rename(columns={"X_rpy_d": "export_value",
"NP_rpy": "export_num_plants"})
imports = pd.read_stata(prefix_path("Trade/imp_rpy_r5_p4.dta"))
imports = imports.rename(columns={"X_rpy_d": "import_value",
"NP_rpy": "import_num_plants"})
descriptives = exports.merge(imports, on=["yr", "r", "p"], how="outer")
descriptives = descriptives.fillna({
"export_value": 0,
"export_num_plants": 0,
"import_value": 0,
"import_num_plants": 0,
})
descriptives = descriptives[descriptives.yr.between(YEAR_MIN_TRADE, YEAR_MAX_TRADE)]
return descriptives
trade4digit_municipality = {
"read_function": load_trade4digit_municipality,
"field_mapping": {
"r": "location",
"p": "product",
"yr": "year",
"export_value": "export_value",
"export_num_plants": "export_num_plants",
"import_value": "import_value",
"import_num_plants": "import_num_plants",
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "municipality"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
},
"digit_padding": {
"location": 5,
"product": 4
},
"facet_fields": ["location", "product", "year"],
"facets": {
("location_id", "product_id", "year"): {
"export_value": first,
"export_num_plants": first,
"import_value": first,
"import_num_plants": first
}
}
}
trade4digit_rcpy_fields_export = {
"r": "location",
"country": "country",
"p": "product",
"yr": "year",
"X_rcpy_d_export": "export_value",
"NP_rcpy_export": "export_num_plants",
"X_rcpy_d_import": "import_value",
"NP_rcpy_import": "import_num_plants"
}
def read_trade4digit_rcpy(suffix="rc_p4"):
e = pd\
.read_stata(prefix_path("Trade/exp_rcpy_{}.dta".format(suffix)))\
.rename(columns={"ctry_dest": "country"})
i = pd\
.read_stata(prefix_path("Trade/imp_rcpy_{}.dta".format(suffix)))\
.rename(columns={"ctry_orig": "country"})
df = e.merge(i,
on=['r', 'p', 'country', 'yr'],
how='outer',
suffixes=('_export', '_import'))
df = df[df.yr.between(YEAR_MIN_TRADE, YEAR_MAX_TRADE)]
return df.fillna(0)
def replace_country(df):
df["r"] = "COL"
return df
trade4digit_rcpy_country = {
"read_function": lambda: replace_country(read_trade4digit_rcpy(suffix="rc_p4")),
"field_mapping": trade4digit_rcpy_fields_export,
"classification_fields": {
"location": {
"classification": location_classification,
"level": "country"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
"country": {
"classification": country_classification,
"level": "country"
},
},
"digit_padding": {
"country": 3,
"product": 4
},
"facet_fields": ["location", "country", "product", "year"],
"facets": {
("country_id", "location_id", "year"): {
"export_value": sum_group,
"export_num_plants": sum_group,
"import_value": sum_group,
"import_num_plants": sum_group,
},
("product_id", "country_id", "year"): {
"export_value": sum_group,
"export_num_plants": sum_group,
"import_value": sum_group,
"import_num_plants": sum_group,
},
("country_id", "location_id", "product_id", "year"): {
"export_value": first,
"export_num_plants": first,
"import_value": first,
"import_num_plants": first,
}
}
}
trade4digit_rcpy_department = {
"read_function": lambda: read_trade4digit_rcpy(suffix="r2_p4"),
"field_mapping": trade4digit_rcpy_fields_export,
"classification_fields": {
"location": {
"classification": location_classification,
"level": "department"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
"country": {
"classification": country_classification,
"level": "country"
},
},
"digit_padding": {
"location": 2,
"country": 3,
"product": 4
},
"facet_fields": ["location", "country", "product", "year"],
"facets": {
("country_id", "location_id", "year"): {
"export_value": sum_group,
"export_num_plants": sum_group,
"import_value": sum_group,
"import_num_plants": sum_group,
},
("country_id", "location_id", "product_id", "year"): {
"export_value": first,
"export_num_plants": first,
"import_value": first,
"import_num_plants": first,
}
}
}
def load_trade4digit_rcpy_msa():
df = read_trade4digit_rcpy(suffix="ra_p4")
# Add missing exports from single muni MSAs. See MEX-148 COL-959
muni = read_trade4digit_rcpy(suffix="r5_p4")
muni = muni[muni.r.isin(SINGLE_MUNI_MSAS)]
muni.r = muni.r.map(lambda x: x + "0")
return pd.concat([df, muni]).reset_index(drop=True)
trade4digit_rcpy_msa = {
"read_function": load_trade4digit_rcpy_msa,
"field_mapping": trade4digit_rcpy_fields_export,
"classification_fields": {
"location": {
"classification": location_classification,
"level": "msa"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
"country": {
"classification": country_classification,
"level": "country"
},
},
"digit_padding": {
"country": 3,
"product": 4
},
"facet_fields": ["location", "country", "product", "year"],
"facets": {
("country_id", "location_id", "year"): {
"export_value": sum_group,
"export_num_plants": sum_group,
"import_value": sum_group,
"import_num_plants": sum_group,
},
("country_id", "location_id", "product_id", "year"): {
"export_value": first,
"export_num_plants": first,
"import_value": first,
"import_num_plants": first,
}
}
}
trade4digit_rcpy_municipality = {
"read_function": lambda: read_trade4digit_rcpy(suffix="r5_p4"),
"field_mapping": trade4digit_rcpy_fields_export,
"classification_fields": {
"location": {
"classification": location_classification,
"level": "municipality"
},
"product": {
"classification": product_classification,
"level": "4digit"
},
"country": {
"classification": country_classification,
"level": "country"
},
},
"digit_padding": {
"location": 5,
"country": 3,
"product": 4
},
"facet_fields": ["location", "country", "product", "year"],
"facets": {
("country_id", "location_id", "product_id", "year"): {
"export_value": first,
"export_num_plants": first,
"import_value": first,
"import_num_plants": first,
},
("country_id", "location_id", "year"): {
"export_value": sum_group,
"export_num_plants": sum_group,
"import_value": sum_group,
"import_num_plants": sum_group,
}
}
}
def hook_industry(df):
df = df.drop_duplicates(["location", "industry", "year"])
df = df[df.location.notnull()]
df = df[df.year.between(YEAR_MIN_INDUSTRY, YEAR_MAX_INDUSTRY)]
return df
def industry4digit_country_read():
df = pd.read_hdf(prefix_path("Industries/industries_all.hdf"), "data")
df["country_code"] = "COL"
return df
industry4digit_country = {
"read_function": industry4digit_country_read,
"field_mapping": {
"country_code": "location",
"p_code": "industry",
"year": "year",
"all_p_emp": "employment",
"all_p_wage": "wages",
"all_p_wagemonth": "monthly_wages",
"all_p_est": "num_establishments",
"all_p_pci": "complexity"
},
"hook_pre_merge": hook_industry,
"classification_fields": {
"location": {
"classification": location_classification,
"level": "country"
},
"industry": {
"classification": industry_classification,
"level": "class"
},
},
"digit_padding": {
"location": 1,
"industry": 4
},
"facet_fields": ["location", "industry", "year"],
"facets": {
("industry_id", "year"): {
"complexity": first
},
("location_id", "industry_id", "year"): {
"employment": first,
"wages": first,
"monthly_wages": first,
"num_establishments": first,
}
}
}
industry4digit_department = {
"read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_state.hdf"), "data"),
"field_mapping": {
"state_code": "location",
"p_code": "industry",
"year": "year",
"state_p_emp": "employment",
"state_p_wage": "wages",
"state_p_wagemonth": "monthly_wages",
"state_p_est": "num_establishments",
"state_p_rca": "rca",
"state_p_distance_flow": "distance",
"state_p_cog_flow_pred": "cog",
"state_all_coi_flow_pred": "industry_coi",
"all_p_pci": "complexity",
"state_all_eci": "industry_eci"
},
"hook_pre_merge": hook_industry,
"classification_fields": {
"location": {
"classification": location_classification,
"level": "department"
},
"industry": {
"classification": industry_classification,
"level": "class"
},
},
"digit_padding": {
"location": 2,
"industry": 4
},
"facet_fields": ["location", "industry", "year"],
"facets": {
("location_id", "year"): {
"employment": sum_group,
"wages": sum_group,
"monthly_wages": first,
"num_establishments": sum_group,
"industry_eci": first,
"industry_coi":first,
},
("industry_id", "year"): {
"employment": sum_group,
"wages": sum_group,
"monthly_wages": sum_group,
"num_establishments": sum_group,
"complexity": first
},
("location_id", "industry_id", "year"): {
"employment": first,
"wages": first,
"monthly_wages": first,
"num_establishments": first,
"distance": first,
"cog": first,
"rca": first
}
}
}
def hook_industry4digit_msa(df):
df = hook_industry(df)
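    # Zero-pad the raw MSA code to 5 digits and append a trailing "0" to match the
    # MSA code convention used in the trade datasets above.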
df.location = df.location.astype(int).astype(str).str.zfill(5) + "0"
return df
industry4digit_msa = {
"read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_msa.hdf"), "data"),
"hook_pre_merge": hook_industry4digit_msa,
"field_mapping": {
"msa_code": "location",
"p_code": "industry",
"year": "year",
"msa_p_emp": "employment",
"msa_p_wage": "wages",
"msa_p_wagemonth": "monthly_wages",
"msa_p_est": "num_establishments",
"msa_p_rca": "rca",
"msa_p_distance_flow": "distance",
"msa_p_cog_flow_pred": "cog",
"msa_all_coi_flow_pred": "industry_coi",
"all_p_pci": "complexity",
"msa_all_eci": "industry_eci"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "msa"
},
"industry": {
"classification": industry_classification,
"level": "class"
},
},
"digit_padding": {
"industry": 4
},
"facet_fields": ["location", "industry", "year"],
"facets": {
("location_id", "year"): {
"employment": sum_group,
"wages": sum_group,
"monthly_wages": first,
"num_establishments": sum_group,
"industry_eci": first,
"industry_coi": first,
},
("industry_id", "year"): {
"employment": sum_group,
"wages": sum_group,
"complexity": first
},
("location_id", "industry_id", "year"): {
"employment": first,
"wages": first,
"monthly_wages": first,
"num_establishments": first,
"distance": first,
"cog": first,
"rca": first
}
}
}
industry4digit_municipality = {
"read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_muni.hdf"), "data"),
"hook_pre_merge": hook_industry,
"field_mapping": {
"muni_code": "location",
"p_code": "industry",
"year": "year",
"muni_p_emp": "employment",
"muni_p_wage": "wages",
"muni_p_wagemonth": "monthly_wages",
"muni_p_est": "num_establishments",
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "municipality"
},
"industry": {
"classification": industry_classification,
"level": "class"
},
},
"digit_padding": {
"location": 5,
"industry": 4
},
"facet_fields": ["location", "industry", "year"],
"facets": {
("location_id", "industry_id", "year"): {
"employment": first,
"wages": first,
"monthly_wages": first,
"num_establishments": first,
}
}
}
population = {
"read_function": lambda: pd.read_stata(prefix_path("Final_Metadata/col_pop_muni_dept_natl.dta")),
"hook_pre_merge": lambda df: df[~df[["location", "year", "population"]].duplicated()],
"field_mapping": {
"year": "year",
"dept_code": "location",
"dept_pop": "population"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "department"
},
},
"digit_padding": {
"location": 2
},
"facet_fields": ["location", "year"],
"facets": {
("location_id", "year"): {
"population": first
}
}
}
gdp_nominal_department = {
"read_function": lambda: pd.read_stata(prefix_path("Final_Metadata/col_nomgdp_muni_dept_natl.dta")),
"hook_pre_merge": lambda df: df.drop_duplicates(["location", "year"]),
"field_mapping": {
"dept_code": "location",
"dept_gdp": "gdp_nominal",
"year": "year"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "department"
},
},
"digit_padding": {
"location": 2
},
"facet_fields": ["location", "year"],
"facets": {
("location_id", "year"): {
"gdp_nominal": first,
}
}
}
gdp_real_department = {
"read_function": lambda: pd.read_stata(prefix_path("Final_Metadata/col_realgdp_dept_natl.dta")),
"field_mapping": {
"dept_code": "location",
"real_gdp": "gdp_real",
"year": "year"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "department"
},
},
"digit_padding": {
"location": 2
},
"facet_fields": ["location", "year"],
"facets": {
("location_id", "year"): {
"gdp_real": first,
}
}
}
def industry2digit_country_read():
df = pd.read_hdf(prefix_path("Industries/industries_all.hdf"), "data")
df["country_code"] = "COL"
return df
industry2digit_country = {
"read_function": industry2digit_country_read,
"hook_pre_merge": hook_industry,
"field_mapping": {
"country_code": "location",
"d3_code": "industry",
"year": "year",
"all_d3_wage": "wages",
"all_d3_wagemonth": "monthly_wages",
"all_d3_emp": "employment",
"all_d3_est": "num_establishments",
"all_d3_pci": "complexity"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "country"
},
"industry": {
"classification": industry_classification,
"level": "division"
},
},
"digit_padding": {
"location": 1,
"industry": 2
},
"facet_fields": ["location", "industry", "year"],
"facets": {
("industry_id", "year"): {
"wages": first,
"monthly_wages": first,
"employment": first,
"num_establishments": first,
"complexity": first
}
}
}
industry2digit_department = {
"read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_state.hdf"), "data"),
"hook_pre_merge": hook_industry,
"field_mapping": {
"state_code": "location",
"d3_code": "industry",
"year": "year",
"state_d3_est": "num_establishments",
"state_d3_wage": "wages",
"state_d3_wagemonth": "monthly_wages",
"state_d3_emp": "employment",
"state_d3_rca": "rca",
"state_d3_distance_flow_pred": "distance",
"state_d3_cog_flow_pred": "cog",
"all_d3_pci": "complexity"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "department"
},
"industry": {
"classification": industry_classification,
"level": "division"
},
},
"digit_padding": {
"location": 2,
"industry": 2
},
"facet_fields": ["location", "industry", "year"],
"facets": {
("location_id", "industry_id", "year"): {
"wages": first,
"monthly_wages": first,
"employment": first,
"num_establishments": first,
"distance": first,
"cog": first,
"rca": first
}
}
}
def hook_industry2digit_msa(df):
df = hook_industry(df)
df.location = df.location.astype(int).astype(str).str.zfill(5) + "0"
return df
industry2digit_msa = {
"read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_msa.hdf"), "data"),
"hook_pre_merge": hook_industry2digit_msa,
"field_mapping": {
"msa_code": "location",
"d3_code": "industry",
"year": "year",
"msa_d3_est": "num_establishments",
"msa_d3_wage": "wages",
"msa_d3_wagemonth": "monthly_wages",
"msa_d3_emp": "employment",
"msa_d3_rca": "rca",
"msa_d3_distance_flow_pred": "distance",
"msa_d3_cog_flow_pred": "cog",
"all_d3_pci": "complexity"
},
"classification_fields": {
"location": {
"classification": location_classification,
"level": "msa"
},
"industry": {
"classification": industry_classification,
"level": "division"
},
},
"digit_padding": {
"industry": 2,
"location": 5
},
"facet_fields": ["location", "industry", "year"],
"facets": {
("industry_id", "year"): {
"wages": sum_group,
"monthly_wages": sum_group,
"employment": sum_group,
"num_establishments": sum_group,
"complexity": first
},
("location_id", "industry_id", "year"): {
"wages": first,
"monthly_wages": first,
"employment": first,
"num_establishments": first,
"distance": first,
"cog": first,
"rca": first
}
}
}
occupation2digit_industry2digit = {
"read_function": lambda: pd.read_stata(prefix_path("Vacancies/Vacancies_do130_2d-Ind_X_4d-Occ.dta")),
"field_mapping": {
"onet_4dig": "occupation",
"ciiu_2dig": "industry",
"num_vacantes": "num_vacancies",
"wage_mean": "average_wages"
},
"classification_fields": {
"occupation": {
"classification": occupation_classification,
"level": "minor_group"
},
"industry": {
"classification": industry_classification,
"level": "division"
},
},
"digit_padding": {
"occupation": 7,
"industry": 4
},
"facet_fields": ["occupation", "industry"],
"facets": {
("occupation_id", "industry_id"): {
"average_wages": first,
"num_vacancies": first,
}
}
}
occupation2digit = {
"read_function": lambda: pd.read_stata(prefix_path("Vacancies/Vacancies_do140_4d-Occ.dta")),
"field_mapping": {
"onet_4dig": "occupation",
"num_vacantes": "num_vacancies",
"wage_mean": "average_wages"
},
"classification_fields": {
"occupation": {
"classification": occupation_classification,
"level": "minor_group"
},
},
"digit_padding": {
"occupation": 7,
},
"facet_fields": ["occupation"],
"facets": {
("occupation_id"): {
"average_wages": first,
"num_vacancies": first,
}
}
}
livestock_template = {
"read_function": None,
"field_mapping": {
"livestock": "livestock",
"location_id": "location",
"livestock_level": "livestock_level",
"livestock_number": "num_livestock",
"livestock_farms_number": "num_farms",
"average_livestock_load": "average_livestock_load",
},
"classification_fields": {
"livestock": {
"classification": livestock_classification,
"level": "level1",
},
"location": {
"classification": location_classification,
"level": None,
},
},
"digit_padding": {
"location": None,
},
"facet_fields": ["location", "livestock"],
"facets": {
("location_id", "livestock_id"): {
"num_livestock": first,
"num_farms": first,
"average_livestock_load": first,
},
("location_id",): {
"num_livestock": sum_group,
"num_farms": sum_group,
"average_livestock_load": null,
}
}
}
def read_livestock_level1_country():
df = pd.read_stata(prefix_path("Rural/livestock_Col_2.dta"))
df["location_id"] = "COL"
return df
def hook_livestock(df):
df["livestock"] = df["livestock"].str.lower()
df = df[df.livestock_level == "level1"]
return df
livestock_level1_country = copy.deepcopy(livestock_template)
livestock_level1_country["read_function"] = read_livestock_level1_country
livestock_level1_country["hook_pre_merge"] = hook_livestock
livestock_level1_country["classification_fields"]["location"]["level"] = "country"
livestock_level1_country["digit_padding"]["location"] = 3
livestock_level1_department = copy.deepcopy(livestock_template)
livestock_level1_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/livestock_dept_2.dta"))
livestock_level1_department["hook_pre_merge"] = hook_livestock
livestock_level1_department["classification_fields"]["location"]["level"] = "department"
livestock_level1_department["digit_padding"]["location"] = 2
livestock_level1_municipality = copy.deepcopy(livestock_template)
livestock_level1_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/livestock_muni_2.dta"))
livestock_level1_municipality["hook_pre_merge"] = hook_livestock
livestock_level1_municipality["classification_fields"]["location"]["level"] = "municipality"
livestock_level1_municipality["digit_padding"]["location"] = 5
agproduct_template = {
"read_function": None,
"field_mapping": {
"location_id": "location",
"product_name_sp": "agproduct",
"product_level": "agproduct_level",
"year": "year",
"land_sown_has": "land_sown",
"land_harv_has": "land_harvested",
"production_tons": "production_tons",
"yieldtonsperha": "yield_ratio",
"indexyield": "yield_index",
},
"classification_fields": {
"agproduct": {
"classification": agproduct_classification,
"level": "level3",
},
"location": {
"classification": location_classification,
"level": None,
},
},
"digit_padding": {
"location": None,
},
"facet_fields": ["location", "agproduct", "year"],
"facets": {
("location_id", "agproduct_id", "year"): {
"land_sown": first,
"land_harvested": first,
"production_tons": first,
"yield_ratio": first,
"yield_index": first,
}
}
}
def read_agproduct_level3_country():
df = pd.read_stata(prefix_path("Rural/agric_2007_2015_Col_final_2.dta"))
df["location_id"] = "COL"
return df
def hook_agproduct(df):
df["agproduct"] = df["agproduct"].map(slugify)
df = df[df.agproduct_level == "level3"]
df = df[df.year != ""]
df.year = df.year.astype(int)
df = df[df.year.between(YEAR_MIN_AGPRODUCT, YEAR_MAX_AGPRODUCT)]
return df
agproduct_level3_country = copy.deepcopy(agproduct_template)
agproduct_level3_country["read_function"] = read_agproduct_level3_country
agproduct_level3_country["hook_pre_merge"] = hook_agproduct
agproduct_level3_country["classification_fields"]["location"]["level"] = "country"
agproduct_level3_country["digit_padding"]["location"] = 3
agproduct_level3_department = copy.deepcopy(agproduct_template)
agproduct_level3_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/agric_2007_2015_dept_final_2.dta"))
agproduct_level3_department["hook_pre_merge"] = hook_agproduct
agproduct_level3_department["classification_fields"]["location"]["level"] = "department"
agproduct_level3_department["digit_padding"]["location"] = 2
agproduct_level3_municipality = copy.deepcopy(agproduct_template)
agproduct_level3_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/agric_2007_2015_muni_final_2.dta"))
agproduct_level3_municipality["hook_pre_merge"] = hook_agproduct
agproduct_level3_municipality["classification_fields"]["location"]["level"] = "municipality"
agproduct_level3_municipality["digit_padding"]["location"] = 3
def hook_land_use(df):
df = df[df.land_use_level == "level2"]
df["land_use"] = df["land_use"].str.replace('\x92', 'Â’')
return df
land_use_template = {
"read_function": None,
"hook_pre_merge": hook_land_use,
"field_mapping": {
"location_id": "location",
"land_use_type_name_sp": "land_use",
"land_use_level": "land_use_level",
"land_use_ha": "area",
},
"classification_fields": {
"land_use": {
"classification": land_use_classification,
"level": "level2",
},
"location": {
"classification": location_classification,
"level": None,
},
},
"digit_padding": {
"location": None,
},
"facet_fields": ["location", "land_use"],
"facets": {
("location_id", "land_use_id"): {
"area": first,
}
}
}
def read_land_use_level2_country():
df = pd.read_stata(prefix_path("Rural/land_use_Col_c.dta"))
df["location_id"] = "COL"
return df
land_use_level2_country = copy.deepcopy(land_use_template)
land_use_level2_country["read_function"] = read_land_use_level2_country
land_use_level2_country["classification_fields"]["location"]["level"] = "country"
land_use_level2_country["digit_padding"]["location"] = 3
land_use_level2_department = copy.deepcopy(land_use_template)
land_use_level2_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/land_use_dept_c.dta"))
land_use_level2_department["classification_fields"]["location"]["level"] = "department"
land_use_level2_department["digit_padding"]["location"] = 2
land_use_level2_municipality = copy.deepcopy(land_use_template)
land_use_level2_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/land_use_muni_c.dta"))
land_use_level2_municipality["hook_pre_merge"] = hook_land_use
land_use_level2_municipality["classification_fields"]["location"]["level"] = "municipality"
land_use_level2_municipality["digit_padding"]["location"] = 5
def hook_farmtype(df):
df = df[df.farmtype_level == "level2"]
return df
farmtype_template = {
"read_function": None,
"hook_pre_merge": hook_farmtype,
"field_mapping": {
"location_id": "location",
"farms_types_name": "farmtype",
"farms_level": "farmtype_level",
"farms_number": "num_farms",
},
"classification_fields": {
"farmtype": {
"classification": farmtype_classification,
"level": "level2",
},
"location": {
"classification": location_classification,
"level": None,
},
},
"digit_padding": {
"location": None,
},
"facet_fields": ["location", "farmtype"],
"facets": {
("location_id", "farmtype_id"): {
"num_farms": first,
}
}
}
def read_farmtype_level2_country():
df = pd.read_stata(prefix_path("Rural/farms_Col_c.dta"))
df["location_id"] = "COL"
return df
farmtype_level2_country = copy.deepcopy(farmtype_template)
farmtype_level2_country["read_function"] = read_farmtype_level2_country
farmtype_level2_country["classification_fields"]["location"]["level"] = "country"
farmtype_level2_country["digit_padding"]["location"] = 3
farmtype_level2_department = copy.deepcopy(farmtype_template)
farmtype_level2_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/farms_dept_c.dta"))
farmtype_level2_department["classification_fields"]["location"]["level"] = "department"
farmtype_level2_department["digit_padding"]["location"] = 2
farmtype_level2_municipality = copy.deepcopy(farmtype_template)
farmtype_level2_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/farms_muni_c.dta"))
farmtype_level2_municipality["hook_pre_merge"] = hook_farmtype
farmtype_level2_municipality["classification_fields"]["location"]["level"] = "municipality"
farmtype_level2_municipality["digit_padding"]["location"] = 5
def hook_farmsize(df):
df = df[df.farmsize_level == "level1"]
return df
farmsize_template = {
"read_function": None,
"hook_pre_merge": hook_farmsize,
"field_mapping": {
"location_id": "location",
"landuse_type_sp": "farmsize",
"landuse_type_level": "farmsize_level",
"av_farms_size_ha": "avg_farmsize",
},
"classification_fields": {
"farmsize": {
"classification": farmsize_classification,
"level": "level1",
},
"location": {
"classification": location_classification,
"level": None,
},
},
"digit_padding": {
"location": None,
},
"facet_fields": ["location", "farmsize"],
"facets": {
("location_id", "farmsize_id"): {
"avg_farmsize": first,
}
}
}
def read_farmsize_level1_country():
df = pd.read_stata(prefix_path("Rural/average_farms_size_Col.dta"))
df["location_id"] = "COL"
return df
farmsize_level1_country = copy.deepcopy(farmsize_template)
farmsize_level1_country["read_function"] = read_farmsize_level1_country
farmsize_level1_country["classification_fields"]["location"]["level"] = "country"
farmsize_level1_country["digit_padding"]["location"] = 3
farmsize_level1_department = copy.deepcopy(farmsize_template)
farmsize_level1_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/average_farms_size_dept.dta"))
farmsize_level1_department["classification_fields"]["location"]["level"] = "department"
farmsize_level1_department["digit_padding"]["location"] = 2
farmsize_level1_municipality = copy.deepcopy(farmsize_template)
farmsize_level1_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/average_farms_size_muni.dta"))
farmsize_level1_municipality["hook_pre_merge"] = hook_farmsize
farmsize_level1_municipality["classification_fields"]["location"]["level"] = "municipality"
farmsize_level1_municipality["digit_padding"]["location"] = 5
nonagric_template = {
"read_function": None,
"field_mapping": {
"location_id": "location",
"activity_name": "nonag",
"activities_level": "nonag_level",
"farms_number_agric": "num_farms_ag",
"farms_number_nonagric": "num_farms_nonag",
"farms_number": "num_farms",
},
"classification_fields": {
"nonag": {
"classification": nonagric_classification,
"level": "level3",
},
"location": {
"classification": location_classification,
"level": None,
},
},
"digit_padding": {
"location": None,
},
"facet_fields": ["location", "nonag"],
"facets": {
("location_id", "nonag_id"): {
"num_farms": first,
"num_farms_ag": first,
"num_farms_nonag": first,
}
}
}
def fix_nonagric(df):
df = df[df.activities_level == "level3"]
df = df.drop("activity_name_sp", axis=1)
agric = df[df.activities_group == "agric_nonagric"].drop("activities_group", axis=1)
nonagric = df[df.activities_group == "nonagric_nonagric"].drop("activities_group", axis=1)
assert agric.size == nonagric.size, "the agric_nonagric and nonagric_nonagric categories should have the same number of items"
df = agric.merge(nonagric,
on=["activities_level", "location_id", "activities_subgroup", "activity_name"],
how="outer", suffixes=("_agric", "_nonagric"))
df["farms_number"] = df["farms_number_agric"] + df["farms_number_nonagric"]
return df
def read_nonagric_level3_country():
df = pd.read_stata(prefix_path("Rural/non_agri_activities_Col.dta"))
df["location_id"] = "COL"
df["activity_name"] = df["activities"].str.strip()
df = df.drop("activities", axis=1)
df["activity_name_sp"] = pd.np.nan
df = fix_nonagric(df)
return df
def read_nonagric_level3_department():
df = pd.read_stata(prefix_path("Rural/non_agri_activities_dept.dta"))
df = fix_nonagric(df)
return df
def read_nonagric_level3_municipality():
df = pd.read_stata(prefix_path("Rural/non_agri_activities_muni.dta"))
df = fix_nonagric(df)
return df
def hook_nonagric(df):
df["nonag"] = df["nonag"].map(slugify)
df = df[df.nonag_level == "level3"]
return df
nonagric_level3_country = copy.deepcopy(nonagric_template)
nonagric_level3_country["read_function"] = read_nonagric_level3_country
nonagric_level3_country["hook_pre_merge"] = hook_nonagric
nonagric_level3_country["classification_fields"]["location"]["level"] = "country"
nonagric_level3_country["digit_padding"]["location"] = 3
nonagric_level3_department = copy.deepcopy(nonagric_template)
nonagric_level3_department["read_function"] = read_nonagric_level3_department
nonagric_level3_department["hook_pre_merge"] = hook_nonagric
nonagric_level3_department["classification_fields"]["location"]["level"] = "department"
nonagric_level3_department["digit_padding"]["location"] = 2
nonagric_level3_municipality = copy.deepcopy(nonagric_template)
nonagric_level3_municipality["read_function"] = read_nonagric_level3_municipality
nonagric_level3_municipality["hook_pre_merge"] = hook_nonagric
nonagric_level3_municipality["classification_fields"]["location"]["level"] = "municipality"
nonagric_level3_municipality["digit_padding"]["location"] = 3
```
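The dictionaries above are declarative dataset descriptions; the driver that actually reads, cleans, classifies and aggregates them lives elsewhere in this repository and is not shown in this excerpt. A minimal sketch of that flow, assuming a hypothetical `run_dataset` helper and a `classify` callable standing in for the classification merge, might look like this:
```python
# Hedged sketch only: classify(df, dataset) is assumed to merge each
# classification_fields entry and add the corresponding *_id columns
# (e.g. land_use_id, location_id), applying digit_padding to location codes.
def run_dataset(dataset, classify):
    df = dataset["read_function"]()                   # load the raw Stata file
    hook = dataset.get("hook_pre_merge")
    if hook is not None:
        df = hook(df)                                 # dataset-specific cleanup
    df = df.rename(columns=dataset["field_mapping"])  # raw column -> canonical name
    df = classify(df, dataset)                        # adds the *_id columns used below
    facets = {}
    for group_cols, aggregations in dataset["facets"].items():
        facets[group_cols] = df.groupby(list(group_cols)).agg(aggregations)
    return facets
```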
#### File: atlas-subnational-api/tests/__init__.py
```python
from colombia import create_app
from atlas_core.testing import BaseTestCase as CoreBaseTestCase
class BaseTestCase(CoreBaseTestCase):
SQLALCHEMY_DATABASE_URI = "sqlite://"
def create_app(self):
return create_app({"SQLALCHEMY_DATABASE_URI":
self.SQLALCHEMY_DATABASE_URI,
"TESTING": True})
```
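A concrete test module would subclass this base class. The sketch below is hypothetical and assumes atlas_core's BaseTestCase exposes a Flask test client as `self.client`, in the style of Flask-Testing; the route used is also an assumption.
```python
# Hypothetical smoke test built on the BaseTestCase above.
class SmokeTest(BaseTestCase):

    def test_app_boots(self):
        response = self.client.get("/")
        # the root route may not exist, so only assert that the app answered
        self.assertIn(response.status_code, (200, 404))
```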
|
{
"source": "jgp505/PerovGen",
"score": 2
}
|
#### File: pygmd/autocal/inputset.py
```python
import os
import sys
import time
import subprocess
import glob
from shutil import copyfile
import ast
import yaml
import numpy as np
import pandas as pd
from collections import defaultdict
from pymatgen.io.vasp.sets import MPRelaxSet
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.io.vasp.inputs import Kpoints, Incar, Kpoints_supported_modes
from perovgen.pygmd.shell import ShellPath
from perovgen.pygmd.input_structure import load_structure, GMDStructure
def CopyCHGCAR(path):
b = os.path.abspath(path)
if os.path.isfile(b):
ops = os.path.split(b)[0]
if "CHGCAR" in os.listdir(ops) :
chg_path = "{}/CHGCAR".format(ops)
else :
print("It isn't have CHGCAR file")
sys.exit(0)
else :
if "CHGCAR" in os.listdir(b) :
chg_path = "{}/CHGCAR".format(path)
else :
print("It isn't have CHGCAR file")
sys.exit(0)
return chg_path
def MakingInpcar(structure, path, nelect, kpoints):
with open(path,'w') as fi :
for i in kpoints :
fi.write("%.3f "%(i))
fi.write("\n0.01\n%i\nV\n"%(nelect))
for i in structure.lattice.matrix :
fi.write("%.5f %.5f %.5f\n"%(i[0],i[1],i[2]))
fi.close()
class inputgmd :
def GMDKpoints(self,kpoints) :
kpoint={"POINT":list(kpoints[0])[0]}
for k in kpoints[1:] :
n,v = k.split("=")
n = n.replace(" ","")
v = v.replace(" ","")
if n == 'CONSTK':
kpoint[n]=int(v)
elif n == 'KPTS' :
kpoint[n]=ast.literal_eval(v)
if not "CONSTK" in [*kpoint] and "KPTS" in [*kpoint] :
kpoint['CONSTK']=False
elif "CONSTK" in [*kpoint] and not "KPTS" in [*kpoint]:
kpoint['KPTS']=False
return kpoint
def string_to_dict(self,incars) :
incar = {}
for inc in incars :
n,v = inc.split("=")
n = n.replace(" ","")
v = v.replace(" ","")
try :
if type(int(v)) == int :
v = int(v)
elif type(float(v)) == float :
v = float(v)
incar[n]=v
except :
if n == "MAGMOM" : # list type
incar['MAGMOM']=ast.literal_eval(v)
else :
incar[n]=v
return incar
def GMDIncar(self,incar) :
if incar[0] == "MPJ" :
incars = {"ISMEAR":1}
else :
stream = open("%s/GMDincar.yaml"%(os.path.dirname(__file__)),'r')
incars = yaml.load(stream, Loader=yaml.FullLoader)
if incar[0] == "PBE" :
pass
elif incar[0] == "PBESol" :
incars["GGA"]='Ps'
elif incar[0] == "VDW" :
incars["IVDW"]=21
elif incar[0] == "SCAN" :
incars["METAGGA"] = "SCAN"
incars["LUSE_VDW"] = True
incars["BPARAM"]=15.7
incars["LASPH"]=True
if len(incar[1:]) != 0 :
incar1 = self.string_to_dict(incar[1:])
for k,v in incar1.items() :
incars[k]=v
return incars
def __init__(self, path) :
self.path = path
f = open(path,'r')
ff = f.readlines()
self.inputgmd = defaultdict(list)
self.class_type_list = ['KPOINTS','INCAR','SHELL','CALMODE']
self.modecheck = ["M","R","C","B","D","A","U","E",'G', # one mode
"RC","CB","CD","DB","BD","BE","EU","CA",'CG','RG', # two mode
"RCB","RCD","CBD","CDB","DBE","CBE","BEU","RCA",'CBG', # THREE MODE
"RCBD","RCDB","CBEU","DBEU","RCBE","CDBE",'RCBG', # four mode
"RCBEU","RCDBE","CDBEU","RCBGE", # FIVE MODE
"RCDBEU",'RCDBGE','RGCDBE',# SIX MODE
"RCADBEU"] # SEVEN MODE
for i in ff :
line = i.split("\n")[0]
if not '#' in line :
if len(line.split(":")) == 2 :
if line.split(":")[0] in self.class_type_list :
classname = line.split(":")[0]
else :
print(line.split(":")[0],"doesn't exist class in the input.gmd")
sys.exit(1)
else :
if not line == '' :
self.inputgmd[classname].append(line)
# Check the KPOINTS class
if not list(self.inputgmd['KPOINTS'][0])[0] in ["A", "G", "M"] :
print("KPOINTS first line have to type A(Auto) or G(Gamma) or M(Monkhorst)!")
sys.exit(1)
elif len(self.inputgmd['KPOINTS']) > 4 :
print("KPOINTS class can only have 3 or less values.")
sys.exit(1)
else :
self.kpoints = self.GMDKpoints(self.inputgmd['KPOINTS'])
# Check the SHELL class
if len(self.inputgmd['SHELL']) != 1:
print("SHELL class must have only one value!")
sys.exit(1)
else :
self.shell = self.inputgmd['SHELL'][0]
# Check the CALMODE class
if not self.inputgmd['CALMODE'][0] in self.modecheck :
print("CALMODE calss is wrong mode!")
sys.exit(1)
else :
self.calmode = list(self.inputgmd['CALMODE'][0])
# Check the INCAR class
if not self.inputgmd['INCAR'][0] in ['PBE','PBESol','VDW','SCAN','MPJ'] :
print("INCAR first line have to type PBE or PBESol or VDW or SCAN or MPJ")
sys.exit(1)
else :
self.exchange_corr = self.inputgmd['INCAR'][0]
self.incar = self.GMDIncar(self.inputgmd['INCAR'])
self.inputgmd['KPOINTS'] = self.kpoints
self.inputgmd['INCAR'] = self.incar
self.inputgmd['SHELL'] = self.shell
self.inputgmd['CALMODE'] = self.calmode
class PerovInputs :
def __init__(self, structure, is_selective=False):
self.is_selective=is_selective
for i in range(structure.num_sites):
try :
structure.replace(i, structure.species[i].element)
except :
structure.replace(i, structure.species[i])
self.poscar = structure
if self.is_selective :
for i in range(self.poscar.num_sites):
try :
self.poscar.replace(i,self.poscar.species[i].element, properties=None)
except :
self.poscar.replace(i,self.poscar.species[i],properties=None)
full_formula = GMDStructure(self.poscar).formula(reduced=False)
try :
symmetry, groupnumber = self.poscar.get_space_group_info()
except :
groupnumber = 0
self.naming = "{0}_{1:03d}".format(full_formula, groupnumber)
def incarmode(self,incar, method):
if method == "R" :
pass
elif method == "C":
incar["NSW"] = 0
incar["LCHARG"] = True
incar["EDIFF"] = 1E-6
elif method == "B" or method == "E" or method == "D":
incar["NSW"] = 0
incar["EDIFF"]=1E-6
incar["LCHARG"] = False
incar["ICHARG"] = 11
incar["SIGMA"]=0.01
incar["ISMEAR"] = 0
if method == "D" :
incar["SIGMA"]=0.01
incar["ISMEAR"] = -5
elif method == "U" :
incar["LREAL"] = False
incar["NSW"] = 0
incar["EDIFF"]=1E-8
incar["ISMEAR"]=0
incar["IBRION"]=8
incar["LEPSILON"]=True
incar["SIGMA"]=0.01
elif method == "A" :
incar["EDIFF"]=1E-8
incar["LPEAD"]=True
incar["NEDPS"]=2000
incar["LOPTICS"]=True
incar["CSHIFT"]=0.100
incar["SIGMA"]=0.01
incar["LSORBIT"]=True
elif method == 'G' :
incar['NKRED']=2
incar['LHFCALC']=True
incar['ALGO']='D' # ver 3.6.8
incar['NSW']=1 # one-shot hybrid
incar['ISYM']=0
incar['SYMPREC']=1E-8
incar['AEXX']=0.25
incar['HFSCREEN']=0.2
incar['TIME']=0.4
incar['PRECFOCK']='FAST'
'''
elif method == "M" :
incar["EDIFF"]=1E-5
incar["ENCUT"]=400
'''
return incar
def inputfolder(self, inputs, method, soc=False):
incar = self.incarmode(incar=inputs.incar,method=method) ; kpoints = inputs.kpoints
mpr = MPRelaxSet(self.poscar, user_incar_settings=incar,user_potcar_functional=None) #"PBE_52")
vi = mpr.get_vasp_input()
if soc :
del vi['INCAR']['MAGMOM']
vi['INCAR']['LSORBIT'] = True
vi['KPOINTS'].comment = "{0}_{1}".format(method, self.naming)
if inputs.exchange_corr != 'MPJ' :
warning = '''
[Warning] both CONSTK and KPTS exist in the KPOINTS class of input.gmd.\n
CONSTK will be used.
'''
if kpoints['CONSTK'] and kpoints['KPTS'] :
print(warning)
# KPOINTS mode
if kpoints['POINT'] == 'G' :
vi['KPOINTS'].style = Kpoints_supported_modes.Gamma
elif kpoints['POINT'] == 'M' :
vi['KPOINTS'].style = Kpoints_supported_modes.Monkhorst
elif kpoints['POINT'] == 'A' :
pass
# HSE06 and DOS calculation setting Gamma and kpoints
if method == "G" :
vi['KPOINTS'].style = Kpoints_supported_modes.Gamma
# CONSTK = 25 and even number
lattice = []
for l in ['a','b','c'] :
kl = self.poscar.as_dict()['lattice'][l]
if kl < 25 :
if round(25/kl)%2 == 0 :
lattice.append(round(25/kl))
else :
lattice.append(round(25/kl)+1)
else :
lattice.append(1)
vi['INCAR']['NKREDX']=2
vi['INCAR']['NKREDY']=2
vi["KPOINTS"].kpts[0] = lattice
elif method == 'D' :
vi['KPOINTS'].style = Kpoints_supported_modes.Gamma
if (kpoints['CONSTK'] and kpoints['KPTS']) or kpoints['CONSTK']:
number = kpoints['CONSTK']*2
lattice = [self.poscar.as_dict()['lattice'][l] for l in ['a','b','c']]
a,b,c = [i if i < number else number for i in lattice]
vi["KPOINTS"].kpts[0] = [round(number/a),round(number/b),round(number/c)]
elif kpoints['KPTS'] :
vi['KPOINTS'].kpts[0] = np.array(kpoints['KPTS'])*2
elif method == 'B' :
bandinfo = HighSymmKpath(self.poscar)
vi['KPOINTS']=vi['KPOINTS'].automatic_linemode(divisions=21,ibz=bandinfo)
else :
if (kpoints['CONSTK'] and kpoints['KPTS']) or kpoints['CONSTK']:
number = kpoints['CONSTK']
lattice = [self.poscar.as_dict()['lattice'][l] for l in ['a','b','c']]
a,b,c = [i if i < number else number for i in lattice]
vi["KPOINTS"].kpts[0] = [round(number/a),round(number/b),round(number/c)]
elif kpoints['KPTS'] :
vi['KPOINTS'].kpts[0] = kpoints['KPTS']
else :
# HSE06 and DOS calculation setting Gamma and kpoints
if method == "G" :
vi['KPOINTS'].style = Kpoints_supported_modes.Gamma
# CONSTK = 25 and even number
lattice = []
for l in ['a','b','c'] :
kl = self.poscar.as_dict()['lattice'][l]
if kl < 25 :
if round(25/kl)%2 == 0 :
lattice.append(round(25/kl))
else :
lattice.append(round(25/kl)+1)
else :
lattice.append(1)
vi['INCAR']['NKREDX']=2
vi['INCAR']['NKREDY']=2
vi["KPOINTS"].kpts[0] = lattice
elif method == 'D' :
vi['KPOINTS'].style = Kpoints_supported_modes.Gamma
elif method == 'B' :
bandinfo = HighSymmKpath(self.poscar)
vi['KPOINTS']=vi['KPOINTS'].automatic_linemode(divisions=21,ibz=bandinfo)
return vi
class RunningShell :
def __init__(self, shell, name, path):
self.shell_path = ShellPath().shellpath
try :
vaspsh = open("{}/{}".format(self.shell_path,shell),"r")
except :
print("There isn't {} file".format(shell))
sys.exit(1)
self.vaspsh_list = vaspsh.readlines()
for i in self.vaspsh_list :
if "-N" in i :
nameline = i.split()
name_index = self.vaspsh_list.index(i)
del self.vaspsh_list[name_index]
del nameline[-1]
lines2=''
for i in nameline :
lines2 += i+" "
lines2 += name + '\n'
self.vaspsh_list.insert(name_index,lines2)
self.path = path
def write_vaspsh(self):
with open("{}/vasp.sh".format(self.path),"w") as fi :
for i in self.vaspsh_list :
fi.write(i)
fi.close()
def SOC_read_vaspsh(self):
for i in range(len(self.vaspsh_list)):
if 'std' in self.vaspsh_list[i] :
std_line = self.vaspsh_list[i].split("#")
if len(std_line) == 1 :
a = list(std_line[0])
a.insert(0,"#")
aa=""
for j in a :
aa += j
self.vaspsh_list[i] = aa
else :
pass
elif 'ncl' in self.vaspsh_list[i] :
ncl_line = self.vaspsh_list[i].split("#")
if len(ncl_line) == 1 :
a = list(ncl_line[0])
a.insert(0,"#")
aa=""
for j in a :
aa += j
self.vaspsh_list[i] = aa
else :
self.vaspsh_list[i] = ncl_line[-1]
if 'gam' in self.vaspsh_list[i] :
std_line = self.vaspsh_list[i].split("#")
if len(std_line) == 1 :
a = list(std_line[0])
a.insert(0,"#")
aa=""
for j in a :
aa += j
self.vaspsh_list[i] = aa
else :
pass
with open("{}/vasp.sh".format(self.path),"w") as fi :
for i in self.vaspsh_list :
fi.write(i)
fi.close()
def running_mode(self,soc=False, run=True):
pwd = os.getcwd()
if soc :
self.SOC_read_vaspsh()
else :
self.write_vaspsh()
if run :
os.chdir(self.path)
subprocess.check_call(['qsub','vasp.sh'])
#os.system("qsub < vasp.sh")
os.chdir(pwd)
```
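Taken together, `inputgmd` parses input.gmd, `PerovInputs` turns a structure plus those settings into a pymatgen VaspInput for one calculation mode, and `RunningShell` writes the matching job script. A minimal, hedged sketch of that flow (the input.gmd and POSCAR paths are placeholders, and a shell file is assumed to exist under ShellPath):
```python
from pymatgen.core import Structure

inputs = inputgmd("input.gmd")                    # KPOINTS / INCAR / SHELL / CALMODE
structure = Structure.from_file("POSCAR")
perov = PerovInputs(structure=structure)

for mode in inputs.calmode:                       # e.g. ["R", "C", "B"]
    vasp_input = perov.inputfolder(inputs=inputs, method=mode)
    folder = "{}_{}".format(mode, perov.naming)
    vasp_input.write_input(output_dir=folder)     # INCAR, KPOINTS, POSCAR, POTCAR
    RunningShell(shell=inputs.shell, name=perov.naming, path=folder).running_mode(run=False)
    inputs = inputgmd("input.gmd")                # re-parse: incarmode mutates inputs.incar
```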
#### File: pygmd/cli/gmd_analysis.py
```python
import os
import sys
import numpy as np
import pandas as pd
import yaml
from collections import defaultdict
from shutil import copy
from pymatgen.io.vasp.outputs import Vasprun
from perovgen.pygmd.shell import ShellPath
from perovgen.pygmd.input_structure import load_structure,GMDStructure
from perovgen.pygmd.analysis.energy import GMDPhaseDiagram
def graphyaml(string) :
path = os.path.abspath(__file__).split(os.sep)[:-2]
path.append("analysis")
path.append("graph.yaml")
path = os.sep.join(path)
with open(path) as fi :
graph = yaml.load(fi,Loader=yaml.FullLoader)
return graph[string]
def pdospath() :
path = os.path.abspath(__file__).split(os.sep)[:-3]
path.append("pdos")
path.append("pdos")
path = os.sep.join(path)
return path
def analysis(args) :
if args.graph :
dic = graphyaml(args.graph)
with open('graph.yaml','w') as fi :
yaml.dump(dic,fi,default_flow_style=False)
print("%s generate the graph.yaml file"%(os.getcwd()))
elif args.input :
shell, numberlist = ShellPath().check()
while True :
number = int(input("Please enter the number >> "))
if number in numberlist :
break
mn = str(input("Please enter the mode ex) RCDB >> "))
with open("input.gmd",'w') as fi :
fi.write("### Perovgen input.gmd\n")
fi.write("KPOINTS:\nA\nCONSTK=30\n\n")
fi.write("INCAR:\nMPJ\n\n")
fi.write("SHELL:\n%s\n\n"%(shell[number-1][-1]))
fi.write("CALMODE:\n%s\n"%(mn))
fi.close()
elif args.pdos :
path = pdospath()
s = load_structure(os.getcwd())[0][0]
if not s :
print("Structure file doesn't exist!\n")
sys.exit(1)
# Total DOS
print("Total DOS")
os.system("%s dos width=0.03"%(path))
# partial DOS
dic = defaultdict(list)
print("pDOS")
for e,i in enumerate(s.species):
dic[i.symbol].append(e+1)
for k,v in dic.items():
for s in ['tot','s','p'] :
with open("LIST",'w') as fi :
fi.write("{}_{}\n".format(k,s))
for i in v :
if s == "tot" :
fi.write("%i tot tot\n"%(i))
elif s == "s" :
fi.write("%i s tot\n"%(i))
elif s == "p" :
fi.write("%i px tot\n"%(i))
fi.write("%i py tot\n"%(i))
fi.write("%i pz tot\n"%(i))
os.system("{} pdos width=0.03".format(path))
elif args.convert:
files = load_structure(args.convert)[0]
for e,f in enumerate(files) :
GMDStructure(f).convert_to_cif()
```
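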
#### File: pygmd/cli/gmd_auto.py
```python
import os
import sys
import time
import subprocess
from shutil import copyfile
from pymatgen.io.vasp.sets import MPRelaxSet
from pymatgen.io.vasp.outputs import Vasprun
from perovgen.pygmd.input_structure import load_structure, GMDStructure
from perovgen.pygmd.autocal.inputset import *
from perovgen.pygmd.autocal.algorithm import openingphrase
from perovgen.pygmd.analysis.electronic import BSPlotting, DOSPlotting, GMDAnalysis
from perovgen.pygmd.analysis.energy import GMDExcitonbinding
def analyze_calculation(args):
pwd = os.getcwd()
if args.inputpath :
inputs = inputgmd(args.inputpath)
else :
print("\nUserError : gmd3 auto [path of input.gmd] [path of structure] [optional]\n")
sys.exit(1)
if not args.strucpath :
print("\nUserError : gmd3 auto [path of input.gmd] [path of structure] [optional]\n")
sys.exit(1)
else :
if not load_structure(args.strucpath) :
print("\n[Warning] the structure file does not exist.\n")
sys.exit(1)
else :
strucpath = load_structure(args.strucpath)
openingphrase(inputs,strucpath[-1])
if args.directory :
for struc, filename in zip(strucpath[0], strucpath[-1]) :
fn = os.path.basename(filename).split(".")[0]
for mt in inputs.calmode:
runfolder = []
p = PerovInputs(structure=struc, is_selective=args.deleteselect)
try :
symmetry, groupnumber = struc.get_space_group_info()
except :
groupnumber = 0
# Revised INPUT FILES
vi = p.inputfolder(inputs=inputs, method=mt,soc=args.soc)
inputs = inputgmd(args.inputpath)
# Writing the Input directory
full_formula = GMDStructure(struc).formula(reduced=False)
pretty_formula = GMDStructure(struc).formula()
root_dir = "{0}/{1}_{2:03d}".format(pwd, pretty_formula, groupnumber)
folder_name = "{0}/{1}_{2}_{3}".format(root_dir,mt,full_formula,fn)
vi.write_input(output_dir=folder_name) ; runfolder.append(folder_name)
if mt == "E" :
folder_name_H = "{0}/H_{1}_{2}".format(root_dir,full_formula,fn)
vi.write_input(output_dir=folder_name_H); runfolder.append(folder_name_H)
for runf in runfolder :
naming = os.path.basename(runf)
rs = RunningShell(shell=inputs.shell, name=naming, path=runf)
rs.running_mode(soc=args.soc, run=False)
else :
for struc, filename in zip(strucpath[0], strucpath[-1]) :
fn = os.path.basename(filename).split(".")[0]
# directory
f = open("{}/calnohup.py".format(os.path.dirname(__file__)),"r")
ff = f.readlines()
ff.insert(0,"inputpath='{}'\n".format(os.path.abspath(args.inputpath)))
ff.insert(1,"strucpath='{}'\n".format(filename))
if args.deleteselect :
ff.insert(2,"ds=True\n")
else :
ff.insert(2,"ds=False\n")
if args.soc :
ff.insert(3,"orbit=True\n")
else :
ff.insert(3,"orbit=False\n")
ff.insert(4,"mole=False\n")
ff.insert(5,"\n")
# directory name
try :
symmetry, groupnumber = struc.get_space_group_info()
except :
groupnumber = 0
pretty_formula = GMDStructure(struc).formula()
root_dir = "{0}/{1}_{2:03d}".format(pwd, pretty_formula, groupnumber)
os.makedirs(root_dir,exist_ok=True)
with open("{}/run_{}.py".format(root_dir,fn),"w") as fi :
for i in ff :
fi.write(i)
fi.close()
#os.system("python {}/run_{}.py".format(root_dir,fn))
os.system("nohup python -u {}/run_{}.py > {}/gmd_{}.out &".format(root_dir,fn,root_dir,fn))
```
#### File: pygmd/cli/gmd_replace.py
```python
import os
import sys
from pymatgen.core import Structure
from pymatgen.io.vasp.outputs import Vasprun
from perovgen.pygmd.input_structure import GMDStructure, load_structure
from perovgen.pygmd.shell import ShellPath
from perovgen.pygmd.autocal.substitute import RandomMolecule, InputInform, RandomAtom
from perovgen.pygmd.autocal.algorithm import openingphrase
from perovgen.pygmd.autocal.inputset import *
import time
import subprocess
def molecule(args) :
if args.ma :
mafa = "MA"
elif args.fa :
mafa = "FA"
elif args.gua :
mafa = "GUA"
elif args.dima :
mafa = "diMA"
elif args.trima :
mafa = "triMA"
elif args.tetrama :
mafa = "tetraMA"
elif args.zolium :
mafa = "Zolium"
ms = RandomMolecule._loadmolecule(mafa)
inputatom, changenum, fixcalc, multiple = InputInform(random_coord=args.position, random_degree=args.degree).molecule_input()
mole = RandomMolecule(random_coord=args.position, random_degree=args.degree)
# Input
strucpath = []
structure = load_structure(args.path)[0]
e = 0
for s in structure :
for i in range(multiple) :
molestruc, csv = mole.tiltingmolecule(s, ms, inputatom=inputatom, changenum=changenum,fixcalc = fixcalc)
vaspname = GMDStructure(molestruc).formula(reduced=False)
if not os.path.exists(vaspname):
os.makedirs(vaspname)
molestruc.to(filename="{0}/POSCAR_{1}_{2:04d}".format(vaspname,vaspname,e+1))
strucpath.append("{0}/POSCAR_{1}_{2:04d}".format(vaspname,vaspname,e+1))
if args.csv :
if not os.path.exists("{}/CSV".format(vaspname)):
os.makedirs("{}/CSV".format(vaspname))
csv.to_csv("{0}/CSV/{1}_{2:04d}.csv".format(vaspname,vaspname,e+1))
e+=1
return strucpath
def element(args):
r_atom = RandomAtom(random_coord=args.position, random_degree=args.degree)
atom1, atom2, change, multiple = InputInform(random_coord=args.position, random_degree=args.degree).atom_input()
#Input
strucpath = []
structure = load_structure(args.path)[0]
e = 0
for s in structure :
for i in range(multiple) :
s1, s = r_atom.substitution(structure=s,atom1=atom1, atom2=atom2, ratio=change)
vaspname = GMDStructure(s1).formula(reduced=False)
if not os.path.exists(vaspname) :
os.makedirs(vaspname)
s1.to(filename="{0}/POSCAR_{1}_{2:04d}".format(vaspname,vaspname,e+1))
strucpath.append("{0}/POSCAR_{1}_{2:04d}".format(vaspname,vaspname,e+1))
e+=1
return strucpath
def randomreplace(args) :
if args.sub :
strucpath=element(args)
else :
strucpath=molecule(args)
if args.input :
inputs = inputgmd(args.input[0])
if inputs.calmode[0] == 'M' :
print(strucpath, inputs.calmode)
else :
print("If you want to calculate the molecule relaxed, please enter the M mode in CALMODE class")
sys.exit(1)
strucpath = load_structure(strucpath)
print(inputs.inputgmd)
for struc, filename in zip(strucpath[0], strucpath[-1]) :
for isif in ['ISIF7','ISIF2', 'ISIF3']:
if isif == 'ISIF7' :
os.makedirs("ISIF7")
elif isif == 'ISIF2' :
os.makedirs("ISIF2")
```
#### File: PerovGen/pygmd/input_structure.py
```python
import os
import sys
import numpy as np
import pandas as pd
from collections import defaultdict
from pymatgen.core import Structure
def load_structure(path):
'''
The structure file(s) found at the given path are read and returned
as a list
Args :
path(str) : OS.PATH
'''
struclist=[] ; spath=[]
if type(path) == str :
path = [path]
if path :
for p in path :
path1 = os.path.abspath(p)
if os.path.isfile(path1):
try :
s = Structure.from_file(path1)
for i in range(s.num_sites) :
try :
s.replace(i, s.species[i].element)
except :
s.replace(i, s.species[i])
struclist.append(s)
spath.append(path1)
except :
pass
else :
for j in os.listdir(path1):
try :
s = Structure.from_file("%s/%s"%(p,j))
for i in range(s.num_sites) :
try :
s.replace(i, s.species[i].element)
except :
s.replace(i, s.species[i])
struclist.append(s)
spath.append(j)
except :
pass
return struclist, spath
class GMDStructure :
'''
This class provides the structure files (e.g. cif, POSCAR)
needed for the automatic calculation
'''
def __init__(self, structure):
structure.sort()
self.structure = structure
self.coords = np.dot(self.structure.frac_coords, self.structure.lattice.matrix)
self.species = self.structure.species
def _split_molecule(self):
d = self.structure.distance_matrix
hcn_coords = defaultdict(list)
for i in range(len(self.coords)) :
if self.species[i].symbol == "C" :
hcn_coords["C"].append(i)
elif self.species[i].symbol == "H":
hcn_coords["H"].append(i)
elif self.species[i].symbol == "N" :
hcn_coords['N'].append(i)
# the number H and N of the round C
molecule = defaultdict(list)
for c in hcn_coords['C'] :
chbonding = np.where(d[c,hcn_coords['H'][0]:hcn_coords['H'][-1]+1] < 1.5)[0]
cnbonding = np.where(d[c,hcn_coords['N'][0]:hcn_coords['N'][-1]+1] < 1.5)[0]
if len(chbonding) == 0 and len(cnbonding) == 3 :
molecule["GUA"].append(1)
elif len(chbonding) == 1 and len(cnbonding) == 2 :
nhbonding = np.where(d[hcn_coords['N'][0]+cnbonding[0],hcn_coords['H'][0]:hcn_coords['H'][-1]+1]<1.5)[0]
if len(nhbonding) == 2 :
molecule["FA"].append(1)
else :
molecule['Zolium'].append(1)
elif len(chbonding) == 3 and len(cnbonding) == 1:
nhbonding = np.where(d[hcn_coords['N'][0]+cnbonding[0],hcn_coords['H'][0]:hcn_coords['H'][-1]+1]<1.5)[0]
if len(nhbonding) == 3 :
molecule["MA"].append(1)
elif len(nhbonding) == 2 :
molecule['DMA'].append(1)
elif len(nhbonding) == 1 :
molecule['triMA'].append(1)
elif len(nhbonding) == 0 :
molecule['tetraMA'].append(1)
else :
molecule['H'].append(1)
molecule['C'].append(1)
molecule['N'].append(1)
for k,v in molecule.items() :
if k == "DMA" :
molecule['DMA']=int(len(v)/2)
elif k == 'triMA' :
molecule['triMA']=int(len(v)/3)
elif k == 'tetraMA':
molecule['tetraMA']=int(len(v)/4)
else :
molecule[k]=len(v)
return molecule
def formula_dict(self) :
sn = self.structure.composition.get_el_amt_dict()
sn_dict = dict()
if 'C' in sn and 'H' in sn and 'N' in sn :
mole=self._split_molecule()
for k,v in mole.items() :
if k == 'FA' :
sn['C']-=v
sn['H']-=v*5
sn['N']-=v*2
elif k == 'MA' :
sn['C']-=v
sn['N']-=v
sn['H']-=v*6
elif k == 'GUA' :
sn['C']-=v
sn['N']-=v*3
sn['H']-=v*6
elif k == 'DMA' :
sn['C']-=v*2
sn['N']-=v
sn['H']-=v*8
elif k == 'triMA' :
sn['C']-=v*3
sn['N']-=v
sn['H']-=v*10
elif k == 'tetraMA':
sn['C']-=v*4
sn['N']-=v
sn['H']-=v*12
elif k == 'Zolium' :
sn['C']-=v*3
sn['N']-=v*2
sn['H']-=v*5
sn_dict[k]=v
if sn['C'] == 0 :
del sn['C']
else :
sn_dict['C'] = sn['C']
if sn['H'] == 0 :
del sn['H']
else :
sn_dict['H'] = sn['H']
if sn['N'] == 0 :
del sn['N']
else :
sn_dict['N'] = sn['N']
for k,v in sn.items():
sn_dict[k]=v
return sn_dict
def formula(self, reduced=True):
if reduced :
sn = self.structure.composition.to_reduced_dict
else :
sn = self.structure.composition.get_el_amt_dict()
vaspname=''
if 'C' in sn and 'H' in sn and 'N' in sn :
mole=self._split_molecule()
for k,v in mole.items() :
if v == 1 :
vaspname += k
else :
vaspname += "%s%i"%(k,v)
if k == 'FA' :
sn['C']-=v
sn['H']-=v*5
sn['N']-=v*2
elif k == 'MA' :
sn['C']-=v
sn['N']-=v
sn['H']-=v*6
elif k == 'GUA' :
sn['C']-=v
sn['N']-=v*3
sn['H']-=v*6
elif k == 'DMA' :
sn['C']-=v*2
sn['N']-=v
sn['H']-=v*8
elif k == 'triMA' :
sn['C']-=v*3
sn['N']-=v
sn['H']-=v*10
elif k == 'tetraMA':
sn['C']-=v*4
sn['N']-=v
sn['H']-=v*12
elif k == 'Zolium' :
sn['C']-=v*3
sn['N']-=v*2
sn['H']-=v*5
for k,v in sn.items() :
if int(v) == 1 :
vaspname += "%s"%(k)
elif int(v) <= 0 :
pass
else :
vaspname += "%s%i"%(k,v)
return vaspname
def convert_to_cif(self, name=False) :
'''
Convert POSCAR to cif. If name is False, the file name is [formula]_[space group number].cif.
name (str) = designate the file name
'''
if not name :
try :
symmetry, groupnumber = self.structure.get_space_group_info()
except :
groupnumber = 0
formula = self.formula(reduced=True)
name = "{0}_{1:03d}".format(formula, groupnumber)
return self.structure.to(filename="{}.cif".format(name))
```
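A short, hedged usage sketch of the two helpers above ("POSCAR" is a placeholder path):
```python
structures, paths = load_structure("POSCAR")
for structure, path in zip(structures, paths):
    gmd = GMDStructure(structure)
    print(path, gmd.formula(reduced=False))   # molecule-aware formula, e.g. MA8Pb8I24
    gmd.convert_to_cif()                      # writes <formula>_<space group>.cif
```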
|
{
"source": "jgpattis/Desres-sars-cov-2-apo-mpro",
"score": 2
}
|
#### File: Desres-sars-cov-2-apo-mpro/cryptic_pockets/plot_exposons.py
```python
import numpy as np
import pickle
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
from util.util import backup
mi_name = 'sasa_mi.npy'
exp_name = 'exposons.npy'
out_name = 'plots/exposons_try1'
o_type = 'pdf'
mi = np.load(open(mi_name,"rb"))
exp = np.load(open(exp_name,"rb"))
#out000 -= out000.diagonal() * np.eye(*out000.shape)
#lab_arr = obj1.labels + 1
#lab_list = lab_arr.tolist()
mi[mi < 0.005] = np.nan
#minr = lab_arr.min()
#maxr = lab_arr.max()
plt.imshow(mi, origin='lower', interpolation='none', cmap='viridis_r') #extent=[minr, maxr, minr, maxr])
cb = plt.colorbar()
plt.xlabel('Residue Number', fontsize=16)
plt.ylabel('Residue Number', fontsize=16)
cb.set_label('Log(MI)', fontsize=16)
plt.tight_layout()
plt.savefig(out_name + '_matrix.' + o_type)
plt.clf()
plt.plot(exp)
plt.xlabel('Residue Number', fontsize=16)
plt.ylabel('exposon', fontsize=16)
plt.tight_layout()
plt.savefig(out_name + '_exposon.' + o_type)
plt.clf()
def print_vmd_string(inds):
string = f'(residue {inds[0]}'
for i in range(1, len(inds)):
string = string + f' or residue {inds[i]}'
return string + ') and (sidechain or type CA)'
unique, un_inds, un_counts = np.unique(exp, return_index=True, return_counts=True)
order = np.argsort(un_counts)
name = out_name + '_VMD_selection.txt'
backup(name)
f = open(name,'w')
f.write('#selection strings to highlight exposons in VMD\n')
f.write('\n')
for i in range(2,12):
inds = np.where(exp == unique[order][-i])[0]
new_inds = print_vmd_string(inds)
f.write(new_inds + '\n')
f.write('\n')
```
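For reference, `print_vmd_string` simply joins residue indices into a single VMD selection; called on its own it behaves like this (illustrative only):
```python
# Illustrative call, assuming the function is imported on its own:
inds = np.array([10, 11, 42])
print(print_vmd_string(inds))
# (residue 10 or residue 11 or residue 42) and (sidechain or type CA)
```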
#### File: jgpattis/Desres-sars-cov-2-apo-mpro/featurize_02.py
```python
import mdtraj as md
import pyemma.coordinates as coor
import numpy as np
import pickle
import pyemma
import os
#traj_num = [ '0000', '0020', '0040', '0060', '0080']
traj_num = [f'{i:04d}' for i in range(100)]
traj_path = '../DESRES-Trajectory_sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA-'
traj_list = [ traj_path + str(i) + '.dcd' for i in traj_num]
pdb = '../DESRES_protease_chainid.pdb'
# Define features
def filtered_ca_distances(chain=0):
''' Pairwise filtered carbon alpha distances defined in filter_distances_01.py'''
dist_indsA = np.load(open("filtered_distance_featurization_01/filtered_dis_ind_10_035_morechainA.npy","rb"))
dist_indsB = np.load(open("filtered_distance_featurization_01/filtered_dis_ind_10_035_morechainB.npy","rb"))
featurizer = coor.featurizer(pdb)
if chain == 0:
featurizer.add_distances(dist_indsA)
elif chain == 1:
featurizer.add_distances(dist_indsB)
else:
raise ValueError("chain must be 0 or 1")
return featurizer
def filtered_ca_distances_larger(chain=0):
''' Pairwise filtered carbon alpha distances defined in filter_distances_01.py'''
dist_indsA = np.load(open("filtered_distance_featurization_01/filtered_dis_ind_12_03chainA.npy","rb"))
dist_indsB = np.load(open("filtered_distance_featurization_01/filtered_dis_ind_12_03chainA.npy","rb"))
featurizer = coor.featurizer(pdb)
if chain == 0:
featurizer.add_distances(dist_indsA)
elif chain == 1:
featurizer.add_distances(dist_indsB)
else:
raise ValueError("chain must be 0 or 1")
return featurizer
def ca_distances_skip5(chain=0):
''' Pairwise distances between every 5th carbon alpha '''
featurizer = coor.featurizer(pdb)
skip5 = featurizer.select(f'name == CA and chainid == {chain}')[::5]
featurizer.add_distances(skip5)
return featurizer
def backbone(chain=0):
''' Backbone Phi and Psi torsion angles '''
featurizer = coor.featurizer(pdb)
featurizer.add_backbone_torsions(cossin=True, selstr=f'chainid == {chain}')
return featurizer
def backbone_chi1(chain=0):
''' Backbone Phi and Psi as well as sidechain chi 1 torsion angles '''
featurizer = coor.featurizer(pdb)
featurizer.add_backbone_torsions(cossin=True, selstr=f'chainid == {chain}')
featurizer.add_sidechain_torsions(which='chi1', cossin=True, selstr=f'chainid == {chain}')
return featurizer
def backbone_chi1_2(chain=0):
''' Backbone Phi and Psi as well as sidechain chi 1 and chi 2 torsion angles '''
featurizer = coor.featurizer(pdb)
featurizer.add_backbone_torsions(cossin=True, selstr=f'chainid == {chain}')
featurizer.add_sidechain_torsions(which=['chi1','chi2'], cossin=True, selstr=f'chainid == {chain}')
return featurizer
def sasa_per_res(chain=0):
''' Solvent accessible surface area per residue '''
def calc_sasa(traj, chain=0, featurizer=None):
small_traj = traj.atom_slice(atom_indices=featurizer.select(f'chainid == {chain}'))
res = md.shrake_rupley(small_traj, probe_radius=0.14, n_sphere_points=960, mode='residue')
return res
featurizer = coor.featurizer(pdb)
featurizer.add_custom_func(calc_sasa, dim= int(featurizer.topology.n_residues/2), chain=0, featurizer=featurizer)
return featurizer
# Add new feature options to feature_list and give each a 5-character label in feature_short_label
feature_list = (filtered_ca_distances, filtered_ca_distances_larger, ca_distances_skip5, backbone, backbone_chi1, backbone_chi1_2, sasa_per_res)
feature_short_label = ('fil dis', 'm fdis', 'skip5', 'BB Tor', 'BB+C1', 'C1+C2', 'sasa')
chain_list = (0, 1)
label = []
label_file_name = 'feature_list_1.pickl'
stride = 1
# Featurize
for i,j in enumerate(feature_list):
for k in chain_list:
file_name = f"feature_data_02/{j.__name__}_chain_{k}.h5"
feat = j(chain=k)
if k == 1:
label.append({
'long_label': j.__name__,
'short_label': feature_short_label[i],
'path_a': f'feature_data_02/{j.__name__}_chain_0.h5',
'path_b': f'feature_data_02/{j.__name__}_chain_1.h5',
'num_features': feat.dimension()
})
if os.path.exists(file_name):
print(f'{j.__name__} exists')
continue
reader = coor.source(traj_list, features=feat, stride=stride)
reader.write_to_hdf5(file_name)
# Save as pandas dataframe
import pandas as pd
results = pd.DataFrame(label)
print(results.head())
results.to_pickle(label_file_name)
```
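Downstream scripts can recover both the features and their metadata from the pickled label table; a hedged sketch (file names as written above):
```python
import pandas as pd
import pyemma.coordinates as coor

labels = pd.read_pickle("feature_list_1.pickl")
row = labels[labels.short_label == "fil dis"].iloc[0]
data_chain_a = coor.load(row.path_a)   # one (n_frames, n_features) array per trajectory
print(row.long_label, row.num_features, len(data_chain_a))
```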
#### File: jgpattis/Desres-sars-cov-2-apo-mpro/pull_out_structure_08.py
```python
import numpy as np
import mdtraj as md
import pyemma.coordinates as coor
import pyemma.plots as pyemma_plots
import pyemma
from util.util import check_iter_of_sequences, KDTree
import matplotlib.pyplot as plt
def heatmap_label(tica_data_cat=None, cl=None, cl_list=None, ic2=1):
fig, ax = plt.subplots()
fig, ax, cb = pyemma_plots.plot_density(tica_data_cat[:,0], tica_data_cat[:,ic2], ax=ax, logscale=True)
ax.scatter(tica_data_cat[0,0], tica_data_cat[0,ic2], marker='x', s=200, c='k')
if cl_list == None:
cl_list = list(range(cl.n_clusters))
ax.scatter(cl.cluster_centers_[cl_list, 0],cl.cluster_centers_[cl_list, ic2], c='k')
for i in cl_list:
ax.annotate(f'{i}', (cl.cluster_centers_[i,0], cl.cluster_centers_[i,ic2]), fontsize=16, weight='bold', textcoords="offset points", xytext=(0,10), ha='center')
ax.set_xlabel(f'IC 1', fontsize=16)
ax.set_ylabel(f'IC {ic2 + 1}', fontsize=16)
cb['cbar'].set_label('Density', fontsize=16)
fig.tight_layout()
return fig, ax
def colorTIME_label(tica_data_cat=None, cl=None, cl_list=None, ic2=1, stride=2):
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True, figsize=[12.8, 4.8])
time = list(range(100000))
time_us = [i/1000 for i in time]
mid = int(len(tica_data_cat)/2)
print(mid)
cbb = ax1.scatter(tica_data_cat[:mid:stride,0], tica_data_cat[:mid:stride,ic2], c=time_us[::stride])
ax1.scatter(tica_data_cat[0,0], tica_data_cat[0,ic2], marker='x', s=200, c='k')
if cl_list == None:
cl_list = list(range(cl.n_clusters))
ax1.scatter(cl.cluster_centers_[cl_list, 0],cl.cluster_centers_[cl_list, ic2], c='k')
for i in cl_list:
ax1.annotate(f'{i}', (cl.cluster_centers_[i,0], cl.cluster_centers_[i,ic2]), fontsize=16, weight='bold', textcoords="offset points", xytext=(0,10), ha='center')
ax1.set_xlabel(f'IC 1', fontsize=16)
ax1.set_ylabel(f'IC {ic2 + 1}', fontsize=16)
ax1.set_title('Chain A', fontsize=18)
cb = ax2.scatter(tica_data_cat[mid::stride,0], tica_data_cat[mid::stride,ic2], c=time_us[::stride])
ax2.scatter(tica_data_cat[0,0], tica_data_cat[0,ic2], marker='x', s=200, c='k')
ax2.scatter(cl.cluster_centers_[cl_list, 0],cl.cluster_centers_[cl_list, ic2], c='k')
for i in cl_list:
ax2.annotate(f'{i}', (cl.cluster_centers_[i,0], cl.cluster_centers_[i,ic2]), fontsize=16, weight='bold', textcoords="offset points", xytext=(0,10), ha='center')
ax2.set_xlabel(f'IC 1', fontsize=16)
ax2.set_title('Chain B', fontsize=18)
cb2 = fig.colorbar(cbb, ax=ax1)
cb2.set_label(f'Time (\u03BCs)', fontsize=16)
cb1 = fig.colorbar(cb, ax=ax2)
cb1.set_label(f'Time (\u03BCs)', fontsize=16)
fig.tight_layout()
return fig, (ax1, ax2)
sys = 'fdis'
n_clusters = 50
tica_data = coor.load('tica_data_05/fdis_tica_data.h5')
tica_data_cat = np.concatenate(tica_data)
cl = pyemma.load(f'{sys}_{n_clusters}_mini_cluster_object.h5')
#label_list = [12, 41, 46]
label_list = [19, 48]
pull_structure = True
fig1, ax1 = heatmap_label(tica_data_cat=tica_data_cat, ic2=2, cl=cl, cl_list=label_list)
fig1.savefig(f'structure_08/{sys}_heatmap_label_IC3_two.pdf')
fig2, ax2 = colorTIME_label(tica_data_cat=tica_data_cat, ic2=2, cl=cl, cl_list=label_list)
fig2.savefig(f'structure_08/{sys}_color_label_ic3_two.pdf')
if pull_structure == True:
traj_num = [f'{i:04d}' for i in range(100)]
traj_path = '../DESRES-Trajectory_sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA-'
traj_list = [ traj_path + str(i) + '.dcd' for i in traj_num]
pdb = '../DESRES_protease_chainid.pdb'
tree = KDTree(tica_data)
dist, index_list = tree.query(cl.cluster_centers_[label_list], k=1)
chain_check = []
for i,j in zip(index_list, label_list):
if i[0] >= 100:
chain_check.append('b')
chain_b = i[0] - 100
new_pdb = md.load_frame(traj_list[chain_b], index=i[1], top=pdb) # if using a stride multiply index by stride
new_pdb_chain = new_pdb.atom_slice(new_pdb.topology.select('chainid == 1'))
new_pdb_chain.save_pdb(f'structure_08/structure_ic3_chainB_index_{j}.pdb')
else:
chain_check.append('a')
new_pdb = md.load_frame(traj_list[i[0]], index=i[1], top=pdb) # if using a stride multiply index by stride
new_pdb_chain = new_pdb.atom_slice(new_pdb.topology.select('chainid == 0'))
new_pdb_chain.save_pdb(f'structure_08/structure_ic3_chainA_index_{j}.pdb')
if len(set(chain_check)) == 1:
print(f'All structures are from chain {chain_check[0]}')
else:
print('WARNING: structures are from different chains')
print('indexes are ', index_list)
print('distances from center are ', dist)
```
#### File: Desres-sars-cov-2-apo-mpro/util/plot_structure_util.py
```python
import numpy as np
import mdtraj as md
def plot_vmd_cylinder_from_inds(structure_file, inds, outname, residue=False, color='blue', width=3):
'''writes a tcl file which can draw cylinders in VMD
structure_file should be a pdb
inds should be 0-indexed atom indices,
or, if residue = True, 0-indexed residue indices
color should be the name of a VMD color as a string
instructions:
1. open structure_file in VMD
2. open the Tk console: Extensions > Tk Console
3. the default color is dark blue; for other colors type: set color 3
(the number is a VMD color code)
4. source outname.tcl'''
t = md.load(structure_file)
top = t.topology
first_frame = t.xyz
if outname.endswith('.tcl'):
f = open(outname,'w')
else:
f = open(outname + '.tcl','w')
start = '{'
end = '}'
bk = '\n'
f.write('set center {0 0 0}\n')
f.write(f'draw color {color} {bk}')
for i in range(len(inds)):
j = inds[i,0]
k = inds[i,1]
if residue == True:
l = top.select('resid ' + str(j) + ' and name CA')
m = top.select('resid ' + str(k) + ' and name CA')
n1 = first_frame[0,l,:] * 10
o1 = first_frame[0,m,:] * 10
n = n1[0]
o = o1[0]
else:
n = first_frame[0,j,:] * 10
o = first_frame[0,k,:] * 10
f.write(f'graphics top line {start} {n[0]:.4f} {n[1]:.4f} {n[2]:.4f} {end} {start} {o[0]:.4f} {o[1]:.4f} {o[2]:.4f} {end} width {width} style solid {bk}' )
f.close()
def plot_pymol_cylinder_from_inds(structure_file, inds, outname, residue=False):
'''writes a python file which can draw cylinders in PyMOL
structure_file should be a pdb
inds should be 0-indexed atom indices,
or, if residue = True, 0-indexed residue indices
instructions:
1. open PyMOL
2. go to the command line
3. run outname.py'''
t = md.load(structure_file)
top = t.topology
first_frame = t.xyz
if outname.endswith('.py'):
f = open(outname,'w')
else:
f = open(outname + '.py','w')
f.write('from pymol import cmd\n')
f.write('from pymol.cgo import *\n')
f.write("cmd.load('" + str(structure_file) + "', 'prot')\n")
f.write("cmd.show('cartoon')\n")
f.write('obj=[]\n')
for i in range(len(inds)):
j = inds[i,0]
k = inds[i,1]
if residue == True:
l = top.select('resid ' + str(j) + ' and name CA')
m = top.select('resid ' + str(k) + ' and name CA')
n1 = first_frame[0,l,:] * 10
o1 = first_frame[0,m,:] * 10
n = n1[0]
o = o1[0]
else:
n = first_frame[0,j,:] * 10
o = first_frame[0,k,:] * 10
bk = '\n'
f.write(f'obj.extend([CYLINDER, {n[0]:.4f}, {n[1]:.4f}, {n[2]:.4f}, {o[0]:.4f}, {o[1]:.4f}, {o[2]:.4f}, 0.15, 0.3917, 0.3917, 0.9961, 0.3917, 0.3917, 0.9961, ]){bk}' )
f.write("cmd.load_cgo(obj, 'contacts')")
f.close()
```
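A hedged usage sketch; the pdb path and residue pairs below are placeholders:
```python
import numpy as np

pairs = np.array([[10, 45], [12, 47], [30, 90]])          # 0-indexed residue pairs
plot_vmd_cylinder_from_inds("protein.pdb", pairs, "contacts.tcl", residue=True, color="red")
plot_pymol_cylinder_from_inds("protein.pdb", pairs, "contacts", residue=True)
```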
|
{
"source": "jgperrin/net.jgp.books.spark.ch13",
"score": 3
}
|
#### File: python/lab100_json_shipment/jsonShipmentDisplayApp.py
```python
import os
from pyspark.sql import SparkSession
def get_absolute_file_path(path, filename):
# To get absolute path for a given filename
current_dir = os.path.dirname(__file__)
relative_path = "{}{}".format(path, filename)
absolute_file_path = os.path.join(current_dir, relative_path)
return absolute_file_path
def main(spark):
# The processing code.
filename = 'shipment.json'
path = '../../../../data/json/'
absolute_file_path = get_absolute_file_path(path, filename)
# Reads a JSON, stores it in a dataframe
df = spark.read.format("json") \
.option("multiline", True) \
.load(absolute_file_path)
# Shows at most 5 rows from the dataframe (there's only one anyway)
df.show(5, 16)
df.printSchema()
if __name__ == '__main__':
# Creates a session on a local master
spark = SparkSession.builder.appName("Display of shipment") \
.master("local[*]").getOrCreate()
# Comment this line to see full log
spark.sparkContext.setLogLevel('error')
main(spark)
spark.stop()
```
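The format/option/load chain above can also be written with the json shorthand reader; both handle the multiline document the same way (sketch only):
```python
# Equivalent shorthand for the read above:
df = spark.read.json(absolute_file_path, multiLine=True)
df.show(5, 16)
```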
|
{
"source": "jgphilpott/blackbox",
"score": 2
}
|
#### File: blackbox/app/root.py
```python
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from sense_hat import SenseHat
from sensors import get_readings
from os import urandom
app = Flask("blackbox", template_folder="app", static_folder="app")
app.config["SECRET_KEY"] = urandom(42).hex()
app.jinja_env.auto_reload = True
sense_hat = SenseHat()
sense_hat.clear()
socketio = SocketIO(app)
@app.route("/")
def home():
return render_template("html/home.html")
@socketio.on("request_reading")
def request_reading():
try:
socketio.emit("new_reading", get_readings(sense_hat), broadcast=True)
except:
print("Reading Error!")
@socketio.on("new_comms")
def new_comms(message):
sense_hat.show_message(message)
if __name__ == "__main__":
socketio.run(app, host="0.0.0.0", port=5000, debug=True)
```
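A client can drive the two handlers above over Socket.IO. The sketch below uses the python-socketio package, which is not part of this app, and assumes the server is reachable on localhost:5000.
```python
import time
import socketio

client = socketio.Client()

@client.on("new_reading")
def on_new_reading(reading):
    print(reading)                     # whatever get_readings(sense_hat) returned

client.connect("http://localhost:5000")
client.emit("new_comms", "hello")      # scrolls the message on the Sense HAT LEDs
for _ in range(5):
    client.emit("request_reading")     # server replies with a "new_reading" event
    time.sleep(1)
client.disconnect()
```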
|
{
"source": "jgphilpott/dream_net",
"score": 3
}
|
#### File: jgphilpott/dream_net/dream_net.py
```python
import os
import numpy as np
from PIL import Image
import tensorflow as tf
import tensorflow_hub as hub
os.environ["TFHUB_MODEL_LOAD_FORMAT"] = "COMPRESSED"
hub_model_one = hub.load("https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/1")
hub_model_two = hub.load("https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2")
content_image = ""
style_image = ""
print("\nPlease provide the path to the desired content image (including the extension).\n")
while not os.path.isfile(content_image):
content_image = input()
if not os.path.isfile(content_image):
print("\nThe path you provided does not exist, please try again.\n")
print("\nPlease provide the path to the desired style image (including the extension).\n")
while not os.path.isfile(style_image):
style_image = input()
if not os.path.isfile(style_image):
print("\nThe path you provided does not exist, please try again.\n")
max_size = None
print("\nPlease provide the desired size of the output image (about 500px is recommended).\n")
while not type(max_size) == int:
try:
max_size = int(input())
except:
print("\nThe input you provided is not an integer, please try again.\n")
def load_image(path_to_image):
def resize_image(image):
shape = tf.cast(tf.shape(image)[:-1], tf.float32)
scale = max_size / max(shape)
new_shape = tf.cast(shape * scale, tf.int32)
return tf.image.resize(image, new_shape)[tf.newaxis, :]
image = tf.io.read_file(path_to_image)
image = tf.image.decode_image(image, channels=3)
image = tf.image.convert_image_dtype(image, tf.float32)
return resize_image(image)
stylized_image_one = hub_model_one(tf.constant(load_image(content_image)), tf.constant(load_image(style_image)))[0]
stylized_image_two = hub_model_two(tf.constant(load_image(content_image)), tf.constant(load_image(style_image)))[0]
Image.fromarray(np.array(stylized_image_one * 255, dtype=np.uint8)[0]).save("stylized_image_one.jpg")
Image.fromarray(np.array(stylized_image_two * 255, dtype=np.uint8)[0]).save("stylized_image_two.jpg")
print("\nDone!\n")
```
|
{
"source": "jgphilpott/iGraph",
"score": 2
}
|
#### File: mongo/backups/loader.py
```python
from back.mongo.backups.js import load_js
from back.mongo.backups.json import load_json
from back.mongo.backups.sass import load_sass
def load_data(path):
load_js(path)
load_json(path)
load_sass(path)
# Thread(target=load_json, args=(path,)).start()
```
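The commented-out Thread line hints at loading the backups in parallel; a hedged sketch of that variant (the hypothetical `load_data_parallel` is not part of the repo):
```python
from threading import Thread

def load_data_parallel(path):
    threads = [Thread(target=loader, args=(path,)) for loader in (load_js, load_json, load_sass)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
```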
|
{
"source": "jgphilpott/polyMaker",
"score": 2
}
|
#### File: mongo/data/base.py
```python
from mongo.client import mongo_client
client = mongo_client()
database = client.polymorph
def mongo_database():
return database
```
#### File: collect/images/mongo.py
```python
from mongo.data.collect.ions import mongo_collection
collection = mongo_collection("images")
def new_image(image):
return str(collection.insert_one(image).inserted_id)
def new_images(images):
return str(collection.insert_many(images).inserted_ids)
def find_image(query={}, filter={"_id": 0}):
return dict(collection.find_one(query, filter))
def find_images(query={}, filter={"_id": 0}, sort=[("id", 1)], limit=0):
collection.create_index(sort)
return list(collection.find(query, filter).sort(sort).limit(limit))
def update_image(image):
return collection.update_one({"id": image["id"]}, {"$set": image})
def update_images(images):
for image in images:
update_image(image)
def delete_image(image):
return collection.delete_one({"id": image["id"]})
def delete_images(images):
for image in images:
delete_image(image)
```
#### File: data/collect/ions.py
```python
from mongo.data.base import mongo_database
database = mongo_database()
def mongo_collection(collection):
return database[collection]
def mongo_collections():
return database.list_collection_names()
```
#### File: collect/models/mongo.py
```python
from mongo.data.collect.ions import mongo_collection
collection = mongo_collection("models")
def new_model(model):
return str(collection.insert_one(model).inserted_id)
def new_models(models):
return str(collection.insert_many(models).inserted_ids)
def find_model(query={}, filter={"_id": 0}):
return dict(collection.find_one(query, filter))
def find_models(query={}, filter={"_id": 0}, sort=[("id", 1)], limit=0):
collection.create_index(sort)
return list(collection.find(query, filter).sort(sort).limit(limit))
def update_model(model):
return collection.update_one({"id": model["id"]}, {"$set": model})
def update_models(models):
for model in models:
update_model(model)
def delete_model(model):
return collection.delete_one({"id": model["id"]})
def delete_models(models):
for model in models:
delete_model(model)
```
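A hedged usage sketch of the model collection helpers above (the "name" field is an assumption about the stored documents):
```python
top_models = find_models(query={}, sort=[("id", -1)], limit=10)
for model in top_models:
    print(model.get("id"), model.get("name"))   # "name" is assumed; fields depend on the stored documents
```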
|
{
"source": "jgphilpott/polyplot",
"score": 3
}
|
#### File: backups/js/__init__.py
```python
from os import makedirs
from os.path import exists
from requests import get
from urllib.request import urlretrieve
def load_js(path):
path += "/libs/mjs/vendor"
if not exists(path):
makedirs(path)
urlretrieve("https://cdnjs.cloudflare.com/ajax/libs/js-sha256/0.9.0/sha256.min.js", path + "/sha256.js")
urlretrieve("https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.4.0/socket.io.min.js", path + "/socket.js")
urlretrieve("https://cdnjs.cloudflare.com/ajax/libs/jquery/3.6.0/jquery.min.js", path + "/jQuery.js")
urlretrieve("https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.1/jquery-ui.min.js", path + "/jQueryUI.js")
urlretrieve("https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min.js", path + "/d3.js")
urlretrieve("https://cdnjs.cloudflare.com/ajax/libs/three.js/110/three.min.js", path + "/three.js")
urlretrieve("https://raw.githubusercontent.com/jeromeetienne/threex.domevents/1100828c40df668a82a97c811895918088cc607f/threex.domevents.js", path + "/threeX.js")
with open(path + "/math.js", "w") as file:
numeric = get("https://raw.githubusercontent.com/sloisel/numeric/656fa1254be540f428710738ca9c1539625777f1/src/numeric.js").content.decode("utf-8")
calc = get("https://gist.githubusercontent.com/jgphilpott/4276345a5b7c96fc010afa28cc5d38b6/raw/26c8d0b1217c0e6dbf771fc80fd22dd3a35cb963/calculus.js").content.decode("utf-8")
regr = get("https://gist.githubusercontent.com/jgphilpott/d38279e8fac9af31054e10b7363bf17e/raw/3684fcc69634970a75b1fa454b1a0f7b3c2c1a03/regression.js").content.decode("utf-8")
trig = get("https://gist.githubusercontent.com/jgphilpott/1378cc2cccde6d65c5fb2b6111b5a98f/raw/587408f905ba1da6fcc970cb095bdf129ffa308b/trigonometry.js").content.decode("utf-8")
angles = get("https://gist.githubusercontent.com/jgphilpott/092c0f3e1bcfa75f543e8485b9b23e7d/raw/813b2b7ac4c3cbcfc5caec5eec3600bba3bf5edc/angleConverter.js").content.decode("utf-8")
scaling = get("https://gist.githubusercontent.com/jgphilpott/6332dc7f5636db9ba455e1575407c496/raw/b72589532af0b7c63e321b15254acbb848248209/scaling.js").content.decode("utf-8")
roots = get("https://gist.githubusercontent.com/jgphilpott/e483b5fbe52a7233c292f35737e5a682/raw/d85ccaecf7d6b606809764b39f841a063c9a1fdc/roots.js").content.decode("utf-8")
math = numeric + "\n" + calc + "\n" + regr + "\n" + trig + "\n" + angles + "\n" + scaling + "\n" + roots
file.write(math)
with open(path + "/tools.js", "w") as file:
particleWeb = get("https://raw.githubusercontent.com/jgphilpott/particleWeb/bd00fb4eaf1b8abeb0f3f720f3f938f9d5b5c772/particleWeb.js").content.decode("utf-8")
camalize = get("https://gist.githubusercontent.com/jgphilpott/19e7a94cdf6d6d4cd868cc18e628026c/raw/2c5d68bb84f0d4e14478bcac359a77137f6a25f5/camalize.js").content.decode("utf-8")
rotation = get("https://gist.githubusercontent.com/jgphilpott/1bc17b82063f14fabb8f3e38825f6f10/raw/b5ddf5f386213f47ac4fd4b9f41bc116b37f29a3/rotation.js").content.decode("utf-8")
d3Sorting = get("https://gist.githubusercontent.com/jgphilpott/cf12c05f7c30844a4d7fa70ec9a7945a/raw/14fbbe992d10c07a467afb4dad2647356238ab3c/d3Sorting.js").content.decode("utf-8")
cookieFuncs = get("https://gist.githubusercontent.com/jgphilpott/b9ce64b9ef8b04c5ac58902b133b1a28/raw/8931a5cd26c48945e932a7399f853b593687f557/cookieFunctions.js").content.decode("utf-8")
validEmail = get("https://gist.githubusercontent.com/jgphilpott/a1ffedea1d1a70320b8075597df1943a/raw/29b8f25b2a439a117783523f209ba42ef5e9cf9d/validEmail.js").content.decode("utf-8")
localStorage = get("https://gist.githubusercontent.com/jgphilpott/e26b92eb41b64e9565032d5c4d3c2878/raw/593ae1721ca986210862da158db663f21dec42af/localStorage.js").content.decode("utf-8")
format = get("https://gist.githubusercontent.com/jgphilpott/787659ac4ea57a9971da58a76191079b/raw/d87c450947083ab134999408cec38fb70756593a/numberFormater.js").content.decode("utf-8")
subset = get("https://gist.githubusercontent.com/jgphilpott/a1367ca419ac2807ed4340d69356b7f1/raw/48ad3970a6a370853d18157142421ab02b1e2398/subset.js").content.decode("utf-8")
background = get("https://gist.githubusercontent.com/jgphilpott/bfae397ee7d483b49754291b29db87e0/raw/62abb7a1736f78c1c0800b68f9d45beeddb90020/panelBackground.js").content.decode("utf-8")
tools = particleWeb + "\n" + camalize + "\n" + rotation + "\n" + d3Sorting + "\n" + cookieFuncs + "\n" + validEmail + "\n" + localStorage + "\n" + format + "\n" + subset + "\n" + background
file.write(tools)
```
#### File: back/mongo/client.py
```python
from os import environ
from pymongo import MongoClient
def mongo_client():
return MongoClient(environ["MONGO_PORT_27017_TCP_ADDR"], 27017)
```
#### File: collect/maps/mongo.py
```python
from back.mongo.data.collect.ions import find_collection
from back.mongo.data.collect.maps.model import Map
def find_map(query={}, filter={"_id": 0}, detail="micro"):
collection = find_collection("maps_" + detail)
return dict(collection.find_one(query, filter))
def find_maps(query={}, filter={"_id": 0}, sort=[("properties.code", 1)], limit=0, detail="micro"):
collection = find_collection("maps_" + detail)
collection.create_index(sort)
return list(collection.find(query, filter).sort(sort).limit(limit))
```
#### File: collect/metas/model.py
```python
class Meta():
def __init__(self, meta):
self.code = meta["code"]
self.value = meta["value"]
```
#### File: collect/ports/mongo.py
```python
from back.mongo.data.collect.ions import find_collection
from back.mongo.data.collect.ports.model import Port
collection = find_collection("ports")
def find_port(query={}, filter={"_id": 0}):
return dict(collection.find_one(query, filter))
def find_ports(query={}, filter={"_id": 0}, sort=[("properties.flow", -1), ("properties.code", 1)], limit=0):
collection.create_index(sort)
return list(collection.find(query, filter).sort(sort).limit(limit))
```
#### File: collect/railroads/model.py
```python
class Railroad():
def __init__(self, railroad):
self.geometry = railroad["geometry"]
self.properties = railroad["properties"]
self.type = railroad["type"]
```
#### File: sked/tasks/indicators.py
```python
from time import sleep
from back.mongo.data.collect.indicators.mongo import Indicator, find_indicator, find_indicators, update_indicator
def update_indicators():
for indicator in find_indicators({}, {"_id": 0, "code": 1}):
update_indicator(Indicator(find_indicator({"code": indicator["code"]})).update().__dict__)
sleep(60)
```
#### File: socket/airports/__init__.py
```python
from flask_socketio import emit
from back.mongo.data.collect.airports.mongo import find_airport, find_airports
def connect_airports(app):
@app.on("get_airport")
def get_airport(query={}, filter={"_id": 0}):
emit("new_airport", find_airport(query, filter))
@app.on("get_airports")
def get_airports(query={}, filter={"_id": 0}, sort=[("properties.flow", -1), ("properties.code", 1)], limit=0):
emit("new_airports", find_airports(query, filter, [tuple(item) for item in sort], limit))
```
#### File: socket/graticules/__init__.py
```python
from flask_socketio import emit
from back.mongo.data.collect.graticules.mongo import find_graticule, find_graticules
def connect_graticules(app):
@app.on("get_graticule")
def get_graticule(query={}, filter={"_id": 0}):
emit("new_graticule", find_graticule(query, filter))
@app.on("get_graticules")
def get_graticules(query={}, filter={"_id": 0}, sort=[("step", -1)], limit=0):
emit("new_graticules", find_graticules(query, filter, [tuple(item) for item in sort], limit))
```
#### File: socket/maps/__init__.py
```python
from flask_socketio import emit
from back.mongo.data.collect.maps.mongo import find_map, find_maps
def connect_maps(app):
@app.on("get_map")
def get_map(query={}, filter={"_id": 0}, detail="micro"):
emit("new_map", find_map(query, filter, detail))
@app.on("get_maps")
def get_maps(query={}, filter={"_id": 0}, sort=[("properties.code", 1)], limit=0, detail="micro"):
emit("new_maps", find_maps(query, filter, [tuple(item) for item in sort], limit, detail))
```
#### File: socket/ports/__init__.py
```python
from flask_socketio import emit
from back.mongo.data.collect.ports.mongo import find_port, find_ports
def connect_ports(app):
@app.on("get_port")
def get_port(query={}, filter={"_id": 0}):
emit("new_port", find_port(query, filter))
@app.on("get_ports")
def get_ports(query={}, filter={"_id": 0}, sort=[("properties.flow", -1), ("properties.code", 1)], limit=0):
emit("new_ports", find_ports(query, filter, [tuple(item) for item in sort], limit))
```
#### File: socket/rivers/__init__.py
```python
from flask_socketio import emit
from back.mongo.data.collect.rivers.mongo import find_river, find_rivers
def connect_rivers(app):
@app.on("get_river")
def get_river(query={}, filter={"_id": 0}, detail="micro"):
emit("new_river", find_river(query, filter, detail))
@app.on("get_rivers")
def get_rivers(query={}, filter={"_id": 0}, sort=[("properties.id", 1)], limit=0, detail="micro"):
emit("new_rivers", find_rivers(query, filter, [tuple(item) for item in sort], limit, detail))
```
#### File: errors/_500_/route.py
```python
from flask import render_template, request
from back.mongo.data.collect.clients import valid_client
def register_500_error_route(app):
@app.errorhandler(500)
def internal_server_error(error):
data = {"plot": {"type": "500"}, "code": 500, "message": "Internal Server Error"}
if "id" in request.cookies: data["client"] = valid_client(request.cookies.get("id"))
return render_template("tree/errors/_500_/page.html", data=data)
```
#### File: api/graticules/route.py
```python
from ast import literal_eval
from flask import jsonify, request
from back.mongo.data.collect.graticules.mongo import find_graticules
def register_api_graticules_route(app):
@app.route("/api/graticules")
def api_graticules():
query = literal_eval(request.args.get("query")) if "query" in request.args else {"step": {"$in": [10, 20, 30]}}
filter = literal_eval(request.args.get("filter")) if "filter" in request.args else {}
sort = literal_eval(request.args.get("sort")) if "sort" in request.args else [("step", -1)]
limit = literal_eval(request.args.get("limit")) if "limit" in request.args else 0
filter["_id"] = 0
data = find_graticules(query, filter, sort, limit)
return jsonify(data)
```
#### File: api/maps/route.py
```python
from ast import literal_eval
from flask import jsonify, request
from back.mongo.data.collect.maps.mongo import find_maps
def register_api_maps_route(app):
@app.route("/api/maps")
def api_maps():
query = literal_eval(request.args.get("query")) if "query" in request.args else {}
filter = literal_eval(request.args.get("filter")) if "filter" in request.args else {}
sort = literal_eval(request.args.get("sort")) if "sort" in request.args else [("properties.name", 1)]
limit = literal_eval(request.args.get("limit")) if "limit" in request.args else 0
detail = literal_eval(request.args.get("detail")) if "detail" in request.args else "micro"
filter["_id"] = 0
data = find_maps(query, filter, sort, limit, detail)
return jsonify(data)
```
#### File: rivers/river/route.py
```python
from ast import literal_eval
from flask import jsonify, request
from back.mongo.data.collect.rivers.mongo import find_river
def register_api_river_route(app):
@app.route("/api/river")
def api_river():
query = literal_eval(request.args.get("query")) if "query" in request.args else {}
filter = literal_eval(request.args.get("filter")) if "filter" in request.args else {}
detail = literal_eval(request.args.get("detail")) if "detail" in request.args else "micro"
filter["_id"] = 0
data = find_river(query, filter, detail)
return jsonify(data)
```
#### File: home/indicators/route.py
```python
from flask import request, make_response, render_template
from back.mongo.data.collect.clients import valid_client
from back.mongo.data.collect.indicators import find_indicators
def register_indicators_route(app):
@app.route("/indicators")
def indicators():
data = {"plot": {"plots": find_indicators({"countries": {"$exists": True, "$ne": []}, "completeness": {"$gt": 0}}, {"_id": 0, "code": 1, "name": 1, "categories": 1, "completeness": 1, "size": 1}, [("name", 1)]), "type": "Indicators"}}
if "id" in request.cookies: data["client"] = valid_client(request.cookies.get("id"))
r_code = request.cookies.get("r") if "r" in request.cookies else "SP.POP.TOTL"
x_code = request.cookies.get("x") if "x" in request.cookies else "SP.DYN.LE00.IN"
y_code = request.cookies.get("y") if "y" in request.cookies else "SP.DYN.TFRT.IN"
z_code = request.cookies.get("z") if "z" in request.cookies else "NY.GDP.PCAP.KD.ZG"
response = make_response(render_template("tree/home/indicators/page.html", data=data))
response.set_cookie("r", r_code)
response.set_cookie("x", x_code)
response.set_cookie("y", y_code)
response.set_cookie("z", z_code)
return response
```
#### File: home/poly3/route.py
```python
from flask import request, make_response, render_template
from back.mongo.data.collect.clients import valid_client
from back.mongo.data.collect.countries import find_countries
def register_poly3_route(app):
@app.route("/poly3")
def poly3():
data = {"plot": {"title": "World Bank Development Indicators", "type": "Poly3"}}
if "id" in request.cookies: data["client"] = valid_client(request.cookies.get("id"))
r_code = request.args.get("r") if "r" in request.args else request.cookies.get("r") if "r" in request.cookies else "SP.POP.TOTL"
x_code = request.args.get("x") if "x" in request.args else request.cookies.get("x") if "x" in request.cookies else "SP.DYN.LE00.IN"
y_code = request.args.get("y") if "y" in request.args else request.cookies.get("y") if "y" in request.cookies else "SP.DYN.TFRT.IN"
z_code = request.args.get("z") if "z" in request.args else request.cookies.get("z") if "z" in request.cookies else "NY.GDP.PCAP.KD.ZG"
r_dash_code = r_code.replace(".", "-")
x_dash_code = x_code.replace(".", "-")
y_dash_code = y_code.replace(".", "-")
z_dash_code = z_code.replace(".", "-")
countries = find_countries({}, {"_id": 0, "code": 1, "name": 1, "formal_name": 1, "region": 1, "indicators." + r_dash_code: 1, "indicators." + x_dash_code: 1, "indicators." + y_dash_code: 1, "indicators." + z_dash_code: 1, "centroid": 1, "factbook": 1, "wiki": 1, "min_year": 1, "max_year": 1})
min_cap = request.args.get("minCap") if "minCap" in request.args else request.cookies.get("minCap") if "minCap" in request.cookies else countries[0]["min_year"]
year = request.args.get("year") if "year" in request.args else request.cookies.get("year") if "year" in request.cookies else round((countries[0]["min_year"] + countries[0]["max_year"]) / 2)
max_cap = request.args.get("maxCap") if "maxCap" in request.args else request.cookies.get("maxCap") if "maxCap" in request.cookies else countries[0]["max_year"]
data["plot"]["r"] = {"code": r_code, "name": countries[0]["indicators"][r_dash_code]["name"], "categories": countries[0]["indicators"][r_dash_code]["categories"]}
data["plot"]["x"] = {"code": x_code, "name": countries[0]["indicators"][x_dash_code]["name"], "categories": countries[0]["indicators"][x_dash_code]["categories"]}
data["plot"]["y"] = {"code": y_code, "name": countries[0]["indicators"][y_dash_code]["name"], "categories": countries[0]["indicators"][y_dash_code]["categories"]}
data["plot"]["z"] = {"code": z_code, "name": countries[0]["indicators"][z_dash_code]["name"], "categories": countries[0]["indicators"][z_dash_code]["categories"]}
data["plot"]["t"] = {"minCap": int(min_cap), "year": int(year), "maxCap": int(max_cap)}
for country in countries:
country["r"] = country["indicators"][r_dash_code]["history"]
country["x"] = country["indicators"][x_dash_code]["history"]
country["y"] = country["indicators"][y_dash_code]["history"]
country["z"] = country["indicators"][z_dash_code]["history"]
del country["indicators"]
data["plot"]["plots"] = countries
response = make_response(render_template("tree/home/poly3/page.html", data=data))
response.set_cookie("r", r_code)
response.set_cookie("x", x_code)
response.set_cookie("y", y_code)
response.set_cookie("z", z_code)
response.set_cookie("minCap", str(min_cap))
response.set_cookie("year", str(year))
response.set_cookie("maxCap", str(max_cap))
return response
```
|
{
"source": "jgphilpott/sensors",
"score": 3
}
|
#### File: Biometrics/Pulse/MCP3008.py
```python
from spidev import SpiDev
class MCP3008:
def __init__(self, bus = 0, device = 0):
self.bus, self.device = bus, device
self.spi = SpiDev()
self.open()
self.spi.max_speed_hz = 1000000 # 1MHz
def open(self):
self.spi.open(self.bus, self.device)
self.spi.max_speed_hz = 1000000 # 1MHz
def read(self, channel = 0):
cmd1 = 4 | 2 | (( channel & 4) >> 2)
cmd2 = (channel & 3) << 6
adc = self.spi.xfer2([cmd1, cmd2, 0])
data = ((adc[1] & 15) << 8) + adc[2]
return data
def close(self):
self.spi.close()
```
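A minimal usage sketch for the MCP3008 class above (the import path, channel number, and 3.3 V reference are assumptions; the 0–1023 range follows from the MCP3008 being a 10-bit ADC):
```python
# Hypothetical usage sketch; assumes the class above is saved as MCP3008.py
# and SPI is enabled on the Raspberry Pi.
from time import sleep

from MCP3008 import MCP3008

adc = MCP3008(bus=0, device=0)
try:
    while True:
        raw = adc.read(channel=0)      # 10-bit ADC, so values are expected in 0..1023
        volts = raw / 1023.0 * 3.3     # assuming a 3.3 V reference voltage
        print(f"raw={raw} voltage={volts:.2f} V")
        sleep(0.5)
except KeyboardInterrupt:
    adc.close()
```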
#### File: Lasers/SunFounder_Laser/laser.py
```python
import sys
import time
import RPi.GPIO as GPIO
LedPin = 11
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LedPin, GPIO.OUT)
GPIO.output(LedPin, GPIO.HIGH)
on_time = int(sys.argv[1]) / 1000 if len(sys.argv) > 1 else 1
off_time = int(sys.argv[2]) / 1000 if len(sys.argv) > 2 else 1
def loop():
while True:
GPIO.output(LedPin, GPIO.LOW) # ON
time.sleep(on_time)
GPIO.output(LedPin, GPIO.HIGH) # OFF
time.sleep(off_time)
def stop():
GPIO.output(LedPin, GPIO.HIGH)
GPIO.cleanup()
try:
loop()
except KeyboardInterrupt:
stop()
```
#### File: Motion/PIR_Motion_Sensor/move.py
```python
import time
import RPi.GPIO as GPIO
SENSOR_PIN = 23
GPIO.setmode(GPIO.BCM)
GPIO.setup(SENSOR_PIN, GPIO.IN)
def action(event):
print("There was a movement!")
try:
GPIO.add_event_detect(SENSOR_PIN, GPIO.RISING, callback=action)
while True:
time.sleep(100)
except KeyboardInterrupt:
print("Keyboard Interrupt!")
GPIO.cleanup()
```
|
{
"source": "jgpstuart/pica-bot",
"score": 3
}
|
#### File: jgpstuart/pica-bot/bot.py
```python
import os
import discord
from discord import message
from dotenv import dotenv_values
from discord.ext import commands
import sqlite3
import re
import random
import ssl
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
###############################################################################
# SQL queries
###############################################################################
""" check to see if user exists in the database """
def check_user(userID):
c.execute("SELECT * FROM users WHERE userid=?", (userID,))
return c.fetchone()
""" add a user, code, and email address to the database """
def add_user(userID):
c.execute("INSERT INTO users VALUES (?, ?, ?, ?)", (userID, None, None, 0))
conn.commit()
""" remove the user from the database if they leave the server """
def remove_user(userID):
c.execute("DELETE FROM users WHERE userID=?", (userID,))
""" update email address """
def update_email(userID, email):
c.execute("UPDATE users SET email=? WHERE userid=?", (email, userID))
conn.commit()
""" update code """
def update_code(userID, code):
c.execute("UPDATE users SET code=? WHERE userid=?", (code, userID))
conn.commit()
""" verify user """
def verify_user(userID):
c.execute("UPDATE users SET verified=1 WHERE userid=?", (userID,))
conn.commit()
###############################################################################
# Various functions
###############################################################################
""" check if an email address...is an email address """
def email_check(email):
regex = "(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])"
if re.search(regex,email):
return True
else:
return False
""" check if the user has been verified yet """
def check_if_verified(userID):
record = check_user(userID)
verified = record[3]
if verified == 0:
return False
elif verified == 1:
return True
""" check if user has been sent a verification code """
def check_if_email(userID):
record = check_user(userID)
email = record[1]
if email is None:
return False
else:
return True
""" get the absolute path for a file """
def abs_path(path):
return os.path.abspath(path)
###############################################################################
# Get the Discord token and Gmail password from the .env file so pica can login
###############################################################################
token_values = dotenv_values(abs_path(".env"))
TOKEN = token_values['PICA_TOKEN']
GMAIL_PASSWORD = token_values['GMAIL_PASSWORD']
GUILD_ID = token_values['GUILD_ID']
###############################################################################
# Setup the users database
###############################################################################
conn = sqlite3.connect(abs_path("magpies.db"))
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS users(
userid INT,
email TEXT,
code INT,
verified INT);
""")
###############################################################################
# Log in the bot and start it up!
###############################################################################
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix="P;!", intents=intents, help_command=None)
###############################################################################
# Event handler for connecting to discord
###############################################################################
@bot.event
async def on_ready():
print('pica has connected to Discord')
await bot.change_presence(activity=discord.Game("https://github.com/jgpstuart/pica-bot"))
###############################################################################
# Print message when they join the server
###############################################################################
@bot.event
async def on_member_join(member):
add_user(member.id)
await member.send("Welcome to the Manzara's Magpies CTF team Discord server!\n\n" \
"To confirm you are a member of the team, please reply to this " \
"message with your @magpie.com email address and we will send " \
"you a email with your verification code.")
###############################################################################
# When someone leaves the server
###############################################################################
@bot.event
async def on_member_remove(member):
# remove the user from the database
remove_user(member.id)
###############################################################################
# Main message loop
###############################################################################
@bot.event
async def on_message(message):
# check to see if the bot is talking to itself!
if message.author == bot.user:
return
# strip the message of whitespace
message_content = message.content.strip()
# Only reply to direct messages
if isinstance(message.channel, discord.DMChannel):
# the message is an email address
if email_check(message_content):
# if they have not been verified
if not check_if_verified(message.author.id):
# If it is a valid @magpie.com email address:
if "@magpie.com" in message_content:
# generate verification code
verification_code = random.randint(100000, 999999)
# add their email address and verification code to the database
update_email(message.author.id, message_content)
update_code(message.author.id, verification_code)
# setup the email message to send them
port = 465
email_message = MIMEMultipart("alternative")
email_message["Subject"] = "Manzara's Magpies Verification Code"
email_message["From"] = "<EMAIL>"
email_message["To"] = message_content
text = str(verification_code)
compiled = MIMEText(text, "plain")
email_message.attach(compiled)
# Create a secure SSL context for the gmail account to send the email
context = ssl.create_default_context()
# send the verification email from the gmail account
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login("<EMAIL>", GMAIL_PASSWORD)
server.sendmail("<EMAIL>", message_content, email_message.as_string())
await message.channel.send("A verification code has been emailed to you. **Please reply to me with the " \
"verficiation code to be added to the Manzara's Magpies server.** If you haven't received it, check your spam folder.")
else:
await message.channel.send("That is not a valid email address. If you do not yet have a valid @magpie.com " \
"email address, please contact The University of Calgary Bureaucracy Club.")
else:
# If they're already verified, tell them to smarten up!
await message.channel.send("You have already been verified. Cut it out!")
# check if this is a verification code
elif (len(message_content) == 6) and message_content.isdigit():
# check if they've submitted a valid @magpie.com email address yet
if not check_if_email(message.author.id):
await message.channel.send("You have not submitted a valid @magpie.com email address yet. " \
"You must submit a valid email address before you can submit a " \
"verification code.")
# check if they're verified, and if not check their verification code
elif not check_if_verified(message.author.id):
# get their verification code from the database
user_records = check_user(message.author.id)
verification_code = user_records[2]
# check the verification code in the database against the message they sent
if verification_code == int(message_content):
# assign them the magpie role
server = bot.get_guild(int(GUILD_ID))
role = discord.utils.get(server.roles, name="magpie")
member = server.get_member(message.author.id)
await member.add_roles(role)
# announce that they're in to the server!
channel = discord.utils.get(server.text_channels, name='general')
if channel is not None:
new_user = message.author
await channel.send(f"A new magpie has landed! Everyone welcome {new_user}!!!\n" \
"https://c.tenor.com/EdyX5M8Vi7wAAAAC/magpie.gif")
# add them as verified in the database
verify_user(message.author.id)
await message.channel.send("Verification code match! Welcome to Manzara's Magpies!")
else:
await message.channel.send("Verification code does not match.")
else:
await message.channel.send("You have already been verified. Cut it out!")
await bot.process_commands(message)
###############################################################################
# Help message for interacting with pica
###############################################################################
@bot.command(name="help")
async def help_command(ctx):
# Display help message
response = "Hello, I am pica.\n\nThese are my user commands, remember to prefix them with \"P;!\" :\n" \
" help: Display this message\n" \
" addrole: Give yourself a specialization role\n" \
embed = discord.Embed(description=response, color=0x4c4fb1)
await ctx.send(embed=embed)
###############################################################################
# Command to give a user a requested specialization role. The desired role
# should be listed in the command
###############################################################################
@bot.command(name="addrole")
async def addrole(ctx, *, role=''):
channel = ctx.message.channel
if not isinstance(channel, discord.DMChannel):
try:
if "cryptography" in role.lower():
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name="Cryptography"))
await ctx.send("You have been given the Cryptography role.")
elif "forensics" in role.lower():
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name="Forensics"))
await ctx.send("You have been given the Forensics role.")
elif "binary exploitation" in role.lower():
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name="Binary Exploitation"))
await ctx.send("You have been given the Binary Exploitation role.")
elif "web exploitation" in role.lower():
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name="Web Exploitation"))
await ctx.send("You have been given the Web Exploitation role.")
elif "reverse engineering" in role.lower():
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name="Reverse Engineering"))
await ctx.send("You have been given the Reverse Engineering role.")
elif "networks" in role.lower():
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name="Networks"))
await ctx.send("You have been given the Networks role.")
elif "osint" in role.lower():
await ctx.author.add_roles(discord.utils.get(ctx.author.guild.roles, name="OSINT"))
await ctx.send("You have been given the OSINT role.")
else:
response = "Please use this command followed immediately by a desired role selected from cryptography, forensics, binary exploitation, web exploitation," \
"osint, or reverse engineering.\nExample usage: P;!addrole binary exploitation"
await ctx.send(response)
except:
await ctx.send("There was an error using this command. Make sure you are using it in an appropriate server.")
else:
await ctx.send("I cannot add roles from direct messages. Please run this command in the server!")
bot.run(TOKEN)
```
|
{
"source": "jgraber/PythonFriday",
"score": 4
}
|
#### File: PythonFriday/JSON/json_example.py
```python
import json
def example():
# You collect and combine data in an
# arbitrarily nested data structure:
data = [
{
'name': "<NAME>",
'phone': "(154) 221-8558",
'zipCode': "900185",
'country': "South Korea",
'options': ['a','b','c'],
'total': "$74.79"
},
{
'name': "<NAME>",
'phone': "1-762-301-2264",
'zipCode': "25566",
'country': "Russian Federation",
'options': {
'a': 'full',
'f': 'partial',
'c': {'k1': 1,
'k2': 3}
},
'total': "$21.78"
}
]
return data
def save():
data = example()
data_json = json.dumps(data, indent=4)
with open("data.json", "w") as f:
f.write(data_json)
def load():
with open("data.json", "r") as f:
data_json = f.read()
data = json.loads(data_json)
print(data)
if __name__ == '__main__':
save()
load()
```
#### File: SQLAlchemy/Core/sqlalchemy_core_crud.py
```python
from sqlalchemy import MetaData, Table, Column, String, Integer, Text, DateTime, Boolean, create_engine, select, insert, update, delete
metadata = MetaData()
employees = Table('Employee', metadata,
Column('Id', Integer(), primary_key=True),
Column('LastName', String(8000)),
Column('FirstName', String(8000)),
Column('BirthDate', String(8000))
)
def show_metadata():
for t in metadata.sorted_tables:
print(f"Table {t.name}:")
for c in t.columns:
print(f"{c} ({c.type})")
def do_insert():
stmt = insert(employees).values(
LastName='Collins',
FirstName='Arnold',
BirthDate='2000-01-31')
new_id = 0
connection_string = "sqlite:///Northwind_small.sqlite"
engine = create_engine(connection_string, echo=False)
with engine.begin() as con:
result = con.execute(stmt)
new_id = result.inserted_primary_key['Id']
print(f"New Id: {new_id}")
return new_id
def select_by_id(id):
stmt = select(employees).where(employees.c.Id == id)
connection_string = "sqlite:///Northwind_small.sqlite"
engine = create_engine(connection_string, echo=False)
with engine.begin() as con:
result = con.execute(stmt).first()
if result:
print(result)
else:
print(f"no rows found with Id == {id}")
def do_update(id):
stmt = update(employees).values(
FirstName="Michael"
).where(employees.c.Id == id)
connection_string = "sqlite:///Northwind_small.sqlite"
engine = create_engine(connection_string, echo=False)
with engine.begin() as con:
con.execute(stmt)
def do_delete(id):
stmt = delete(employees).where(employees.c.Id == id)
connection_string = "sqlite:///Northwind_small.sqlite"
engine = create_engine(connection_string, echo=False)
with engine.begin() as con:
con.execute(stmt)
def statement_infos():
stmt = select(employees.c.LastName, employees.c.FirstName).where(employees.c.Id == 30)
print(f"statement with placeholder: \n{str(stmt)}")
print(f"\nparams: \n{str(stmt.compile().params)}")
if __name__ == '__main__':
print("---- show_metadata() ----")
show_metadata()
print("---- do_insert() ----")
id = do_insert()
print("---- select_by_id() ----")
select_by_id(id)
print("---- do_update() ----")
do_update(id)
select_by_id(id)
print("---- do_delete() ----")
do_delete(id)
select_by_id(id)
print("---- end ----")
# https://overiq.com/sqlalchemy-101/crud-using-sqlalchemy-core/
```
#### File: SQLAlchemy/ORM/app_with_db.py
```python
import os
from datetime import datetime
import data.db_session as db_session
from data.employee import Employee
def setup_db():
db_file = os.path.join(
os.path.dirname(__file__),
'db',
'Northwind_small.sqlite')
db_session.global_init(db_file)
def add_employee():
employee = Employee()
employee.last_name = "King"
employee.first_name = "Robert"
employee.birth_date = '1990-05-29'
print(employee)
session = db_session.factory()
session.add(employee)
session.commit()
print(employee)
return employee.id
if __name__ == '__main__':
print("--- setup_db() ---")
setup_db()
print("--- add_employee() ---")
id = add_employee()
```
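For context, `data/db_session.py` is referenced above but not included; a plausible minimal version (a sketch under the assumption that `ModelBase` is a SQLAlchemy declarative base, not necessarily the project's actual helper) could be:
```python
# data/db_session.py - hypothetical sketch of the session helper used in app_with_db.py;
# the real module in the repository may differ.
import sqlalchemy as sa
import sqlalchemy.orm as orm

from data.modelbase import ModelBase

factory = None


def global_init(db_file: str):
    global factory
    if factory:
        return
    conn_str = "sqlite:///" + db_file
    engine = sa.create_engine(conn_str, echo=False)
    factory = orm.sessionmaker(bind=engine)
    # import model modules so their tables are registered before create_all
    import data.employee  # noqa: F401
    ModelBase.metadata.create_all(engine)
```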
#### File: ORM/data/book.py
```python
import sqlalchemy as sa
from sqlalchemy.sql.schema import UniqueConstraint
from data.modelbase import ModelBase
class Book(ModelBase):
__tablename__ = 'Book'
id = sa.Column('Id', sa.Integer, primary_key=True, autoincrement=True)
title = sa.Column('Title', sa.String, nullable=False)
#isbn = sa.Column('ISBN', sa.String(13), nullable=False, index=True, unique=True)
isbn = sa.Column('ISBN', sa.String(13), nullable=False)
pages = sa.Column('Pages', sa.Integer)
published_by = sa.Column('PublishedBy', sa.Integer, sa.ForeignKey('Publisher.Id'), nullable=False)
publisher = sa.orm.relation("Publisher", back_populates="books")
details = sa.orm.relation("BookDetails", uselist=False, back_populates="book")
authors = sa.orm.relation("Author", secondary='BookAuthor', back_populates="books")
def __repr__(self):
return f'<Book {self.id} ({self.title} {self.isbn}) {self.pages}>'
```
#### File: ORM/data/publisher.py
```python
import sqlalchemy as sa
from data.modelbase import ModelBase
class Publisher(ModelBase):
__tablename__ = 'Publisher'
id = sa.Column('Id', sa.Integer, primary_key=True, autoincrement=True)
name = sa.Column('Name', sa.String, nullable=False, index=True)
books = sa.orm.relationship("Book", back_populates="publisher")
def __repr__(self):
return f'<Publisher {self.id} ({self.name})>'
```
|
{
"source": "jgraber/Python_Scripts",
"score": 3
}
|
#### File: Python_Scripts/GPSInfo/stuff2.py
```python
import click
@click.group()
@click.version_option(version='1.0.0')
def greet():
pass
@greet.command()
@click.argument('name') # add the name argument
def hello(**kwargs):
print('Hello, {0}!'.format(kwargs['name']))
@greet.command()
@click.argument('name')
def goodbye(**kwargs):
print('Goodbye, {0}!'.format(kwargs['name']))
if __name__ == '__main__':
greet()
```
#### File: Python_Scripts/GPSInfo/stuff.py
```python
import click
@click.command(context_settings={"ignore_unknown_options": True})
@click.argument('files', nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=True, resolve_path=True))
def hello(files):
"""Print all FILES file names."""
for filename in files:
click.echo(filename)
@click.version_option(version='2.0.0', prog_name='abc')
@click.group()
def cli():
pass
if __name__ == '__main__':
hello()
```
|
{
"source": "jgradio64/birthday_bot",
"score": 3
}
|
#### File: jgradio64/birthday_bot/bot_functions.py
```python
from my_imports import *
import re
# Bot help function, sends a short explanation of how to use the bot to the server
async def bot_help(msg):
await msg.channel.send('To check if any one has a birthday today just type "$check_bdays"\n')
await msg.channel.send('To add your birthday use "$add_my_bday: Month-Day".\n'
'Use digits for the month and day.\n'
'\tExample: "$add_my_bday: 11-4"\n')
await msg.channel.send('To remove your birthday from the server use "$remove_my_bday"\n'
'\tExample: "$remove_my_bday"\n')
# Function to check if there are birthdays on the date that this is called.
async def check_birthdays(msg):
# Set values to check for in the database
my_query = {"guild_id": msg.guild.id, "month": today.month, "day": today.day}
# check the number of birthdays recorded in the database
number_of_birthdays = collection.count_documents(my_query)
# if there are no birthdays
if number_of_birthdays == 0:
await msg.channel.send('There are no birthdays today. :weary:')
# If birthdays were found
if number_of_birthdays != 0:
# Get the birthdays from the database.
list_of_birthdays = collection.find(my_query)
# Alert the channel to how many birthdays are occurring that day.
if number_of_birthdays == 1:
await msg.channel.send(f'There is {number_of_birthdays} birthday today!')
else:
await msg.channel.send(f'There are {number_of_birthdays} birthdays today!')
# Loop through the birthdays wishing happy birthday to everyone!
for birthday in list_of_birthdays:
await msg.channel.send("Happy birthday " + birthday['user'] + "!!!\t")
# Function to handle adding a user's birthday to the database
async def add_birthday(msg):
my_query = {"guild_id": msg.guild.id, "_id": msg.author.id}
# if there is not a preexisting result in the database by that user
if collection.count_documents(my_query) == 0:
date = re.findall("([0-9]+)", msg.content)
# get month value, convert from str to int
birthday_month = int(date[0])
# get date value, convert from str to int
birthday_date = int(date[1])
# If the month is an acceptable month
if check_month(birthday_month):
# If the date is an acceptable value based upon the month
if check_date(birthday_month, birthday_date):
# Create Schema for the users birthday
birthday = create_birthday(msg.author.id, msg.guild.id, msg.author.name, birthday_month, birthday_date)
# Alert the user that their birthday has been added.
await msg.channel.send(f'Adding your birthday, {msg.author.name}!')
# Insert the data into the Database
collection.insert_one(birthday)
await msg.channel.send(f'I\'ll send you a birthday wish on {birthday_month}-{birthday_date}!')
else:
# If the date is not an acceptable value based upon the month
await msg.channel.send('Please enter a DATE within the MONTH that you chose.')
else:
# If the month the user entered is not an acceptable month
await msg.channel.send('Please enter a number corresponding to a month from 1 to 12.')
else:
# if there is a preexisting result in the database by that user.
await msg.channel.send("Your birthday is already recorded.")
# Function to remove a user's birthday
async def remove_birthday(msg):
my_query = {"guild_id": msg.guild.id, "_id": msg.author.id}
# if there is not a preexisting result in the database by that user
if collection.count_documents(my_query) == 0:
await msg.channel.send(f'No birthday was previously recorded for {msg.author.name}.')
# if there is a preexisting result in the database by that user.
if collection.count_documents(my_query) > 0:
# Find in the database and delete
collection.find_one_and_delete({"_id": msg.author.id})
await msg.channel.send(f'{msg.author.name}\'s birthday has been removed from the database.')
# Checks to see if the month is within the range of possible selections
def check_month(month):
month_range = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
return month in month_range
# Build a birthday schema based upon input.
def create_birthday(user_id, guild_id, user_name, month, date):
return {
"_id": user_id,
"guild_id": guild_id,
"user": user_name,
"month": month,
"day": date
}
# Check if the date giving is in the date range, depending on the month.
def check_date(month, day):
# January, March, May, July, August, October, December
if month in [1, 3, 5, 7, 8, 10, 12]:
if day in range(1, 32):  # days 1-31 are valid for these months
return True
else:
return False
# April, June, September, November
elif month in [4, 6, 9, 11]:
if day in range(1, 31):  # days 1-30 are valid for these months
return True
else:
return False
# February
elif month == 2:
if day in range(1, 29):  # days 1-28; 29 February in leap years is still rejected
return True
else:
return False
# Function to update a user's birthday.
async def update_birthday(msg):
my_query = {"guild_id": msg.guild.id, "_id": msg.author.id}
# if there is not a preexisting result in the database by that user
if collection.count_documents(my_query) == 0:
await msg.channel.send(f'No birthday was previously recorded for {msg.author.name}.')
# if there is a preexisting result in the database by that user.
elif collection.count_documents(my_query) > 0:
date = re.findall("([0-9]+)", msg.content)
# get month value, convert from str to int
birthday_month = int(date[0])
# get date value, convert from str to int
birthday_date = int(date[1])
# If the month is an acceptable month
if check_month(birthday_month):
# If the date is an acceptable value based upon the month
if check_date(birthday_month, birthday_date):
await msg.channel.send(f'Updating your birthday, {msg.author.name}!')
# Insert the data into the Database
collection.find_one_and_update(my_query, {"$set": {"month": birthday_month, "day": birthday_date}})
await msg.channel.send(f'I will now send you a birthday wish on {birthday_month}-{birthday_date}!')
else:
# If the date is not an acceptable value based upon the month
await msg.channel.send('Please enter a DATE within the MONTH that you chose.')
else:
# If the month the user entered is not an acceptable month
await msg.channel.send('Please enter a number corresponding to a month from 1 to 12.')
```
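The functions above rely on `collection` and `today` coming from `my_imports`; a plausible minimal `my_imports.py` (the connection string, database, and collection names are assumptions chosen only to satisfy those references) might look like:
```python
# my_imports.py - hypothetical sketch; URI and names are assumptions.
from datetime import date

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client["birthday_bot"]
collection = db["birthdays"]

# evaluated at import time, so the bot sees the date it was started on
today = date.today()
```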
|
{
"source": "jgradio64/menu-ordering-system",
"score": 4
}
|
#### File: menu-ordering-system/tests/test.py
```python
import unittest
from src.order import Order
class TestOrder(unittest.TestCase):
def test_order_output(self):
test_cases = {
"1": ["Breakfast 1,2,3",
"Eggs, Toast, Coffee"],
"2": ["Breakfast 2,3,1",
"Eggs, Toast, Coffee"],
"3": ["Breakfast 1,2,3,3,3",
"Eggs, Toast, Coffee(3)"],
"4": ["Breakfast 1",
"Unable to process: Side is missing"],
"5": ["Lunch 1,2,3",
"Sandwich, Chips, Soda"],
"6": ["Lunch 1,2",
"Sandwich, Chips, Water"],
"7": ["Lunch 1,1,2,3",
"Unable to process: Sandwich cannot be ordered more than once"],
"8": ["Lunch 1,2,2",
"Sandwich, Chips(2), Water"],
"9": ["Lunch",
"Unable to process: Main is missing, side is missing"],
"10": ["Dinner 1,2,3,4",
"Steak, Potatoes, Wine, Water, Cake"],
"11": ["Dinner 1,2,3",
"Unable to process: Dessert is missing"],
}
for key in test_cases:
new_order = Order()
print(test_cases[key])
result = new_order.build_order(test_cases[key][0])
self.assertEqual(result, test_cases[key][1])
print("works")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jgraef/python3-pb2nano",
"score": 3
}
|
#### File: python3-pb2nano/examples/write_read.py
```python
import pickle
from io import BytesIO
from pprint import pprint
from pb2nano.protocol import *
from pb2nano.reader import *
from pb2nano.writer import *
# A totally random example on how to use pb2nano
# Specify the protocol like this:
Object = Pb2Message("Object")\
.field("required", "uint64", "id", 1)\
.field("optional", "uint32", "some_val", 2)\
.field("required", "bool", "is_important", 3)\
.field("optional", "string", "name", 4)\
.field("optional", "bytes", "object", 5,
filter = (pickle.loads, pickle.dumps))
Type = Pb2Enum("Type")\
.define("DO_THAT", 1)\
.define("DO_THIS", 2)
Command = Pb2Message("Command")\
.field("required", "Type", "type", 1)\
.field("repeated", "Object", "arg", 2)
TestProtocol = Pb2Protocol()\
.enum(Type)\
.message(Object)\
.message(Command)
# Some test data:
class AnyPickableClass:
def __init__(self, a, b):
self.a = a
self.b = b
def xor(self):
return self.a ^ self.b
def __eq__(self, other):
return self.a == other.a and self.b == other.b
my_python_object = AnyPickableClass(42, 1337)
obj = {
"type": "DO_THAT",
"arg": [{ # <-- arg is repeated, therefore it must be an iterable
"id": 12345,
"is_important": False,
"name": "awesome_name",
"object": my_python_object
},
{
"id": 54321,
"is_important": True,
}]
}
# Let's serialize that:
buf = BytesIO()
w = Pb2Writer(Pb2WireWriter(buf), TestProtocol, Command)
w.write(obj)
print("Serialized data:")
print(buf.getvalue())
print()
# And now unserialize it again
buf.seek(0)
r = Pb2Reader(Pb2WireReader(buf), TestProtocol, Command)
obj2 = r.read()
# Let's see
assert obj == obj2
print("Read object:")
pprint(obj2)
print()
print("xor:", obj2["arg"][0]["object"].xor())
```
#### File: python3-pb2nano/pb2nano/reader.py
```python
from io import BytesIO
from .error import Pb2ReaderException
class Pb2WireReader:
def __init__(self, f):
self.f = f
self._read_wire_type = {
0: self.read_varint,
1: self.read_fixed64,
2: self.read_bytes,
5: self.read_fixed32
}
def read(self):
field_number, wire_type = self.read_key()
wire_val = self.read_wire_type(wire_type)
return field_number, wire_type, wire_val
def read_key(self):
vi = self.read_varint()
return (vi >> 3), (vi & 7)
def read_wire_type(self, wire_type):
return self._read_wire_type[wire_type]()
def read_varint(self):
x = 0
i = 0
while (True):
b = self.f.read(1)
if (not b):
raise EOFError
b = b[0]
x |= (b & 0x7F) << i
if (b & 0x80 == 0):
return x
i += 7
def read_fixed32(self):
return int.from_bytes(self.f.read(4), "little")
def read_fixed64(self):
return int.from_bytes(self.f.read(8), "little")
def read_bytes(self):
n = self.read_varint()
return self.f.read(n)
class Pb2Reader:
def __init__(self, wire, protocol, message):
self.wire = wire
self.protocol = protocol
self.message = message
self._read_field_type = {
#"double": self.interpret_double,
#"float": self.interpret_float,
#"int32": self.interpret_int,
#"int64": self.interpret_int,
"uint32": self.read_asis,
"uint64": self.read_asis,
#"sint32": self.interpret_sint,
#"sint64": self.interpret_sint,
"bool": self.read_bool,
"string": self.read_string,
"bytes": self.read_asis
# TODO
}
def read(self):
obj = {}
try:
while (True):
field_number, wire_type, wire_val = self.wire.read()
# parse value according to protocol
try:
field = self.message.fields_by_number[field_number]
except KeyError:
continue
val = self.read_field(field, wire_val)
# add value to object
if (field.label == "repeated"):
try:
l = obj[field.name]
except KeyError:
obj[field.name] = l = []
l.append(val)
else:
obj[field.name] = val
except EOFError:
pass
# check if all required fields are present and put empty lists for
# non-present repeated fields.
for field in self.message.fields_by_name.values():
if (field.name not in obj and field.label == "required"):
raise Pb2ReaderException("Required field {} not present in {}".format(field.name, self.message.name))
return obj
def read_field(self, field, wire_val):
reader = self._read_field_type.get(field.type)
if (reader):
val = reader(wire_val)
else:
message = self.protocol.messages.get(field.type)
if (message):
val = self.read_message(wire_val, message)
else:
enum = self.protocol.enums.get(field.type)
if (enum):
val = self.read_enum(wire_val, enum)
else:
raise Pb2ReaderException("Unknown field type {}".format(field.type))
if (field.filter):
val = field.filter[0](val)
return val
def read_asis(self, wire_val):
return wire_val
def read_bool(self, wire_val):
assert type(wire_val) == int
return bool(wire_val)
def read_string(self, wire_val):
assert type(wire_val) == bytes
return wire_val.decode()
def read_message(self, wire_val, message):
wire = Pb2WireReader(BytesIO(wire_val))
reader = Pb2Reader(wire, self.protocol, message)
return reader.read()
def read_enum(self, wire_val, enum):
try:
return enum.defs_by_number[wire_val]
except KeyError:
raise Pb2ReaderException("Value {:d} doesn't match any enum value in {}".format(wire_val, enum.name))
__all__ = [
"Pb2WireReader",
"Pb2Reader"
]
```
|
{
"source": "Jgraf42507/Portfolio-Code",
"score": 4
}
|
#### File: Jgraf42507/Portfolio-Code/Robot.py
```python
import turtle as t
def rectangle(horizontal, vertical, color):
t.pendown()
t.pensize(1)
t.color(color)
t.begin_fill()
for counter in range(1, 3):
t.forward(horizontal)
t.right(90)
t.forward(vertical)
t.right(90)
t.end_fill()
t.penup()
t.penup()
t.speed('slow')
t.bgcolor('Dodger blue')
# feet
t.goto(-100, -150)
rectangle(50, 20, 'blue')
t.goto(-30, -150)
rectangle(50, 20, 'blue')
# legs
t.goto(-25, -50)
rectangle(15, 100, 'grey')
t.goto(-55, -50)
rectangle(-15, 100, 'grey')
# body
t.goto(-90, 100)
rectangle(100, 150, 'red')
# arms
t.goto(-150, 70)
rectangle(60, 15, 'grey')
t.goto(-150, 110)
rectangle(15, 40, 'grey')
t.goto(10, 70)
rectangle(60, 15, 'grey')
t.goto(55, 110)
rectangle(15, 40, 'grey')
# neck
t.goto(-50, 120)
rectangle(15, 20, 'grey')
# head
t.goto(-85, 170)
rectangle(80, 50, 'red')
# eyes
t.goto(-60, 160)
rectangle(30, 10, 'white')
t.goto(-55, 155)
rectangle(5, 5, 'black')
t.goto(-40, 155)
rectangle(5, 5, 'black')
```
|
{
"source": "jgrahamc/bluelight",
"score": 2
}
|
#### File: bluelight/luatool/luatool.py
```python
import sys
import serial
from time import sleep
import socket
import argparse
from os.path import basename
tqdm_installed = True
try:
from tqdm import tqdm
except ImportError, e:
if e.message == 'No module named tqdm':
tqdm_installed = False
else:
raise
version = "0.6.4"
class TransportError(Exception):
"""Custom exception to represent errors with a transport
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class AbstractTransport:
def __init__(self):
raise NotImplementedError('abstract transports cannot be instantiated.')
def close(self):
raise NotImplementedError('Function not implemented')
def read(self, length):
raise NotImplementedError('Function not implemented')
def writeln(self, data, check=1):
raise NotImplementedError('Function not implemented')
def writer(self, data):
self.writeln("file.writeline([==[" + data + "]==])\r")
def performcheck(self, expected):
line = ''
char = ''
i = -1
while char != chr(62): # '>'
char = self.read(1)
if char == '':
raise Exception('No proper answer from MCU')
if char == chr(13) or char == chr(10): # LF or CR
if line != '':
line = line.strip()
if line+'\r' == expected and not args.bar:
sys.stdout.write(" -> ok")
elif line+'\r' != expected:
if line[:4] == "lua:":
sys.stdout.write("\r\n\r\nLua ERROR: %s" % line)
raise Exception('ERROR from Lua interpreter\r\n\r\n')
else:
expected = expected.split("\r")[0]
sys.stdout.write("\r\n\r\nERROR")
sys.stdout.write("\r\n send string : '%s'" % expected)
sys.stdout.write("\r\n expected echo : '%s'" % expected)
sys.stdout.write("\r\n but got answer : '%s'" % line)
sys.stdout.write("\r\n\r\n")
raise Exception('Error sending data to MCU\r\n\r\n')
line = ''
else:
line += char
if char == chr(62) and expected[i] == char:
char = ''
i += 1
class SerialTransport(AbstractTransport):
def __init__(self, port, baud, delay):
self.port = port
self.baud = baud
self.serial = None
self.delay = delay
try:
self.serial = serial.Serial(port, baud)
except serial.SerialException as e:
raise TransportError(e.strerror)
self.serial.timeout = 3
self.serial.interCharTimeout = 3
def writeln(self, data, check=1):
if self.serial.inWaiting() > 0:
self.serial.flushInput()
if len(data) > 0 and not args.bar:
sys.stdout.write("\r\n->")
sys.stdout.write(data.split("\r")[0])
self.serial.write(data)
sleep(self.delay)
if check > 0:
self.performcheck(data)
elif not args.bar:
sys.stdout.write(" -> send without check")
def read(self, length):
return self.serial.read(length)
def close(self):
self.serial.flush()
self.serial.close()
class TcpSocketTransport(AbstractTransport):
def __init__(self, host, port):
self.host = host
self.port = port
self.socket = None
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as e:
raise TransportError(e.strerror)
try:
self.socket.connect((host, port))
except socket.error as e:
raise TransportError(e.strerror)
# read intro from telnet server (see telnet_srv.lua)
self.socket.recv(50)
def writeln(self, data, check=1):
if len(data) > 0 and not args.bar:
sys.stdout.write("\r\n->")
sys.stdout.write(data.split("\r")[0])
self.socket.sendall(data)
if check > 0:
self.performcheck(data)
elif not args.bar:
sys.stdout.write(" -> send without check")
def read(self, length):
return self.socket.recv(length)
def close(self):
self.socket.close()
def decidetransport(cliargs):
if cliargs.ip:
data = cliargs.ip.split(':')
host = data[0]
if len(data) == 2:
port = int(data[1])
else:
port = 23
return TcpSocketTransport(host, port)
else:
return SerialTransport(cliargs.port, cliargs.baud, cliargs.delay)
if __name__ == '__main__':
# parse arguments or use defaults
parser = argparse.ArgumentParser(description='ESP8266 Lua script uploader.')
parser.add_argument('-p', '--port', default='/dev/ttyUSB0', help='Device name, default /dev/ttyUSB0')
parser.add_argument('-b', '--baud', default=9600, help='Baudrate, default 9600')
parser.add_argument('-f', '--src', default='main.lua', help='Source file on computer, default main.lua')
parser.add_argument('-t', '--dest', default=None, help='Destination file on MCU, default to source file name')
parser.add_argument('-c', '--compile', action='store_true', help='Compile lua to lc after upload')
parser.add_argument('-r', '--restart', action='store_true', help='Restart MCU after upload')
parser.add_argument('-d', '--dofile', action='store_true', help='Run the Lua script after upload')
parser.add_argument('-v', '--verbose', action='store_true', help="Show progress messages.")
parser.add_argument('-a', '--append', action='store_true', help='Append source file to destination file.')
parser.add_argument('-l', '--list', action='store_true', help='List files on device')
parser.add_argument('-w', '--wipe', action='store_true', help='Delete all lua/lc files on device.')
parser.add_argument('-i', '--id', action='store_true', help='Query the modules chip id.')
parser.add_argument('-e', '--echo', action='store_true', help='Echo output of MCU until script is terminated.')
parser.add_argument('--bar', action='store_true', help='Show a progress bar for uploads instead of printing each line')
parser.add_argument('--delay', default=0.3, help='Delay in seconds between each write.', type=float)
parser.add_argument('--delete', default=None, help='Delete a lua/lc file from device.')
parser.add_argument('--ip', default=None, help='Connect to a telnet server on the device (--ip IP[:port])')
args = parser.parse_args()
transport = decidetransport(args)
if args.bar and not tqdm_installed:
sys.stdout.write("You must install the tqdm library to use the bar feature\n")
sys.stdout.write("To install, at the prompt type: \"pip install tqdm\"\n")
sys.exit(0)
if args.list:
transport.writeln("local l = file.list();for k,v in pairs(l) do print('name:'..k..', size:'..v)end\r", 0)
while True:
char = transport.read(1)
if char == '' or char == chr(62):
break
sys.stdout.write(char)
sys.exit(0)
if args.id:
transport.writeln("=node.chipid()\r", 0)
id=""
while True:
char = transport.read(1)
if char == '' or char == chr(62):
break
if char.isdigit():
id += char
print("\n"+id)
sys.exit(0)
if args.wipe:
transport.writeln("local l = file.list();for k,v in pairs(l) do print(k)end\r", 0)
file_list = []
fn = ""
while True:
char = transport.read(1)
if char == '' or char == chr(62):
break
if char not in ['\r', '\n']:
fn += char
else:
if fn:
file_list.append(fn.strip())
fn = ''
for fn in file_list[1:]: # first line is the list command sent to device
if args.verbose:
sys.stderr.write("Delete file {} from device.\r\n".format(fn))
transport.writeln("file.remove(\"" + fn + "\")\r")
sys.exit(0)
if args.delete:
transport.writeln("file.remove(\"" + args.delete + "\")\r")
sys.exit(0)
if args.dest is None:
args.dest = basename(args.src)
# open source file for reading
try:
try:
f = open(args.src, "rt")
except:
import os
base_dir = os.path.dirname(os.path.realpath(__file__))
f = open(os.path.join(base_dir, args.src), "rt")
os.chdir(base_dir)
except:
sys.stderr.write("Could not open input file \"%s\"\n" % args.src)
sys.exit(1)
# Verify the selected file will not exceed the size of the serial buffer.
# The size of the buffer is 256. This script does not accept files with
# lines longer than 230 characters to have some room for command overhead.
num_lines = 0
for ln in f:
if len(ln) > 230:
sys.stderr.write("File \"%s\" contains a line with more than 240 "
"characters. This exceeds the size of the serial buffer.\n"
% args.src)
f.close()
sys.exit(1)
num_lines += 1
# Go back to the beginning of the file after verifying it has the correct
# line length
f.seek(0)
# set serial timeout
if args.verbose:
sys.stderr.write("Upload starting\r\n")
# remove existing file on device
if args.append==False:
if args.verbose:
sys.stderr.write("Stage 1. Deleting old file from flash memory")
transport.writeln("file.open(\"" + args.dest + "\", \"w\")\r")
transport.writeln("file.close()\r")
transport.writeln("file.remove(\"" + args.dest + "\")\r")
else:
if args.verbose:
sys.stderr.write("[SKIPPED] Stage 1. Deleting old file from flash memory [SKIPPED]")
# read source file line by line and write to device
if args.verbose:
sys.stderr.write("\r\nStage 2. Creating file in flash memory and write first line")
if args.append:
transport.writeln("file.open(\"" + args.dest + "\", \"a+\")\r")
else:
transport.writeln("file.open(\"" + args.dest + "\", \"w+\")\r")
line = f.readline()
if args.verbose:
sys.stderr.write("\r\nStage 3. Start writing data to flash memory...")
if args.bar:
for i in tqdm(range(0, num_lines)):
transport.writer(line.strip())
line = f.readline()
else:
while line != '':
transport.writer(line.strip())
line = f.readline()
# close both files
f.close()
if args.verbose:
sys.stderr.write("\r\nStage 4. Flush data and closing file")
transport.writeln("file.flush()\r")
transport.writeln("file.close()\r")
# compile?
if args.compile:
if args.verbose:
sys.stderr.write("\r\nStage 5. Compiling")
transport.writeln("node.compile(\"" + args.dest + "\")\r")
transport.writeln("file.remove(\"" + args.dest + "\")\r")
# restart or dofile
if args.restart:
transport.writeln("node.restart()\r")
if args.dofile: # never exec if restart=1
transport.writeln("dofile(\"" + args.dest + "\")\r", 0)
if args.echo:
if args.verbose:
sys.stderr.write("\r\nEchoing MCU output, press Ctrl-C to exit")
while True:
sys.stdout.write(transport.read(1))
# close serial port
transport.close()
# flush screen
sys.stdout.flush()
sys.stderr.flush()
if not args.bar:
sys.stderr.write("\r\n--->>> All done <<<---\r\n")
```
|
{
"source": "jgraichen/codeocean",
"score": 3
}
|
#### File: seeds/even_odd/exercise_tests.py
```python
from exercise import *
import unittest
class ExerciseTests(unittest.TestCase):
def test_even(self):
for x in [1, 3, 5, 7, 9]:
self.assertFalse(even(x))
for x in [2, 4, 6, 8, 10]:
self.assertTrue(even(x))
def test_odd(self):
for x in [1, 3, 5, 7, 9]:
self.assertTrue(odd(x))
for x in [2, 4, 6, 8, 10]:
self.assertFalse(odd(x))
```
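For reference, a minimal `exercise.py` that satisfies these tests (a sketch, not necessarily the seed's reference solution) is:
```python
# exercise.py - hypothetical solution matching the assertions above
def even(x):
    return x % 2 == 0


def odd(x):
    return x % 2 != 0
```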
|
{
"source": "jgraichen/salt-acme",
"score": 2
}
|
#### File: salt-acme/_modules/acme_dns.py
```python
import ipaddress
import logging
import time
from contextlib import contextmanager
try:
import dns
import dns.tsigkeyring
from dns.update import Update
from dns.rdataclass import IN
from dns.rdatatype import TXT
from dns.rcode import NOERROR
_HAS_DNS = True
except ImportError:
_HAS_DNS = False
from salt.exceptions import (
CommandExecutionError,
SaltConfigurationError,
TimeoutError as SaltTimeoutError,
)
_LOG = logging.getLogger(__name__)
def __virtual__():
if not _HAS_DNS:
return False, "dnspython missing"
return True
def _make_record(token, alias=None, **_kwargs):
if alias:
name = dns.name.from_unicode(alias)
else:
name = dns.name.from_unicode(f"_acme-challenge.{token['name']}")
rdata = dns.rdata.from_text(IN, TXT, str(token["token"]))
return (name, rdata)
def _query_addresses(name, resolver=None):
if resolver is None:
resolver = dns.resolver
addresses = []
for rdtype in ("A", "AAAA"):
try:
addresses.extend([r.to_text() for r in resolver.query(name, rdtype)])
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
pass
return addresses
def _verify(nameserver, port, zone, verify_timeout=120, **_kwargs):
"""
Verify all nameservers listed as NS in `zone` serve the current or a newer
SOA serial.
"""
# Use primary nameserver for NS lookup and as first resolver
# to handle local or split-horizon scenarios
resolver = dns.resolver.Resolver(configure=False)
try:
ipaddress.ip_address(nameserver)
resolver.nameservers = [nameserver]
except ValueError as e:
resolver.nameservers = _query_addresses(nameserver)
if not resolver.nameservers:
raise SaltConfigurationError(f"Nameserver not found: {nameserver}") from e
# All resolved address of the primary NS must use the configured port
resolver.nameserver_ports.update({ns: port for ns in resolver.nameservers})
# The public resolver first tries the primary NS first, otherwise falls
# back to the system resolver. This is used to lookup e.g. other NS names
# which might not be served by the primary.
public = dns.resolver.Resolver()
public.nameservers = resolver.nameservers + public.nameservers
public.nameserver_ports.update(resolver.nameserver_ports)
# Verify SOA serial propagation to all nameserver
serial = resolver.query(zone, "SOA")[0].serial
deadline = time.monotonic() + verify_timeout
# Collect all NS records of the zone. We explicitly use the primary NS
# as the system resolver might serve internal NS in a split-horizon setup.
nameservers = []
resolvers = {}
for rdata in resolver.query(zone, "NS", raise_on_no_answer=False):
name = rdata.target.to_unicode()
resolvers[name] = dns.resolver.Resolver(configure=False)
resolvers[name].nameservers = _query_addresses(name, resolver=public)
nameservers.append(name)
if not nameservers:
_LOG.warning("Skip DNS record verify: No nameservers found for %s", zone)
return
_LOG.info("Verify SOA serial %d for %d nameservers...", serial, len(nameservers))
while deadline > time.monotonic():
for ns in nameservers[:]:
ns_serial = resolvers[ns].query(zone, "SOA")[0].serial
if ns_serial < serial:
_LOG.debug("Nameserver %s still at %d...", ns, ns_serial)
else:
nameservers.remove(ns)
if nameservers:
_LOG.debug("%d nameservers still pending...", len(nameservers))
time.sleep(0.5)
else:
_LOG.debug("All nameservers up-to-date!")
break
if nameservers:
_LOG.error("Nameserver failed to update: %s", nameservers)
raise SaltTimeoutError("Some nameserver failed to receive DNS updates")
@contextmanager
def _update(zone, nameserver, port=53, timeout=10, tsig=None, verify=True, **kwargs):
update = Update(zone)
if tsig:
algorithm, keyname, secret = str(tsig).split(":", 3)
keyring = dns.tsigkeyring.from_text({keyname: secret})
update.use_tsig(keyring, keyname=keyname, algorithm=algorithm)
yield update
answer = dns.query.tcp(update, nameserver, timeout, port)
rcode = answer.rcode()
if rcode != NOERROR:
raise CommandExecutionError(
f"DNS update for {zone} failed: {dns.rcode.to_text(rcode)}"
)
if verify:
_verify(nameserver, port, zone, **kwargs)
def install(name, tokens, ttl=120, **kwargs):
if "zone" not in kwargs:
kwargs["zone"] = name
with _update(**kwargs) as update:
for token in tokens:
name, rdata = _make_record(token, **kwargs)
update.add(name, ttl, rdata)
def remove(name, tokens, **kwargs):
if "zone" not in kwargs:
kwargs["zone"] = name
# No need to verify propagation when removing challenges
kwargs["verify"] = False
with _update(**kwargs) as update:
for token in tokens:
name, rdata = _make_record(token, **kwargs)
update.delete(name, rdata)
```
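A minimal usage sketch of the `install`/`remove` entry points above; the zone, nameserver and TSIG values are made-up examples, and the token dicts mirror the `name`/`token` keys consumed by `_make_record`. This is an illustration, not part of the module:

```python
# Illustration only -- zone, nameserver and TSIG secret are hypothetical.
tokens = [
    {"name": "www.example.org", "token": "gfj9XqUvGxirFqUjTSR2hGmvJxVXGLTx"},
]
# Publish the _acme-challenge TXT records and wait until every NS of the zone
# serves the new SOA serial (verification is on by default in _update()).
install(
    "example.org",                 # used as the zone when no zone kwarg is given
    tokens,
    ttl=120,
    nameserver="ns1.example.org",
    port=53,
    tsig="hmac-sha256:acme-key:bXktc2VjcmV0LWtleQ==",  # algorithm:keyname:secret
)
# Remove the records afterwards; propagation is intentionally not verified here.
remove(
    "example.org",
    tokens,
    nameserver="ns1.example.org",
    tsig="hmac-sha256:acme-key:bXktc2VjcmV0LWtleQ==",
)
```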
#### File: test/runners/test_acme.py
```python
import os
from unittest.mock import patch
import pytest
import yaml
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import salt.version
from salt.exceptions import AuthorizationError
@pytest.fixture
def runner(master):
return master.runner
def read_fixture(file, mode="r"):
with open(os.path.join("test/fixtures", file), mode) as f:
return f.read()
def _patch_cmd(runner, fn):
# Salt 3003+ changed how the __salt__ global is handled inside the loader.
# To patch that in tests, we need to target the loader directly instead of a
# pack as in previous versions.
if salt.version.__version__ > '3003':
return patch.dict(runner, {"salt.cmd": fn})
else:
return patch.dict(runner.pack["__salt__"], {"salt.cmd": fn})
def test_sign(runner):
"""
One full roundtrip test. The CSR is passed to the `acme.sign` execution
module and a dict with {"text": "<certificate>"} is returned.
"""
csr = read_fixture("example.csr")
result = runner["acme.sign"](csr)
assert "text" in result
crt = x509.load_pem_x509_certificate(
result["text"].encode(), backend=default_backend()
)
assert isinstance(crt, x509.Certificate)
def test_sign_broken_pem(runner):
"""
When a minion invokes a runner using `publish.runner`, the arguments can get
scrambled. Newlines might be replaced with single spaces.
The runner must fix these missing newlines in the PEM-encoded csr and pass a
correct string to the execution module.
"""
csr = read_fixture("example.csr")
def check_fn(cmd, pem):
assert cmd == "acme.sign"
assert pem == csr.strip()
with _patch_cmd(runner, check_fn):
runner["acme.sign"](csr.replace("\n", " "))
def test_sign_authorize(runner, tmpdir):
auth_file = os.path.join(tmpdir, "auth.yml")
with open(auth_file, "w") as f:
yaml.safe_dump(
{"*": ["example.org", "*.example.org", "example.com", "*.example.com"]}, f
)
def fxcmd(*_args):
return True
with patch.dict(
runner.opts,
{"id": "minion", "acme": {"runner": {"auth_file": auth_file}}},
):
with _patch_cmd(runner, fxcmd):
assert runner["acme.sign"](read_fixture("example.csr"))
def test_sign_authorize_multiple_rules(runner, tmpdir):
"""
Test that all matching rules are applied.
"""
auth_file = os.path.join(tmpdir, "auth.yml")
with open(auth_file, "w") as f:
yaml.safe_dump(
{
"minion": ["example.org"],
"minion*": ["*.example.org"],
"*": ["example.com", "*.example.com"],
},
f,
)
def fxcmd(*_args):
return True
with patch.dict(
runner.opts,
{"id": "minion", "acme": {"runner": {"auth_file": auth_file}}},
):
with _patch_cmd(runner, fxcmd):
assert runner["acme.sign"](read_fixture("example.csr"))
def test_sign_reject_unauthorized(runner, tmpdir):
auth_file = os.path.join(tmpdir, "auth.yml")
csr = read_fixture("example.csr")
with open(auth_file, "w") as f:
yaml.safe_dump({"minion": ["example.org", "*.example.org"]}, f)
with patch.dict(
runner.opts,
{"id": "minion", "acme": {"runner": {"auth_file": auth_file}}},
):
with pytest.raises(AuthorizationError) as e:
runner["acme.sign"](csr)
assert str(e.value) == "Unauthorized domains: example.com, www.example.com"
```
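The `test_sign_broken_pem` case above expects the runner to restore newlines that `publish.runner` collapsed into spaces. One way such a repair can be sketched (an illustration of the idea, not the runner's actual code):

```python
import re

def repair_pem(pem: str) -> str:
    """Restore newlines in a PEM blob whose line breaks were replaced by spaces."""
    match = re.match(
        r"\s*(-----BEGIN [A-Z ]+-----)(.*)(-----END [A-Z ]+-----)\s*", pem, re.S
    )
    if not match:
        return pem
    head, body, tail = match.groups()
    # The base64 body never contains spaces, so any whitespace left in it marks
    # a lost line break.
    body = "\n".join(body.split())
    return f"{head}\n{body}\n{tail}"

scrambled = (
    "-----BEGIN CERTIFICATE REQUEST----- "
    "MIICWjCCAUICAQAwFTET MBEGA1UEAwwKZXhhbXBsZS5vcmc "
    "-----END CERTIFICATE REQUEST-----"
)
print(repair_pem(scrambled))
```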
|
{
"source": "jgraichen/salt-pki",
"score": 2
}
|
#### File: fixtures/_modules/test.py
```python
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend as _default_backend
from cryptography.hazmat.primitives import hashes, serialization
def sign(csr):
"""
Minimal execution module to test signing with a CA.
"""
with open("test/fixtures/ca.crt", "rb") as f:
ca_cert = x509.load_pem_x509_certificate(f.read(), backend=_default_backend())
with open("test/fixtures/ca.key", "rb") as f:
ca_pkey = serialization.load_pem_private_key(
f.read(), password=None, backend=_default_backend()
)
obj = x509.load_pem_x509_csr(csr.encode(), backend=_default_backend())
crt = x509.CertificateBuilder(
issuer_name=ca_cert.subject,
subject_name=obj.subject,
public_key=obj.public_key(),
serial_number=x509.random_serial_number(),
not_valid_after=datetime.datetime.utcnow() + datetime.timedelta(days=90),
not_valid_before=datetime.datetime.utcnow(),
extensions=obj.extensions,
).sign(ca_pkey, algorithm=hashes.SHA384(), backend=_default_backend())
return {"text": crt.public_bytes(serialization.Encoding.PEM).decode()}
```
|
{
"source": "jgraichen/salt-template",
"score": 3
}
|
#### File: salt-template/_templates/systemd.py
```python
from typing import TYPE_CHECKING, Callable, Dict
if TYPE_CHECKING:
__salt__: Dict[str, Callable]
def _escape(string: str):
return "\\\n ".join(string.splitlines())
def _value(value):
if value is None:
return ""
if isinstance(value, bool):
return "yes" if value else "no"
if isinstance(value, str):
return _escape(value)
if isinstance(value, (int, float)):
return str(value)
raise ValueError(f"Unsupport value: {value!r}")
def _render_section(name, values):
lines = [f"[{name}]"]
for key, value in values.items():
if isinstance(value, list):
for val in value:
lines.append(f"{key}={_value(val)}")
else:
lines.append(f"{key}={_value(value)}")
lines.append("")
return lines
def run():
"""
Renders data into a systemd syntax file.
See `template.prepare` for context arguments on loading data, and
`template.managed` for additional arguments when rendering the output.
Arguments:
section (str): Render key value pairs from data into this section instead of
using the top dict for sections. Nested dicts are not supported.
Example:
/etc/systemd/system/unit.service:
file.managed:
- template: py
- source: salt://_files/serialize/systemd.py
- context:
default:
Unit:
Description: "A Unit Description"
Service:
ExecStart:
- command
- command
Output:
# Preamble
[Unit]
Description=A Unit Description
[Service]
ExecStart=command
ExecStart=command
"""
args = globals().get("context", {})
data = __salt__["template.prepare"](**args)
if "section" in args:
data = {args["section"]: data}
lines = []
for name, values in data.items():
if isinstance(values, list):
for items in values:
lines.extend(_render_section(name, items))
else:
lines.extend(_render_section(name, values))
return __salt__["template.managed"](lines, **args)
```
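For reference, a standalone check of the `_render_section`/`_value` helpers above (run outside the Salt loader, since `run()` itself needs `__salt__`); the section name and keys are arbitrary examples:

```python
# Standalone illustration of _render_section()/_value(); no Salt loader required.
lines = _render_section(
    "Service",
    {
        "ExecStart": ["/usr/bin/demo --serve", "/usr/bin/demo --reload"],
        "Restart": "on-failure",
        "AmbientCapabilities": None,   # None renders as an empty value
        "NoNewPrivileges": True,       # booleans become yes/no
    },
)
print("\n".join(lines))
# [Service]
# ExecStart=/usr/bin/demo --serve
# ExecStart=/usr/bin/demo --reload
# Restart=on-failure
# AmbientCapabilities=
# NoNewPrivileges=yes
```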
|
{
"source": "jgraille/CtCy-6th-Edition-Python-",
"score": 3
}
|
#### File: CtCy-6th-Edition-Python-/ch-01-ArrayString/oneaway.py
```python
import difflib
import re
import unittest
import datetime
import random
import string
random.seed(9001)
def one_edit_replace(s1, s2):
edited = False
for c1, c2 in zip(s1, s2):
if c1 != c2:
if edited:
return False
edited = True
return True
def one_edit_insert(s1, s2):
edited = False
i, j = 0, 0
while i < len(s1) and j < len(s2):
if s1[i] != s2[j]:
if edited:
return False
edited = True
j += 1
else:
i += 1
j += 1
return True
def one_away(s1, s2):
if len(s1) == len(s2):
return one_edit_replace(s1, s2)
elif len(s1) + 1 == len(s2):
return one_edit_insert(s1, s2)
elif len(s1) - 1 == len(s2):
return one_edit_insert(s2, s1)
return False
def find(val):
res = re.search(r"(-|\+)", val)
if res != None:
return res.group(0)
else:
return None
def OneAway(string1,string2):
d = difflib.Differ()
    resMatch = [m for m in map(find, d.compare(string1, string2)) if m is not None]
    dic = {'-': resMatch.count('-'), '+': resMatch.count('+')}
    if (
        abs(dic['-'] - dic['+']) > 1
        or (dic['-'] == dic['+'] and dic['-'] > 1)
        or (abs(dic['-'] - dic['+']) == 1 and dic['-'] != 0 and dic['+'] != 0)
    ):
        return False
    else:
        return True
class Gen:
def __init__(self):
self.size = random.randint(50,100)
self.string = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(self.size))
# https://github.com/careercup/CtCI-6th-Edition-Python/blob/master/Chapter1/5_One%20Away/OneAway.py
class Test(unittest.TestCase):
'''Test Cases'''
a = Gen().string
b = Gen().string
print(a)
print(b)
data = [
('pale', 'ple', True),
('pales', 'pale', True),
('pale', 'bale', True),
('paleabc', 'pleabc', True),
('pale', 'ble', False),
('a', 'b', True),
('', 'd', True),
('d', 'de', True),
('pale', 'pale', True),
('pale', 'ple', True),
('ple', 'pale', True),
('pale', 'bale', True),
('pale', 'bake', False),
('pale', 'pse', False),
('ples', 'pales', True),
('pale', 'pas', False),
('pas', 'pale', False),
('pale', 'pkle', True),
('pkle', 'pable', False),
('pal', 'palks', False),
('palks', 'pal', False),
(a,b,False),
]
def test_one_away1(self):
start = datetime.datetime.now()
for [test_s1, test_s2, expected] in self.data:
actual = one_away(test_s1, test_s2)
self.assertEqual(actual, expected)
time = datetime.datetime.now() - start
print(time)
def test_one_away2(self):
start = datetime.datetime.now()
for [test_s1, test_s2, expected] in self.data:
actual = OneAway(test_s1, test_s2)
self.assertEqual(actual, expected)
time = datetime.datetime.now() - start
print(time)
def main():
unittest.main()
'''
a = 'doretdeplatine'
# remove case
deletionOne = 'doretdeplatin'
deletionSeveral = 'doretdeplati'
# insert case
insertOne = 'doretdeplatine1'
insertSeveral = 'doreetdeplatine12'
# repalce case
replaceOne = 'boretdeplatine'
replaceSeveral = 'boretdeplat1ne'
print(a,deletionOne,'->',OneAway(a,deletionOne))
print(a,deletionSeveral,'->',OneAway(a,deletionSeveral))
print(a,insertOne,'->',OneAway(a,insertOne))
print(a,insertSeveral,'->',OneAway(a,insertSeveral))
print(a,replaceOne,'->',OneAway(a,replaceOne))
print(a,replaceSeveral,'->',OneAway(a,replaceSeveral))
print(a,a,'->',OneAway(a,a))
'''
if __name__ == '__main__':
main()
```
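The `OneAway` variant above relies on `difflib.Differ`, which prefixes removed characters with `- ` and inserted ones with `+ `. A quick illustration of that behaviour on its own:

```python
import difflib

d = difflib.Differ()
# Comparing strings character by character: unchanged chars get a two-space
# prefix, deletions "- " and insertions "+ ".
print(list(d.compare("pale", "ple")))
# ['  p', '- a', '  l', '  e']
print(list(d.compare("pale", "bale")))
# ['- p', '+ b', '  a', '  l', '  e']
```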
#### File: CtCy-6th-Edition-Python-/ch-02-LinkedLists/kthTolast.py
```python
class Node:
def __init__(self,value,following):
self.value = value
self.following = following
def display(l):
    node = l
    while node is not None:
        print(node.value)
        node = node.following
def size(l):
    count = 0
    node = l
    while node is not None:
        count += 1
        node = node.following
    return count
def kthtolast(l, k):
si = size(l)
i = 0
head1 = l.value
    head2 = l.following.value
print(head1)
print(head2)
def main():
linkedlist = Node(3,(Node(2,Node(1,Node(9,None)))))
# kthtolast(linkedlist,5)
print(size(linkedlist))
if __name__ == '__main__':
main()
```
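`kthtolast` above is left unfinished (it never uses `k`); a minimal sketch of the standard two-pointer solution, reusing the same `Node` class and leaving the list untouched, could look like this:

```python
def kth_to_last(head, k):
    """Return the value of the k-th node from the end, or None if k > length."""
    lead = trail = head
    # Move the lead pointer k nodes ahead of the trailing pointer.
    for _ in range(k):
        if lead is None:
            return None
        lead = lead.following
    # Advance both until the lead runs off the end; trail then sits k from the end.
    while lead is not None:
        lead = lead.following
        trail = trail.following
    return trail.value

linkedlist = Node(3, Node(2, Node(1, Node(9, None))))
print(kth_to_last(linkedlist, 2))  # -> 1 (second-to-last value)
```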
#### File: CtCy-6th-Edition-Python-/ch-04-TreesGraphs/routebtwNodes.py
```python
import unittest
class Node:
def __init__(self,val,adjacent=None):
self.val = val
self.adjacent = adjacent
self._marked = False
@property
def marked(self):
return self._marked
@marked.setter
def marked(self,value):
if isinstance(value,bool):
self._marked = value
else:
raise TypeError("Value is not a boolean")
def addEdge(self,node):
self.adjacent += [node]
class Queue:
def __init__(self,listQueue=None):
self.listQueue = listQueue
def addElement(self,val):
self.listQueue.append(val)
return self.listQueue
def removeElement(self):
if self.listQueue:
return self.listQueue.pop(0)
def routebtwNodes(start,end):
if start == end:
return True
thequeue = Queue([])
node_s = start
node_s.marked = True
thequeue.addElement(node_s)
while thequeue.listQueue:
node_r = thequeue.removeElement()
if node_r.adjacent != None:
for r in node_r.adjacent:
if r.marked == False:
if r.val == end.val:
return True
else:
thequeue.addElement(r)
r.marked = True
return False
def resetMarked(nodes):
for i in nodes:
i.marked = False
class Test(unittest.TestCase):
def test_routebtwNodes(self):
node_c = Node('C')
node_d = Node('D')
node_b = Node('B', [node_c])
node_a = Node('A',[node_d,node_b])
node_e = Node('E',[node_a])
node_b.addEdge(node_a)
nodes = [node_a,node_b,node_c,node_d,node_e]
testCases = [[node_e,node_e,True],
[node_a,node_b,True],
[node_b,node_a,True],
[node_e,node_c,True],
[node_d,node_c,False]
]
for case in testCases:
print('(node_' + case[0].val + ',node_' + case[1].val + ')' + ' ' + str(case[2]))
self.assertEqual(routebtwNodes(case[0],case[1]),case[2])
resetMarked(nodes)
if __name__ == '__main__':
unittest.main()
```
#### File: CtCy-6th-Edition-Python-/HackerRank/compareTriplets.py
```python
import math
import os
import random
import re
def compareTriplets(a, b):
i = 0
res = [0, 0]
while (i < 3):
if a[i] < b[i]:
res[1] += 1
elif a[i] > b[i]:
res[0] += 1
else:
pass
i += 1
return (res)
if __name__ == '__main__':
a = [17, 28, 30]
b = [99, 16, 8]
result = compareTriplets(a , b)
print(result)
```
#### File: CtCy-6th-Edition-Python-/StrategicSingletonPattern/benchmark.py
```python
class BenchMark:
# https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Singleton.html
class __BenchMark:
def __init__(self,arg):
self.val = arg
def __str__(self):
return repr(self)
instance = None
def __init__(self,arg):
if not BenchMark.instance:
BenchMark.instance = BenchMark.__BenchMark(arg)
else:
BenchMark.instance.val = arg
```
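A short usage sketch for the singleton above: constructing `BenchMark` twice reuses the single inner `__BenchMark` object and only updates its value.

```python
a = BenchMark("first run")
b = BenchMark("second run")
assert a.instance is b.instance    # both see the one shared inner object
print(BenchMark.instance.val)      # -> second run
```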
#### File: CtCy-6th-Edition-Python-/StrategicSingletonPattern/employee.py
```python
from abc import ABC,abstractmethod
# Another example of subclasses
class Employee(ABC):
def __init__(self,rank=None):
self.currentcall = None
self._rank = rank
def isfree(self):
return self.currentcall == None
class Director(Employee):
def rankdirector(self):
self._rank = 2
class Manager(Employee):
def rankmanager(self):
self._rank = 1
class Respondent(Employee):
def rankrespondent(self):
self._rank = 0
```
|
{
"source": "jgraille/reveng",
"score": 2
}
|
#### File: jgraille/reveng/__main__.py
```python
from cmd import Cmd
from pyfiglet import Figlet
from utils.utils import init_params
from __init__ import Database
from src.automl import AutoMl
from src.explainautoml import ExplainModel
from src.preprocessing import PreProcessing
from src.geneticlearn import GeneticProgramming
from src.preclustering import PreClustering
import pandas
import numpy
pandas.set_option('display.max_columns', 500)
pandas.set_option('display.width', 1000)
class Menu(Cmd):
f = Figlet(font='slant')
print(f.renderText(' Rev Eng\n'))
print('----------------------------------------------')
print('---------------------Menu---------------------')
print('----------------------------------------------\n')
print('* Computing preclustering / outliers enter--> cl\n')
print('* Computing genetic programming enter--> gp\n')
print('* Running an autml instance enter--> automl\n')
print('* Press q to exit at anytime \n')
prompt = 'reveng> '
def __init__(self):
config_db, config_automl, config_file1, config_file2, config_file3 = init_params()
super(Menu, self).__init__()
self.config_db = config_db
self.config_automl = config_automl
self.config_file = config_file1
self.preprocessing = PreProcessing(config_file=self.config_file)
def do_q(self, args=True):
print('\n Closed RevEng')
raise SystemExit
def do_codb(self, args=True):
print('\nConnecting to a database...')
try:
db = Database(config=self.config_db)
except Exception as e:
print('\nFailed to connect to the database:', e.__repr__())
return 1
print('Connection ok\n')
def do_cl(self, args=True):
try:
outliers, data = self.preprocessing.outliers()
print('\nOutliers done... ')
if outliers:
print('\nFound {} outliers'.format(len(outliers)))
else:
print('\nNo outliers found')
scaled_data = self.preprocessing.frequency_and_scale()
print('\nFrequency and scale done... ')
clusters = PreClustering(data=scaled_data)
labels_clusters = clusters.running()
print('\nDbscan done... ')
print('\nClusters labels: \n', numpy.unique(labels_clusters))
clusters.display_clusters_outliers(outliers=outliers, data=data, labels_clusters=labels_clusters)
except Exception as e:
print('\nFailed to compute dbscan...', e.__repr__())
def do_gp(self, args=True):
try:
train_test_split, feature_names = self.preprocessing.encoding_and_split()
res = GeneticProgramming(x_train=train_test_split[0],
y_train=train_test_split[2],
excludes=self.preprocessing.excludes,
header=self.preprocessing.header,
config_file=self.config_file)
pop, log, hof = res.calculate()
print(pop[50])
except Exception as e:
print('\nFailed to compute gp...', e.__repr__())
def do_automl(self, args=True):
try:
train_test_split, feature_names = self.preprocessing.encoding_and_split()
reveng_model = AutoMl(train=train_test_split[0],
test=train_test_split[1],
labels_train=train_test_split[2],
labels_test=train_test_split[3],
feature_names=feature_names,
categforical_features=self.preprocessing.categorical_features)
reveng_model.running(config_automl=self.config_automl)
reveng_model.h2oshellinformations(model=reveng_model.model, test_h2o_df=reveng_model.test_h2o_df, automl=True)
exp = ExplainModel(train_test_split=train_test_split,
feature_names=feature_names,
class_names=self.preprocessing.class_names,
categorical_features=self.preprocessing.categorical_features,
categorical_names=self.preprocessing.categorical_names,
row=3).explainer(model=reveng_model.model)
print(exp.as_list())
reveng_model.close()
except Exception as e:
print('\nFailed to connect to h2o / process the file...', e.__repr__())
def main():
menu = Menu().cmdloop()
if __name__ == '__main__':
main()
```
#### File: reveng/src/h2owrapper.py
```python
import numpy
import pandas
import h2o
class H2oWrapper:
# from https://marcotcr.github.io/lime/tutorials/Tutorial_H2O_continuous_and_cat.html
def __init__(self, model, column_names):
self.model = model
self.column_names = column_names
def predict_proba(self, this_array):
# If we have just 1 row of data we need to reshape it
shape_tuple = numpy.shape(this_array)
if len(shape_tuple) == 1:
this_array = this_array.reshape(1, -1)
# We convert the numpy array that Lime sends to a pandas dataframe and
# convert the pandas dataframe to an h2o frame
self.pandas_df = pandas.DataFrame(data=this_array, columns=self.column_names)
self.h2o_df = h2o.H2OFrame(self.pandas_df)
# Predict with the h2o drf
self.predictions = self.model.predict(self.h2o_df).as_data_frame()
# the first column is the class labels, the rest are probabilities for
# each class
#self.predictions = self.predictions.iloc[:, 1:].values
self.predictions = self.predictions.iloc[:, 0:].values
return self.predictions
```
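A sketch of how this wrapper is usually handed to LIME, following the tutorial linked in the comment; `h2o_model`, `x_train`, `feature_names` and `class_names` are placeholders assumed to exist in the calling code (compare `ExplainModel` in `explainautoml.py`):

```python
from lime.lime_tabular import LimeTabularExplainer

wrapped = H2oWrapper(h2o_model, feature_names)
explainer = LimeTabularExplainer(
    x_train,
    feature_names=feature_names,
    class_names=class_names,
    discretize_continuous=True,
)
# LIME repeatedly calls predict_proba with perturbed numpy rows.
explanation = explainer.explain_instance(
    x_train[3],
    wrapped.predict_proba,
    num_features=5,
)
print(explanation.as_list())
```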
#### File: reveng/src/preclustering.py
```python
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import DBSCAN
import numpy
from matplotlib import pyplot as plt
import os
class PreClustering:
def __init__(self, data):
self.data = data
def plot_distances_neighbors(self):
try:
neigh = NearestNeighbors(n_neighbors=2)
neigh.fit(self.data)
distances, indices = neigh.kneighbors(self.data, return_distance=True)
distances = numpy.sort(distances, axis=0)
distances = distances[:, 1]
plt.clf()
plt.ylabel('Distances')
plt.xlabel('N points')
plt.title('Nearest neighbors distances per point\n choose epsilon (maximum curvature)')
plt.plot(distances)
plt.show()
except Exception as e:
print('\nProcedure PreClustering.plot_distances_neighbors did not work', e.__repr__())
def epsilon(self):
try:
self.plot_distances_neighbors()
epsilon = input('Enter epsilon: ')
#epsilon = 0.27
return epsilon
except Exception as e:
print('\nMethod PreClustering.epsilon did not work', e.__repr__())
def running(self):
try:
print('\nStarting dbscan...')
epsilon = float(self.epsilon())
clusters = DBSCAN(eps=epsilon, min_samples=2,
metric='euclidean', n_jobs=(os.cpu_count() - 1)).fit_predict(self.data)
return clusters
except Exception as e:
print('\nMethod PreClustering.running did not work', e.__repr__())
def display_clusters_outliers(self, outliers, data, labels_clusters):
try:
if outliers:
data['cluster'] = labels_clusters
print('\nOutliers rows:\n', data.iloc[outliers])
except Exception as e:
print('\nMethod PreClustering.display_clusters_outliers did not work', e.__repr__())
```
|
{
"source": "jgranley/CS291a",
"score": 2
}
|
#### File: CS291a/cs291a/dataset_gen.py
```python
import numpy as np
import random
import h5py
import os
import json
from datetime import datetime
import argparse
import pulse2percept as p2p
def rand_stim(implant, n_electrodes=1):
maxamp = 10
maxfreq = 200
# randomly pick UP TO n_electrodes
sample_elecs = random.randint(1, n_electrodes)
elecs = random.sample([i for i in range(len(implant.electrodes))], sample_elecs)
stim = np.zeros((len(implant.electrodes), 3), dtype='float32')
for elec in elecs:
amp = random.random() * (maxamp - 1) + 1
freq = random.random() * (maxfreq - 1) + 1
pdur = random.expovariate(1)
while pdur > 1000 / freq / 2 or pdur < 0.01 or pdur > 100:
pdur = random.expovariate(1)
stim[elec] = np.array([freq, amp, pdur])
return stim
def rand_percepts(model, implant, n_elecs=1, n_samples=10000):
model.build()
x = np.array([implant[e].x for e in implant.electrodes], dtype='float32')
y = np.array([implant[e].y for e in implant.electrodes], dtype='float32')
percepts = []
stims = []
for i in range(n_samples):
stim = rand_stim(implant, n_electrodes=n_elecs)
percept = model._predict_spatial_jax(stim, x, y).reshape(model.grid.shape)
stims.append(stim)
percepts.append(percept)
percepts = np.array(percepts)
stims = np.array(stims)
return percepts, stims
def write_h5(percepts, stims, path):
if os.path.exists(path):
print("h5 exists, overwriting")
percepts = np.array(percepts, dtype='float32')
stims = np.array(stims, dtype='float32')
hf = h5py.File(path, 'w')
hf.create_dataset('stims', data=stims)
hf.create_dataset('percepts', data=percepts)
hf.close()
def read_h5(path):
if not os.path.exists(path):
raise ValueError("Provided path does not exist")
hf = h5py.File(path, 'r')
if 'stims' not in hf.keys() or 'percepts' not in hf.keys():
raise ValueError("H5 formatted incorrectly")
stims = np.array(hf.get('stims'), dtype='float32')
percepts = np.array(hf.get('percepts'), dtype='float32')
hf.close()
return percepts, stims
def get_path(model, implant, n_electrodes):
path = (f'percepts'
f'_{str(type(implant)).lower().split(".")[-1][:-2]}'
f'_{n_electrodes}elec'
f'_rho{model.rho}lam{model.axlambda}'
f'_{datetime.now().strftime("%m%d%H%M")}'
f'.h5')
return path
def save_h5(dirname, model, implant, percepts, stims, n_electrodes):
info_json = os.path.join(dirname, "info.json")
if os.path.exists(info_json):
info = json.load(open(info_json))
else:
info = {}
path = get_path(model, implant, n_electrodes)
info[path] = {}
info[path]['model'] = str(type(model))
info[path]['implant'] = str(type(implant))
info[path]['n_elecs'] = n_electrodes
info[path]['rho'] = model.rho
info[path]['axlambda'] = model.axlambda
info[path]['xystep'] = model.xystep
info[path]['xrange'] = str(model.xrange)
info[path]['yrange'] = str(model.yrange)
info[path]['size'] = len(stims)
info[path]['subject'] = ''
info[path]['min_ax_sensitivity'] = model.min_ax_sensitivity
for p in ['a' + str(i) for i in range(10)]:
info[path][p] = getattr(model, p)
json.dump(info, open(info_json, 'w'))
write_h5(percepts, stims, os.path.join(dirname, path))
if __name__ == '__main__':
n_electrodes = 15
n_samples = 6000
model = p2p.models.BiphasicAxonMapModel(engine='jax', a4=0, rho=200, xrange=(-14, 12), yrange=(-12, 12), xystep=0.5, axlambda=800)
model.build()
implant = p2p.implants.ArgusII(rot=-30)
percepts, stims = rand_percepts(model, implant, n_elecs = n_electrodes, n_samples = n_samples)
save_h5('/home/jgranley/cs291a/data/percepts', model, implant, percepts, stims, n_electrodes)
```
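A quick sanity check of the `write_h5`/`read_h5` round trip above, using dummy arrays and an illustrative path:

```python
import numpy as np

percepts = np.random.rand(4, 49, 53).astype('float32')  # dummy percept maps
stims = np.random.rand(4, 60, 3).astype('float32')      # dummy stims (60 electrodes x freq/amp/pdur)
write_h5(percepts, stims, '/tmp/percepts_demo.h5')
loaded_percepts, loaded_stims = read_h5('/tmp/percepts_demo.h5')
assert loaded_percepts.shape == percepts.shape
assert np.allclose(loaded_stims, stims)
```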
#### File: CS291a/cs291a/models.py
```python
from matplotlib.colors import Normalize
import tensorflow as tf
from keras import backend as K
from keras import layers
import keras
import numpy as np
from skimage.transform import resize
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import datetime
import json
import h5py
import imageio
import pulse2percept as p2p
from pulse2percept.models import BiphasicAxonMapModel
from pulse2percept.stimuli import BiphasicPulseTrain
from pulse2percept.utils import center_image
from pulse2percept.implants import ArgusII
# import dataset_gen
# physical_devices = tf.config.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
def get_loss(model, implant, regularize=None, reg_coef=0.05, size_norm=False, loss_fn='mse'):
bundles = model.grow_axon_bundles()
axons = model.find_closest_axon(bundles)
axon_contrib = model.calc_axon_sensitivity(axons, pad=True).astype(np.float32)
axon_contrib = tf.constant(axon_contrib, dtype='float32')
x = tf.constant([implant[e].x for e in implant.electrodes], dtype='float32')
y = tf.constant([implant[e].y for e in implant.electrodes], dtype='float32')
rho = model.rho
# get effect models. Need to reimplement them in tensorflow fashion
def scale_threshold(pdur):
return model.a1 + model.a0*pdur
def predict_freq_amp(amp, freq):
return model.a2*amp + model.a3*freq
def bright(freq, amp, pdur):
F_bright = predict_freq_amp(amp * scale_threshold(pdur), freq)
return F_bright
def size(freq, amp, pdur):
min_f_size = 10**2 / (model.rho**2)
F_size = model.a5 * amp * model.scale_threshold(pdur) + model.a6
return tf.maximum(F_size, min_f_size)
def streak(freq, amp, pdur):
min_f_streak = 10**2 / (model.axlambda ** 2)
F_streak = model.a9 - model.a7 * pdur ** model.a8
return tf.maximum(F_streak, min_f_streak)
def reg_none(y_pred):
return tf.zeros_like(y_pred[:, 0, 0])
def reg_l1(y_pred):
return tf.reduce_sum(tf.abs(y_pred[:, :, 1]), axis=-1)
def reg_l1_ampfreq(y_pred):
return tf.reduce_sum(tf.abs(y_pred[:, :, 1]), axis=-1) + tf.reduce_sum(tf.abs(y_pred[:, :, 0]), axis=-1)
def reg_l2(y_pred):
return tf.reduce_sum(y_pred[:, :, 1]**2, axis=-1)
def reg_elecs(y_pred):
return tf.math.count_nonzero((y_pred[:, :, 1] > 0.5), axis=-1, dtype='float32')
if regularize is None:
regfn = reg_none
elif regularize == 'l1':
regfn = reg_l1
elif regularize == 'l1_ampfreq':
regfn = reg_l1_ampfreq
elif regularize == 'l2':
regfn = reg_l2
elif regularize == 'elecs':
regfn = reg_elecs
else:
regfn = reg_none
def biphasic_axon_map_batched(ypred):
bright_effects = bright(ypred[:, :, 0],
ypred[:, :, 1],
ypred[:, :, 2])
# make bright effects 0 if amp is 0
# mask = tf.cast(ypred[:, :, 0] > 0.5, 'float32')
# bright_effects = bright_effects * mask
size_effects = size(ypred[:, :, 0],
ypred[:, :, 1],
ypred[:, :, 2])
streak_effects = streak(ypred[:, :, 0],
ypred[:, :, 1],
ypred[:, :, 2])
eparams = tf.stack([bright_effects, size_effects, streak_effects], axis=2)
d2_el = (axon_contrib[:, :, 0, None] - x)**2 + (axon_contrib[:, :, 1, None] - y)**2
        intensities = (
            eparams[:, None, None, :, 0]
            * tf.math.exp(
                -d2_el[None, :, :, :]
                / (2. * rho**2 * eparams[:, :, 1])[:, None, None, :]
            )
            * (axon_contrib[None, :, :, 2, None] ** (1. / eparams[:, None, None, :, 2]))
        )
return tf.reduce_max(tf.reduce_sum(intensities, axis=-1), axis=-1)
# assumes model outputs same shape as ytrue
def mse(ytrue, ypred):
pred_imgs = biphasic_axon_map_batched(ypred)
yt = tf.reshape(ytrue, (-1, model.grid.shape[0] * model.grid.shape[1]))
loss = tf.reduce_mean((pred_imgs - yt)**2, axis=-1)
if size_norm: # normalize by total number of pixels
loss /= tf.math.count_nonzero(yt, axis=-1, dtype='float32')
loss *= tf.cast(model.grid.shape[0] * model.grid.shape[1], 'float32')
loss += reg_coef * regfn(ypred)
return loss
def ms_ssim(ytrue, ypred):
pred_imgs = biphasic_axon_map_batched(ypred)
pred_imgs = tf.reshape(pred_imgs, (-1, model.grid.shape[0], model.grid.shape[1], 1))
ytrue = tf.reshape(ytrue, (-1, model.grid.shape[0], model.grid.shape[1], 1))
loss = 1 - tf.image.ssim_multiscale(ytrue, pred_imgs, 3, power_factors = (0.0448, 0.2856, 0.3001, 0.2363), filter_size=7)
loss += reg_coef * regfn(ypred)
return loss
if loss_fn == 'mse':
fn = mse
fn.__name__ = 'mse_' + str(regularize)
return tf.function(fn, jit_compile=True)
elif loss_fn == 'msssim':
fn = ms_ssim
fn.__name__ = 'msssim_' + str(regularize)
# cant jit msssim
return tf.function(fn)
def get_model(implant, input_shape, num_dense=0, force_zero=False, sigmoid=False, clip=False):
""" Makes a keras model for the model
"""
inputs = layers.Input(shape=input_shape, dtype='float32')
x = tf.image.flip_up_down(inputs)
# fully convolutional
num_filters = [100, 1000, 100]
kernel_sizes = [5, 5, 5]
# for idx_conv in range(3):
# x = layers.Conv2D(num_filters[idx_conv], kernel_sizes[idx_conv], padding='same')(x)
# if idx_conv <2:
# x = layers.MaxPool2D()(x)
# x = layers.Activation('relu')(x)
x = layers.Flatten()(x)
for i in range(num_dense):
x = layers.Dense(500, activation='relu')(x)
amps = layers.Dense(len(implant.electrodes))(x)
if clip == 'relu':
amps = layers.ReLU(max_value=10)(amps)
elif clip == 'sigmoid':
amps = layers.Activation('sigmoid')(amps) * 10.
else:
amps = layers.ReLU()(amps)
if force_zero:
amps = tf.where(amps >= 0.5, amps, tf.zeros_like(amps))
freqs = layers.Dense(len(implant.electrodes))(x)
if clip == 'relu':
freqs = layers.ReLU(max_value=200)(freqs)
elif clip == 'sigmoid':
freqs = layers.Activation('sigmoid')(freqs) * 200.
else:
freqs = layers.ReLU()(freqs)
if force_zero:
freqs = tf.where(amps >= 0.5, freqs, tf.zeros_like(freqs))
pdurs = layers.Dense(len(implant.electrodes))(x)
if clip == 'relu':
pdurs = layers.ReLU(max_value=100)(pdurs) + 1e-3
elif clip == 'sigmoid':
pdurs = layers.Activation('sigmoid')(pdurs) * 100. + 1e-3
else:
pdurs = layers.ReLU()(pdurs) + 1e-3
if force_zero:
pdurs = tf.where(amps >= 0.5, pdurs, tf.zeros_like(pdurs))
outputs = tf.stack([freqs, amps, pdurs], axis=-1)
if sigmoid:
mask = layers.Dense(len(implant.electrodes), activation='sigmoid')(x)
outputs = outputs * mask[:, :, None]
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def train_model(nn, model, implant, reg, targets, stims, reg_coef, datatype, opt, learning_rate, clip=False,
batch_size=32, num_dense=0, force_zero=False, sigmoid=False, size_norm=False, fonts='all', loss_str='mse'):
data_dir = "../data"
results_folder = os.path.join("../results", datatype)
if not os.path.exists(results_folder):
os.mkdir(results_folder)
ex = np.array([implant[e].x for e in implant.electrodes], dtype='float32')
ey = np.array([implant[e].y for e in implant.electrodes], dtype='float32')
targets_train, targets_test, stims_train, stims_test = train_test_split(targets, stims, test_size=0.2)
if opt == 'sgd':
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
elif opt == 'adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
else:
# can pass in custom optimizer
optimizer = opt
lossfn = get_loss(model, implant, regularize=reg, reg_coef=reg_coef, size_norm=size_norm, loss_fn=loss_str)
loss_noreg = get_loss(model, implant, size_norm=size_norm, loss_fn=loss_str)
def loss_reg(y_true, y_pred):
return lossfn(y_true, y_pred) - loss_noreg(y_true, y_pred)
loss_reg.__name__ = str(reg)
dt = datetime.datetime.now().strftime("%m%d-%H%M%S")
modelname = (f"nn_{loss_str}"
f"_{len(implant.electrodes)}elecs"
f"_{opt}_lr{learning_rate}"
f"_{str(reg)}_coef{str(reg_coef)}"
+ dt)
log_dir = os.path.join("../results/tensorboard/", datatype, modelname)
modelpath = os.path.join(results_folder, modelname)
tb = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
cp = tf.keras.callbacks.ModelCheckpoint(modelpath, save_best_only=False)
es = tf.keras.callbacks.EarlyStopping(patience=250, monitor='loss', restore_best_weights=True)
nn.compile(optimizer=optimizer, loss=lossfn, metrics=[loss_noreg, loss_reg])
hist = nn.fit(x=targets_train, y=targets_train, batch_size=batch_size, epochs=1000,
callbacks=[ es], validation_data=(targets_test, targets_test), validation_batch_size=batch_size)
hist = hist.history
# save_model
print(f"done training {modelname}")
nn.save(modelpath)
json_path = os.path.join(results_folder, "info.json")
if os.path.exists(json_path):
info = json.load(open(json_path))
else:
info = {}
info['1elec'] = {}
info['msssim'] = {}
if loss_str == 'mse':
dict_name = '1elec' # compatibility
elif loss_str == 'msssim':
dict_name = 'msssim'
info[dict_name][modelname] = {}
info[dict_name][modelname]['test_loss'] = np.min(hist['val_loss'])
info[dict_name][modelname]['train_loss'] = np.min(hist['loss'])
info[dict_name][modelname]['epochs'] = len(hist['val_loss'])
info[dict_name][modelname]['opt'] = opt
info[dict_name][modelname]['lr'] = learning_rate
info[dict_name][modelname]['reg'] = reg
info[dict_name][modelname]['reg_coef'] = reg_coef
info[dict_name][modelname]['batch_size'] = batch_size
info[dict_name][modelname]['dense_layers'] = num_dense
info[dict_name][modelname]['tensorboard_logdir'] = log_dir
info[dict_name][modelname]['rho'] = model.rho
info[dict_name][modelname]['lambda'] = model.axlambda
info[dict_name][modelname]['n_elecs'] = 1
info[dict_name][modelname]['shape'] = str(model.grid.shape)
info[dict_name][modelname]['force_good_stims'] = str(force_zero)
info[dict_name][modelname]['sigmoid'] = str(sigmoid)
info[dict_name][modelname]['size_norm'] = str(size_norm)
info[dict_name][modelname]['clip'] = clip
info[dict_name][modelname]['fonts'] = fonts
json.dump(info, open(json_path, 'w'))
# plot some images
    if datatype == 'percepts' or datatype == 'alphabet':
if not os.path.exists(os.path.join(results_folder, 'predicted_images')):
os.mkdir(os.path.join(results_folder, 'predicted_images'))
ims_per_row = 15
rows = 2
fig, axes = plt.subplots(nrows = rows*2, ncols=ims_per_row, figsize=(20, 20))
fig.subplots_adjust(wspace=0, hspace=-0.75)
# predicted first
for i in range(rows):
for j in range(ims_per_row):
if j == 1:
plt.ylabel("Preds", fontweight="bold", fontsize=20)
plt.sca(axes[2*i][j])
idx = i * ims_per_row + j
pred = nn(targets_test[idx:idx+1]).numpy()
score = float(lossfn(targets_test[idx:idx+1], pred).numpy())
pred_img = model._predict_spatial_jax(pred[0], ex, ey)
plt.imshow(pred_img.reshape(model.grid.shape), cmap='gray')
plt.annotate(f"{str(round(score, 3))}", (1, 6), color='white')
plt.yticks([])
plt.xticks([])
axes[2*i][j].spines['bottom'].set_color('gray')
axes[2*i][j].spines['top'].set_color('gray')
axes[2*i][j].spines['right'].set_color('gray')
axes[2*i][j].spines['left'].set_color('gray')
axes[2*i][j].spines['bottom'].set_linewidth(2)
axes[2*i][j].spines['top'].set_linewidth(1)
axes[2*i][j].spines['right'].set_linewidth(2)
axes[2*i][j].spines['left'].set_linewidth(2)
# plt.axis(False)
for i in range(rows):
for j in range(ims_per_row):
if j == 1:
plt.ylabel("True", fontweight="bold", fontsize=20)
plt.sca(axes[2*i+1][j])
idx = i * ims_per_row + j
            plt.imshow(targets_test[idx], cmap='gray')
# plt.axis(False)
plt.yticks([])
plt.xticks([])
axes[2*i+1][j].spines['bottom'].set_color('gray')
axes[2*i+1][j].spines['top'].set_color('gray')
axes[2*i+1][j].spines['right'].set_color('gray')
axes[2*i+1][j].spines['left'].set_color('gray')
axes[2*i+1][j].spines['bottom'].set_linewidth(2)
axes[2*i+1][j].spines['top'].set_linewidth(1)
axes[2*i+1][j].spines['right'].set_linewidth(2)
axes[2*i+1][j].spines['left'].set_linewidth(2)
    elif datatype == 'alphabet':
pass
plt.savefig(os.path.join(results_folder, 'predicted_images', modelname + "_" + str(round(np.min(hist['val_loss']), 3)) +".png"), bbox_inches="tight")
return round(np.min(hist['val_loss']), 4)
def read_h5(path):
if not os.path.exists(path):
raise ValueError("Provided path does not exist")
hf = h5py.File(path, 'r')
if 'stims' not in hf.keys() or 'percepts' not in hf.keys():
raise ValueError("H5 formatted incorrectly")
stims = np.array(hf.get('stims'), dtype='float32')
percepts = np.array(hf.get('percepts'), dtype='float32')
hf.close()
return percepts, stims
def load_alphabet(path, model, fonts=[i for i in range(31)]):
folders = os.listdir(path)
folders = [f for f in folders if f.isnumeric()]
targets = []
labels = []
for folder in folders:
letters = os.listdir(os.path.join(path, folder))
for font in fonts:
if str(font) + ".png" not in letters:
continue
img = imageio.imread(os.path.join(path, folder, str(font) + ".png"))
img = resize(img, model.grid.shape, anti_aliasing=True)
img = 1 - img # invert
img = 2 * img # rescale
targets.append(np.array(img, dtype='float32'))
labels.append(int(folder))
targets = np.array(targets, dtype='float32')
labels = np.array(labels)
return targets, labels
def encode(target, implant, model, mode='amp', stimrange=(0, 2), maxval=None):
stim = []
if maxval is None:
maxval = np.max(target)
for elec in implant.electrodes:
# find location to sample
x_dva, y_dva = model.retinotopy.ret2dva(implant.electrodes[elec].x, implant.electrodes[elec].y)
# interpolate?
# print(x_dva, y_dva)
x_img = (x_dva - model.xrange[0]) / model.xystep
y_img = (y_dva - model.yrange[0]) / model.xystep
x_img = int(round(x_img, ndigits=0))
y_img = int(round(y_img, ndigits=0))
# image is centered differently
# print(x_img, y_img)
# print()
px_intensity = target[y_img, x_img, 0]
stim_intensity = px_intensity / maxval * (stimrange[1] - stimrange[0]) + stimrange[0]
if stim_intensity < 0.5:
stim_intensity = 0
freq = 0
else:
freq = 20
pulse = np.array([freq, stim_intensity, 0.45], dtype='float32')
stim.append(pulse)
return np.array(stim, dtype='float32')
if __name__ == "__main__":
model = BiphasicAxonMapModel(axlambda=800, rho=200, a4=0, engine="jax", xystep=0.5, xrange=(-14, 12), yrange=(-12, 12))
model.build()
implant = ArgusII(rot=-30)
#####################################################################
# PERCEPTS #
#####################################################################
data_type = 'percepts'
h5_file = 'percepts_argusii_1elec_rho200lam800_12031654.h5'
targets, stims = read_h5(os.path.join("../data", data_type, h5_file))
targets = targets.reshape((-1, 49, 53, 1))
# print(targets)
# print(targets.dtype)
# print(targets.shape)
# test opts / learning rates
# best_loss = 99999
# best_opt = ""
# best_lr = 999
# for opt in ['sgd', 'adam']:
# for lr in [0.00001, 0.00005, 0.0001, 0.001]:
# nn = get_model(implant, targets[0].shape)
# loss = train_model(nn, model, implant, None, targets, stims, 0.005, data_type, opt, lr)
# if loss < best_loss:
# best_loss = loss
# best_opt = opt
# best_lr = lr
# test architectures
# best_loss = 9999
# best_ndense = 0
# best_force = False
# for n_dense in [0, 1, 3]:
# for force_zero in [True, False]:
# nn = get_model(implant, targets[0].shape, num_dense=n_dense, force_zero=force_zero)
# loss = train_model(nn, model, implant, None, targets, stims, 0.005, data_type, best_opt, best_lr, num_dense=n_dense, force_zero=force_zero)
# if loss < best_loss:
# best_loss = loss
# best_ndense = n_dense
# best_force = force_zero
# test regularization / coef
# best_loss = 9999
# for reg in ['l1', 'l2', 'l1_ampfreq', 'elecs']:
# for coef in [0.005, 0.01]:
# for lr, opt in zip([0.0001, 0.00005], ['adam', 'adam']):
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False)
# loss = train_model(nn, model, implant, reg, targets, stims, coef, data_type, opt, lr, num_dense=1, force_zero=False)
# if loss < best_loss:
# best_loss = loss
# sig = False
# for lr, opt in zip([ 0.00001], ['adam']):
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False, sigmoid=True)
# loss = train_model(nn, model, implant, None, targets, stims, 0.0, data_type, opt, lr, num_dense=1, force_zero=False, sigmoid=True)
# if loss < 0.07:
# sig = True
# for lr, opt in zip([0.00001], ['adam']):
# for sig in [True, False]:
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False, sigmoid=sig)
# loss = train_model(nn, model, implant, 'elecs', targets, stims, 0.005 * 7.5, data_type, opt, lr, num_dense=1, force_zero=False, sigmoid=sig, size_norm=True)
# print(best_loss)
#####################################################################
# ALPHABET #
#####################################################################
# data_type = 'alphabet'
# letters, labels = load_alphabet("../data/alphabet", model)
# targets = letters.reshape((-1, 49, 53, 1))
# test opts / learning rates
# for opt in ['sgd', 'adam']:
# for lr in [0.00001, 0.0001, 0.001]:
# nn = get_model(implant, targets[0].shape, num_dense=1)
# loss = train_model(nn, model, implant, None, targets, labels, 0.005, data_type, opt, lr)
# if loss < best_loss:
# best_loss = loss
# best_opt = opt
# best_lr = lr
# for n_dense in [0, 1, 3]:
# nn = get_model(implant, targets[0].shape, num_dense=n_dense, force_zero=False)
# loss = train_model(nn, model, implant, None, targets, labels, 0.005, data_type, best_opt, best_lr, num_dense=n_dense, force_zero=False)
# if loss < best_loss:
# best_loss = loss
# best_ndense = n_dense
# test regularization / coef
# # best_loss = 9999
# for reg in ['l1', 'l2', 'l1_ampfreq', 'elecs']:
# for coef in [0.05]:
# for lr, opt in zip([0.0001], ['adam']):
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False)
# loss = train_model(nn, model, implant, reg, targets, labels, coef, data_type, opt, lr, num_dense=1, force_zero=False)
# if loss < best_loss:
# best_loss = loss
# sig = False
# # sigmoid
# for lr, opt in zip([ 0.00001, 0.0001], ['adam', 'adam']):
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False, sigmoid=True)
# loss = train_model(nn, model, implant, None, targets, labels, 0.0, data_type, opt, lr, num_dense=1, force_zero=False, sigmoid=True)
# if loss < 0.07:
# sig = True
# for lr, opt in zip([0.00001], ['adam']):
# for sig in [True, False]:
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False, sigmoid=False)
# loss = train_model(nn, model, implant, 'elecs', targets, labels, 0.005 * 7.5, data_type, opt, lr, num_dense=1, force_zero=False, sigmoid=False, size_norm=True)
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False)
# loss = train_model(nn, model, implant, None, targets, labels, 0.0, data_type, 'adam', 0.0002, num_dense=1, force_zero=False)
# for clip in ['relu', 'sigmoid']:
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False, clip=clip)
# loss = train_model(nn, model, implant, None, targets, labels, 0.05, data_type, 'adam', 0.0001, num_dense=1, force_zero=False, clip=clip)
# for font in range(1, 31):
# data_type = 'alphabet'
# letters, labels = load_alphabet("../data/alphabet", model, fonts=[font])
# targets = letters.reshape((-1, 49, 53, 1))
# if len(targets) == 0:
# continue
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False, clip='relu')
# loss = train_model(nn, model, implant, None, targets, labels, 0.0, data_type, 'adam', 0.0001, num_dense=1, force_zero=False, fonts=font, clip='relu', batch_size=16)
# for font in [28, 27, 26, 25, 21, 20, 10]:
# data_type = 'alphabet'
# letters, labels = load_alphabet("../data/alphabet", model, fonts=[font])
# targets = letters.reshape((-1, 49, 53, 1))
# if len(targets) == 0:
# continue
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False, clip='relu')
# loss = train_model(nn, model, implant, 'l1', targets, labels, 0.0001, data_type, 'adam', 0.0001, num_dense=1, force_zero=False, fonts=font, clip='relu', batch_size=16)
# ms-ssim
# data_type = 'alphabet'
# letters, labels = load_alphabet("../data/alphabet", model)
# targets = letters.reshape((-1, 49, 53, 1))
# lossfn = 'msssim'
# for opt in ['sgd', 'adam']:
# for lr in [0.00001, 0.0001, 0.001]:
# nn = get_model(implant, targets[0].shape, num_dense=1)
# loss = train_model(nn, model, implant, None, targets, labels, 0.005, data_type, opt, lr, loss_str=lossfn)
# new model
# model = BiphasicAxonMapModel(axlambda=1400, rho=80, a4=0, engine="jax", xystep=0.5, xrange=(-14, 12), yrange=(-12, 12))
# model.build()
# implant = ArgusII(rot=-30)
# data_type = 'alphabet'
# letters, labels = load_alphabet("../data/alphabet", model)
# targets = letters.reshape((-1, 49, 53, 1))
# nn = get_model(implant, targets[0].shape, num_dense=1, force_zero=False)
# loss = train_model(nn, model, implant, None, targets, labels, 0.0, data_type, 'adam', 0.0002, num_dense=1, force_zero=False)
```
|
{
"source": "jgrassler/monasca-api",
"score": 2
}
|
#### File: api/core/request.py
```python
import falcon
from oslo_context import context
from monasca_api.common.repositories import constants
from monasca_api.v2.common import exceptions
_TENANT_ID_PARAM = 'tenant_id'
"""Name of the query-param pointing at project-id (tenant-id)"""
class Request(falcon.Request):
"""Variation of falcon.Request with context
    The following class enhances :py:class:`falcon.Request` with
:py:class:`context.RequestContext`.
"""
def __init__(self, env, options=None):
super(Request, self).__init__(env, options)
self.context = context.RequestContext.from_environ(self.env)
@property
def project_id(self):
"""Returns project-id (tenant-id)
:return: project-id
:rtype: str
"""
return self.context.tenant
@property
def cross_project_id(self):
"""Returns project-id (tenant-id) found in query params.
This particular project-id is later on identified as
cross-project-id
:return: project-id
:rtype: str
"""
return self.get_param(_TENANT_ID_PARAM, required=False)
@property
def user_id(self):
"""Returns user-id
:return: user-id
:rtype: str
"""
return self.context.user
@property
def roles(self):
"""Returns roles associated with user
:return: user's roles
:rtype: list
"""
return self.context.roles
@property
def limit(self):
"""Returns LIMIT query param value.
        'limit' is not a required query param.
In case it is not found, py:data:'.constants.PAGE_LIMIT'
value is returned.
:return: value of 'limit' query param or default value
:rtype: int
:raise exceptions.HTTPUnprocessableEntityError: if limit is not valid integer
"""
limit = self.get_param('limit', required=False, default=None)
if limit is not None:
if limit.isdigit():
limit = int(limit)
if limit > constants.PAGE_LIMIT:
return constants.PAGE_LIMIT
else:
return limit
else:
err_msg = 'Limit parameter must be a positive integer'
raise exceptions.HTTPUnprocessableEntityError('Invalid limit', err_msg)
else:
return constants.PAGE_LIMIT
def __repr__(self):
return '%s, context=%s' % (self.path, self.context)
```
#### File: monasca_api/conf/database.py
```python
from oslo_config import cfg
url_opt = cfg.StrOpt(name='url',
default='$database.connection',
help='''
The SQLAlchemy connection string to use to connect to the database
''',
required=False,
deprecated_for_removal=True,
deprecated_since='1.6.0',
deprecated_reason='Please use database.connection option,'
'database.url is scheduled for removal '
'in Pike release')
def register_opts(conf):
conf.register_opt(url_opt, 'database')
def list_opts():
return 'database', [url_opt]
```
#### File: monasca_api/tests/base.py
```python
import falcon
from falcon import testing
from oslo_config import cfg
from oslo_config import fixture as oo_cfg
from oslo_context import fixture as oo_ctx
from oslotest import base as oslotest_base
from monasca_api.api.core import request
from monasca_api import conf
from monasca_api import config
class MockedAPI(falcon.API):
"""MockedAPI
Subclasses :py:class:`falcon.API` in order to overwrite
request_type property with custom :py:class:`request.Request`
"""
def __init__(self):
super(MockedAPI, self).__init__(
media_type=falcon.DEFAULT_MEDIA_TYPE,
request_type=request.Request,
response_type=falcon.Response,
middleware=None,
router=None
)
class ConfigFixture(oo_cfg.Config):
"""Mocks configuration"""
def __init__(self):
super(ConfigFixture, self).__init__(config.CONF)
def setUp(self):
super(ConfigFixture, self).setUp()
self.addCleanup(self._clean_config_loaded_flag)
conf.register_opts()
self._set_defaults()
config.parse_args(argv=[]) # prevent oslo from parsing test args
@staticmethod
def _clean_config_loaded_flag():
config._CONF_LOADED = False
def _set_defaults(self):
self.conf.set_default('user', 'monasca', 'influxdb')
class BaseTestCase(oslotest_base.BaseTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(ConfigFixture())
self.useFixture(oo_ctx.ClearRequestContext())
@staticmethod
def conf_override(**kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.items():
cfg.CONF.set_override(k, v, group)
@staticmethod
def conf_default(**kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.items():
cfg.CONF.set_default(k, v, group)
class BaseApiTestCase(BaseTestCase, testing.TestBase):
api_class = MockedAPI
@staticmethod
def create_environ(*args, **kwargs):
return testing.create_environ(
*args,
**kwargs
)
```
|
{
"source": "jgrassler/papercut",
"score": 2
}
|
#### File: papercut/storage/phorum_mysql.py
```python
import MySQLdb
import time
from mimify import mime_encode_header, mime_decode_header
import re
import smtplib
import md5
import papercut.storage.mime as mime
import papercut.settings
import papercut.storage.strutil as strutil
from papercut.version import __VERSION__
settings = papercut.settings.CONF()
# patch by <NAME> <<EMAIL>> to fix the handling of unusual encodings of messages
q_quote_multiline = re.compile("=\?(.*?)\?[qQ]\?(.*?)\?=.*?=\?\\1\?[qQ]\?(.*?)\?=", re.M | re.S)
# we don't need to compile the regexps everytime..
doubleline_regexp = re.compile("^\.\.", re.M)
singleline_regexp = re.compile("^\.", re.M)
from_regexp = re.compile("^From:(.*)<(.*)>", re.M)
subject_regexp = re.compile("^Subject:(.*)", re.M)
references_regexp = re.compile("^References:(.*)<(.*)>", re.M)
lines_regexp = re.compile("^Lines:(.*)", re.M)
# phorum configuration files related regexps
moderator_regexp = re.compile("(.*)PHORUM\['ForumModeration'\](.*)='(.*)';", re.M)
url_regexp = re.compile("(.*)PHORUM\['forum_url'\](.*)='(.*)';", re.M)
admin_regexp = re.compile("(.*)PHORUM\['admin_url'\](.*)='(.*)';", re.M)
server_regexp = re.compile("(.*)PHORUM\['forum_url'\](.*)='(.*)http://(.*)/(.*)';", re.M)
mail_code_regexp = re.compile("(.*)PHORUM\['PhorumMailCode'\](.*)=(.*)'(.*)';", re.M)
class Papercut_Storage:
"""
Storage Backend interface for the Phorum web message board software (http://phorum.org)
This is the interface for Phorum running on a MySQL database. For more information
on the structure of the 'storage' package, please refer to the __init__.py
available on the 'storage' sub-directory.
"""
def __init__(self):
self.conn = MySQLdb.connect(host=settings.dbhost, db=settings.dbname, user=settings.dbuser, passwd=settings.dbpass)
self.cursor = self.conn.cursor()
def get_message_body(self, headers):
"""Parses and returns the most appropriate message body possible.
The function tries to extract the plaintext version of a MIME based
message, and if it is not available then it returns the html version.
"""
return mime.get_text_message(headers)
def quote_string(self, text):
"""Quotes strings the MySQL way."""
return text.replace("'", "\\'")
def group_exists(self, group_name):
stmt = """
SELECT
COUNT(*) AS total
FROM
forums
WHERE
LOWER(nntp_group_name)=LOWER('%s')""" % (group_name)
self.cursor.execute(stmt)
return self.cursor.fetchone()[0]
def article_exists(self, group_name, style, range):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
COUNT(*) AS total
FROM
%s
WHERE
approved='Y'""" % (table_name)
if style == 'range':
stmt = "%s AND id > %s" % (stmt, range[0])
if len(range) == 2:
stmt = "%s AND id < %s" % (stmt, range[1])
else:
stmt = "%s AND id = %s" % (stmt, range[0])
self.cursor.execute(stmt)
return self.cursor.fetchone()[0]
def get_first_article(self, group_name):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
IF(MIN(id) IS NULL, 0, MIN(id)) AS first_article
FROM
%s
WHERE
approved='Y'""" % (table_name)
num_rows = self.cursor.execute(stmt)
return self.cursor.fetchone()[0]
def get_group_stats(self, group_name):
total, max, min = self.get_table_stats(self.get_table_name(group_name))
return (total, min, max, group_name)
def get_table_stats(self, table_name):
stmt = """
SELECT
COUNT(id) AS total,
IF(MAX(id) IS NULL, 0, MAX(id)) AS maximum,
IF(MIN(id) IS NULL, 0, MIN(id)) AS minimum
FROM
%s
WHERE
approved='Y'""" % (table_name)
num_rows = self.cursor.execute(stmt)
return self.cursor.fetchone()
def get_table_name(self, group_name):
stmt = """
SELECT
table_name
FROM
forums
WHERE
nntp_group_name='%s'""" % (group_name.replace('*', '%'))
self.cursor.execute(stmt)
return self.cursor.fetchone()[0]
def get_message_id(self, msg_num, group):
return '<%s@%s>' % (msg_num, group)
def get_notification_emails(self, forum_id):
# open the configuration file
fp = open("%s%s.php" % (settings.phorum_settings_path, forum_id), "r")
content = fp.read()
fp.close()
# get the value of the configuration variable
recipients = []
mod_code = moderator_regexp.search(content, 0).groups()
if mod_code[2] == 'r' or mod_code[2] == 'a':
# get the moderator emails from the forum_auth table
stmt = """
SELECT
email
FROM
forums_auth,
forums_moderators
WHERE
user_id=id AND
forum_id=%s""" % (forum_id)
self.cursor.execute(stmt)
result = list(self.cursor.fetchall())
for row in result:
recipients.append(row[0])
return recipients
def send_notifications(self, group_name, msg_id, thread_id, parent_id, msg_author, msg_email, msg_subject, msg_body):
msg_tpl = """From: Phorum <%(recipient)s>
To: %(recipient)s
Subject: Moderate for %(forum_name)s at %(phorum_server_hostname)s Message: %(msg_id)s.
Subject: %(msg_subject)s
Author: %(msg_author)s
Message: %(phorum_url)s/read.php?f=%(forum_id)s&i=%(msg_id)s&t=%(thread_id)s&admview=1
%(msg_body)s
To delete this message use this URL:
%(phorum_admin_url)s?page=easyadmin&action=del&type=quick&id=%(msg_id)s&num=1&thread=%(thread_id)s
To edit this message use this URL:
%(phorum_admin_url)s?page=edit&srcpage=easyadmin&id=%(msg_id)s&num=1&mythread=%(thread_id)s
"""
# get the forum_id for this group_name
stmt = """
SELECT
id,
name
FROM
forums
WHERE
nntp_group_name='%s'""" % (group_name)
self.cursor.execute(stmt)
forum_id, forum_name = self.cursor.fetchone()
# open the main configuration file
fp = open("%sforums.php" % (settings.phorum_settings_path), "r")
content = fp.read()
fp.close()
# regexps to get the content from the phorum configuration files
phorum_url = url_regexp.search(content, 0).groups()[2]
phorum_admin_url = admin_regexp.search(content, 0).groups()[2]
phorum_server_hostname = server_regexp.search(content, 0).groups()[3]
# connect to the SMTP server
smtp = smtplib.SMTP('localhost')
emails = self.get_notification_emails(forum_id)
for recipient in emails:
current_msg = msg_tpl % vars()
smtp.sendmail("Phorum <%s>" % (recipient), recipient, current_msg)
# XXX: Coding blind here. I really don't know much about how Phorum works with
# XXX: sending forum postings as emails, but it's here. Let's call this a
# XXX: temporary implementation. Should work fine, I guess.
phorum_mail_code = mail_code_regexp.search(content, 0).groups()[3]
notification_mail_tpl = """Message-ID: <%(random_msgid)s@%(phorum_server_hostname)s>
From: %(msg_author)s %(msg_email)s
Subject: %(msg_subject)s
To: %(forum_name)s <%(email_list)s>
Return-Path: <%(email_return)s>
Reply-To: %(email_return)s
X-Phorum-%(phorum_mail_code)s-Version: Phorum %(phorum_version)s
X-Phorum-%(phorum_mail_code)s-Forum: %(forum_name)s
X-Phorum-%(phorum_mail_code)s-Thread: %(thread_id)s
X-Phorum-%(phorum_mail_code)s-Parent: %(parent_id)s
This message was sent from: %(forum_name)s.
<%(phorum_url)s/read.php?f=%(forum_id)s&i=%(msg_id)s&t=%(thread_id)s>
----------------------------------------------------------------
%(msg_body)s
----------------------------------------------------------------
Sent using Papercut version %(__VERSION__)s
"""
stmt = """
SELECT
email_list,
email_return
FROM
forums
WHERE
LENGTH(email_list) > 0 AND
id=%s""" % (forum_id)
num_rows = self.cursor.execute(stmt)
if num_rows == 1:
email_list, email_return = self.cursor.fetchone()
msg_body = strutil.wrap(msg_body)
if len(msg_email) > 0:
msg_email = '<%s>' % msg_email
else:
msg_email = ''
random_msgid = md5.new(str(time.clock())).hexdigest()
phorum_version = settings.phorum_version
current_msg = notification_mail_tpl % vars()
smtp.sendmail('%s %s' % (msg_author, msg_email), email_list, current_msg)
smtp.quit()
def get_NEWGROUPS(self, ts, group='%'):
# since phorum doesn't record when each forum was created, we have no way of knowing this...
return None
def get_NEWNEWS(self, ts, group='*'):
stmt = """
SELECT
nntp_group_name,
table_name
FROM
forums
WHERE
nntp_group_name='%s'
ORDER BY
nntp_group_name ASC""" % (group_name.replace('*', '%'))
self.cursor.execute(stmt)
result = list(self.cursor.fetchall())
articles = []
for group, table in result:
stmt = """
SELECT
id
FROM
%s
WHERE
approved='Y' AND
UNIX_TIMESTAMP(datestamp) >= %s""" % (table, ts)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
continue
ids = list(self.cursor.fetchall())
for id in ids:
articles.append("<%s@%s>" % (id, group))
if len(articles) == 0:
return ''
else:
return "\r\n".join(articles)
def get_GROUP(self, group_name):
table_name = self.get_table_name(group_name)
result = self.get_table_stats(table_name)
return (result[0], result[2], result[1])
def get_LIST(self, username=""):
stmt = """
SELECT
nntp_group_name,
table_name
FROM
forums
WHERE
LENGTH(nntp_group_name) > 0
ORDER BY
nntp_group_name ASC"""
self.cursor.execute(stmt)
result = list(self.cursor.fetchall())
if len(result) == 0:
return ""
else:
lists = []
for group_name, table in result:
total, maximum, minimum = self.get_table_stats(table)
if settings.server_type == 'read-only':
lists.append("%s %s %s n" % (group_name, maximum, minimum))
else:
lists.append("%s %s %s y" % (group_name, maximum, minimum))
return "\r\n".join(lists)
def get_STAT(self, group_name, id):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
id
FROM
%s
WHERE
approved='Y' AND
id=%s""" % (table_name, id)
return self.cursor.execute(stmt)
def get_ARTICLE(self, group_name, id):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
A.id,
author,
email,
subject,
UNIX_TIMESTAMP(datestamp) AS datestamp,
body,
parent
FROM
%s A,
%s_bodies B
WHERE
A.approved='Y' AND
A.id=B.id AND
A.id=%s""" % (table_name, table_name, id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
result = list(self.cursor.fetchone())
if len(result[2]) == 0:
author = result[1]
else:
author = "%s <%s>" % (result[1], result[2])
formatted_time = strutil.get_formatted_time(time.localtime(result[4]))
headers = []
headers.append("Path: %s" % (settings.nntp_hostname))
headers.append("From: %s" % (author))
headers.append("Newsgroups: %s" % (group_name))
headers.append("Date: %s" % (formatted_time))
headers.append("Subject: %s" % (result[3]))
headers.append("Message-ID: <%s@%s>" % (result[0], group_name))
headers.append("Xref: %s %s:%s" % (settings.nntp_hostname, group_name, result[0]))
if result[6] != 0:
headers.append("References: <%s@%s>" % (result[6], group_name))
return ("\r\n".join(headers), strutil.format_body(result[5]))
def get_LAST(self, group_name, current_id):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
id
FROM
%s
WHERE
approved='Y' AND
id < %s
ORDER BY
id DESC
LIMIT 0, 1""" % (table_name, current_id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
return self.cursor.fetchone()[0]
def get_NEXT(self, group_name, current_id):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
id
FROM
%s
WHERE
approved='Y' AND
id > %s
ORDER BY
id ASC
LIMIT 0, 1""" % (table_name, current_id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
return self.cursor.fetchone()[0]
def get_HEAD(self, group_name, id):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
id,
author,
email,
subject,
UNIX_TIMESTAMP(datestamp) AS datestamp,
parent
FROM
%s
WHERE
approved='Y' AND
id=%s""" % (table_name, id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
result = list(self.cursor.fetchone())
if len(result[2]) == 0:
author = result[1]
else:
author = "%s <%s>" % (result[1], result[2])
formatted_time = strutil.get_formatted_time(time.localtime(result[4]))
headers = []
headers.append("Path: %s" % (settings.nntp_hostname))
headers.append("From: %s" % (author))
headers.append("Newsgroups: %s" % (group_name))
headers.append("Date: %s" % (formatted_time))
headers.append("Subject: %s" % (result[3]))
headers.append("Message-ID: <%s@%s>" % (result[0], group_name))
headers.append("Xref: %s %s:%s" % (settings.nntp_hostname, group_name, result[0]))
if result[5] != 0:
headers.append("References: <%s@%s>" % (result[5], group_name))
return "\r\n".join(headers)
def get_BODY(self, group_name, id):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
B.body
FROM
%s A,
%s_bodies B
WHERE
A.id=B.id AND
A.approved='Y' AND
B.id=%s""" % (table_name, table_name, id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
else:
return strutil.format_body(self.cursor.fetchone()[0])
def get_XOVER(self, group_name, start_id, end_id='ggg'):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
A.id,
parent,
author,
email,
subject,
UNIX_TIMESTAMP(datestamp) AS datestamp,
B.body
FROM
%s A,
%s_bodies B
WHERE
A.approved='Y' AND
A.id=B.id AND
A.id >= %s""" % (table_name, table_name, start_id)
if end_id != 'ggg':
stmt = "%s AND A.id <= %s" % (stmt, end_id)
self.cursor.execute(stmt)
result = list(self.cursor.fetchall())
overviews = []
for row in result:
if row[3] == '':
author = row[2]
else:
author = "%s <%s>" % (row[2], row[3])
formatted_time = strutil.get_formatted_time(time.localtime(row[5]))
message_id = "<%s@%s>" % (row[0], group_name)
line_count = len(row[6].split('\n'))
xref = 'Xref: %s %s:%s' % (settings.nntp_hostname, group_name, row[0])
if row[1] != 0:
reference = "<%s@%s>" % (row[1], group_name)
else:
reference = ""
# message_number <tab> subject <tab> author <tab> date <tab> message_id <tab> reference <tab> bytes <tab> lines <tab> xref
overviews.append("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (row[0], row[4], author, formatted_time, message_id, reference, len(strutil.format_body(row[6])), line_count, xref))
return "\r\n".join(overviews)
def get_XPAT(self, group_name, header, pattern, start_id, end_id='ggg'):
# XXX: need to actually check for the header values being passed as
# XXX: not all header names map to column names on the tables
table_name = self.get_table_name(group_name)
stmt = """
SELECT
A.id,
parent,
author,
email,
subject,
UNIX_TIMESTAMP(datestamp) AS datestamp,
B.body
FROM
%s A,
%s_bodies B
WHERE
A.approved='Y' AND
%s REGEXP '%s' AND
A.id = B.id AND
A.id >= %s""" % (table_name, table_name, header, strutil.format_wildcards(pattern), start_id)
if end_id != 'ggg':
stmt = "%s AND A.id <= %s" % (stmt, end_id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
result = list(self.cursor.fetchall())
hdrs = []
for row in result:
if header.upper() == 'SUBJECT':
hdrs.append('%s %s' % (row[0], row[4]))
elif header.upper() == 'FROM':
# XXX: totally broken with empty values for the email address
hdrs.append('%s %s <%s>' % (row[0], row[2], row[3]))
elif header.upper() == 'DATE':
hdrs.append('%s %s' % (row[0], strutil.get_formatted_time(time.localtime(row[5]))))
elif header.upper() == 'MESSAGE-ID':
hdrs.append('%s <%s@%s>' % (row[0], row[0], group_name))
elif (header.upper() == 'REFERENCES') and (row[1] != 0):
hdrs.append('%s <%s@%s>' % (row[0], row[1], group_name))
elif header.upper() == 'BYTES':
hdrs.append('%s %s' % (row[0], len(row[6])))
elif header.upper() == 'LINES':
hdrs.append('%s %s' % (row[0], len(row[6].split('\n'))))
elif header.upper() == 'XREF':
hdrs.append('%s %s %s:%s' % (row[0], settings.nntp_hostname, group_name, row[0]))
if len(hdrs) == 0:
return ""
else:
return "\r\n".join(hdrs)
def get_LISTGROUP(self, group_name):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
id
FROM
%s
WHERE
approved='Y'
ORDER BY
id ASC""" % (table_name)
self.cursor.execute(stmt)
result = list(self.cursor.fetchall())
return "\r\n".join(["%s" % k for k in result])
def get_XGTITLE(self, pattern=None):
stmt = """
SELECT
nntp_group_name,
description
FROM
forums
WHERE
LENGTH(nntp_group_name) > 0"""
if pattern != None:
stmt = stmt + """ AND
nntp_group_name REGEXP '%s'""" % (strutil.format_wildcards(pattern))
stmt = stmt + """
ORDER BY
nntp_group_name ASC"""
self.cursor.execute(stmt)
result = list(self.cursor.fetchall())
return "\r\n".join(["%s %s" % (k, v) for k, v in result])
def get_XHDR(self, group_name, header, style, range):
table_name = self.get_table_name(group_name)
stmt = """
SELECT
A.id,
parent,
author,
email,
subject,
UNIX_TIMESTAMP(datestamp) AS datestamp,
B.body
FROM
%s A,
%s_bodies B
WHERE
A.approved='Y' AND
A.id = B.id AND """ % (table_name, table_name)
if style == 'range':
stmt = '%s A.id >= %s' % (stmt, range[0])
if len(range) == 2:
stmt = '%s AND A.id <= %s' % (stmt, range[1])
else:
stmt = '%s A.id = %s' % (stmt, range[0])
if self.cursor.execute(stmt) == 0:
return None
result = self.cursor.fetchall()
hdrs = []
for row in result:
if header.upper() == 'SUBJECT':
hdrs.append('%s %s' % (row[0], row[4]))
elif header.upper() == 'FROM':
hdrs.append('%s %s <%s>' % (row[0], row[2], row[3]))
elif header.upper() == 'DATE':
hdrs.append('%s %s' % (row[0], strutil.get_formatted_time(time.localtime(row[5]))))
elif header.upper() == 'MESSAGE-ID':
hdrs.append('%s <%s@%s>' % (row[0], row[0], group_name))
elif (header.upper() == 'REFERENCES') and (row[1] != 0):
hdrs.append('%s <%s@%s>' % (row[0], row[1], group_name))
elif header.upper() == 'BYTES':
hdrs.append('%s %s' % (row[0], len(row[6])))
elif header.upper() == 'LINES':
hdrs.append('%s %s' % (row[0], len(row[6].split('\n'))))
elif header.upper() == 'XREF':
hdrs.append('%s %s %s:%s' % (row[0], settings.nntp_hostname, group_name, row[0]))
if len(hdrs) == 0:
return ""
else:
return "\r\n".join(hdrs)
def do_POST(self, group_name, lines, ip_address, username=''):
table_name = self.get_table_name(group_name)
body = self.get_message_body(lines)
author, email = from_regexp.search(lines, 0).groups()
subject = subject_regexp.search(lines, 0).groups()[0].strip()
# patch by <NAME> <<EMAIL>> to fix the handling of unusual encodings of messages
lines = mime_decode_header(re.sub(q_quote_multiline, "=?\\1?Q?\\2\\3?=", lines))
if lines.find('References') != -1:
# get the 'modifystamp' value from the parent (if any)
references = references_regexp.search(lines, 0).groups()
parent_id, void = references[-1].strip().split('@')
stmt = """
SELECT
IF(MAX(id) IS NULL, 1, MAX(id)+1) AS next_id
FROM
%s""" % (table_name)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
new_id = 1
else:
new_id = self.cursor.fetchone()[0]
stmt = """
SELECT
id,
thread,
modifystamp
FROM
%s
WHERE
approved='Y' AND
id=%s
GROUP BY
id""" % (table_name, parent_id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
parent_id, thread_id, modifystamp = self.cursor.fetchone()
else:
stmt = """
SELECT
IF(MAX(id) IS NULL, 1, MAX(id)+1) AS next_id,
UNIX_TIMESTAMP()
FROM
%s""" % (table_name)
self.cursor.execute(stmt)
new_id, modifystamp = self.cursor.fetchone()
parent_id = 0
thread_id = new_id
stmt = """
INSERT INTO
%s
(
id,
datestamp,
thread,
parent,
author,
subject,
email,
host,
email_reply,
approved,
msgid,
modifystamp,
userid
) VALUES (
%s,
NOW(),
%s,
%s,
'%s',
'%s',
'%s',
'%s',
'N',
'Y',
'',
%s,
0
)
""" % (table_name, new_id, thread_id, parent_id, self.quote_string(author.strip()), self.quote_string(subject), self.quote_string(email), ip_address, modifystamp)
if not self.cursor.execute(stmt):
return None
else:
# insert into the '*_bodies' table
stmt = """
INSERT INTO
%s_bodies
(
id,
body,
thread
) VALUES (
%s,
'%s',
%s
)""" % (table_name, new_id, self.quote_string(body), thread_id)
if not self.cursor.execute(stmt):
# delete from 'table_name' before returning..
stmt = """
DELETE FROM
%s
WHERE
id=%s""" % (table_name, new_id)
self.cursor.execute(stmt)
return None
else:
# alert forum moderators
self.send_notifications(group_name, new_id, thread_id, parent_id, author.strip(), email, subject, body)
return 1
```
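A short illustrative note on the overview format built in get_XOVER above: each response line is the tab-separated sequence documented in the inline comment (message number, subject, author, date, message-id, reference, bytes, lines, xref). A minimal sketch with invented values, not real Phorum data:

```python
# Illustrative only: the tab-separated NNTP overview line that get_XOVER
# above assembles per message. All values here are made up.
fields = [
    "42",                                        # message number
    "Re: hello world",                           # subject
    "Jane Doe <jane@example.com>",               # author
    "Sat, 01 Jan 2000 12:00:00 GMT",             # date
    "<42@example.group>",                        # message-id
    "<41@example.group>",                        # reference ("" if no parent)
    "1234",                                      # bytes
    "17",                                        # lines
    "Xref: news.example.com example.group:42",   # xref
]
overview_line = "\t".join(fields)
print(overview_line)
```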
|
{
"source": "JGrauNakima/cantools",
"score": 2
}
|
#### File: database/can/database.py
```python
import logging
from .formats import dbc
from .formats import kcd
from .formats import sym
from .internal_database import InternalDatabase
from ...compat import fopen
LOGGER = logging.getLogger(__name__)
class Database(object):
"""This class contains all messages, signals and definitions of a CAN
network.
The factory functions :func:`load()<cantools.database.load()>`,
:func:`load_file()<cantools.database.load_file()>` and
:func:`load_string()<cantools.database.load_string()>` return
instances of this class.
If `strict` is ``True`` an exception is raised if any signals are
overlapping or if they don't fit in their message.
"""
def __init__(self,
messages=None,
nodes=None,
buses=None,
version=None,
dbc_specifics=None,
frame_id_mask=None,
strict=True):
self._messages = messages if messages else []
self._nodes = nodes if nodes else []
self._buses = buses if buses else []
self._name_to_message = {}
self._frame_id_to_message = {}
self._version = version
self._dbc = dbc_specifics
if frame_id_mask is None:
frame_id_mask = 0xffffffff
self._frame_id_mask = frame_id_mask
self._strict = strict
self.refresh()
@property
def messages(self):
"""A list of messages in the database.
Use :meth:`.get_message_by_frame_id()` or
:meth:`.get_message_by_name()` to find a message by its frame
id or name.
"""
return self._messages
@property
def nodes(self):
"""A list of nodes in the database.
"""
return self._nodes
@property
def buses(self):
"""A list of CAN buses in the database.
"""
return self._buses
@property
def version(self):
"""The database version, or ``None`` if unavailable.
"""
return self._version
@property
def dbc(self):
"""An object containing dbc specific properties like e.g. attributes.
"""
return self._dbc
def add_dbc(self, fp):
"""Read and parse DBC data from given file-like object and add the
parsed data to the database.
>>> db = cantools.database.Database()
>>> with open ('foo.dbc', 'r') as fin:
... db.add_dbc(fin)
"""
self.add_dbc_string(fp.read())
def add_dbc_file(self, filename, encoding='cp1252'):
"""Open, read and parse DBC data from given file and add the parsed
data to the database.
`encoding` specifies the file encoding.
>>> db = cantools.database.Database()
>>> db.add_dbc_file('foo.dbc')
"""
with fopen(filename, 'r', encoding=encoding) as fin:
self.add_dbc(fin)
def add_dbc_string(self, string):
"""Parse given DBC data string and add the parsed data to the
database.
>>> db = cantools.database.Database()
>>> with open ('foo.dbc', 'r') as fin:
... db.add_dbc_string(fin.read())
"""
database = dbc.load_string(string, self._strict)
self._messages += database.messages
self._nodes = database.nodes
self._buses = database.buses
self._version = database.version
self._dbc = database.dbc
self.refresh()
def add_kcd(self, fp):
"""Read and parse KCD data from given file-like object and add the
parsed data to the database.
"""
self.add_kcd_string(fp.read())
def add_kcd_file(self, filename, encoding='utf-8'):
"""Open, read and parse KCD data from given file and add the parsed
data to the database.
`encoding` specifies the file encoding.
"""
with fopen(filename, 'r', encoding=encoding) as fin:
self.add_kcd(fin)
def add_kcd_string(self, string):
"""Parse given KCD data string and add the parsed data to the
database.
"""
database = kcd.load_string(string, self._strict)
self._messages += database.messages
self._nodes = database.nodes
self._buses = database.buses
self._version = database.version
self._dbc = database.dbc
self.refresh()
def add_sym(self, fp):
"""Read and parse SYM data from given file-like object and add the
parsed data to the database.
"""
self.add_sym_string(fp.read())
def add_sym_file(self, filename, encoding='utf-8'):
"""Open, read and parse SYM data from given file and add the parsed
data to the database.
`encoding` specifies the file encoding.
"""
with fopen(filename, 'r', encoding=encoding) as fin:
self.add_sym(fin)
def add_sym_string(self, string):
"""Parse given SYM data string and add the parsed data to the
database.
"""
database = sym.load_string(string, self._strict)
self._messages += database.messages
self._nodes = database.nodes
self._buses = database.buses
self._version = database.version
self._dbc = database.dbc
self.refresh()
def _add_message(self, message):
"""Add given message to the database.
"""
if message.name in self._name_to_message:
LOGGER.warning("Overwriting message '%s' with '%s' in the "
"name to message dictionary.",
self._name_to_message[message.name].name,
message.name)
masked_frame_id = (message.frame_id & self._frame_id_mask)
if masked_frame_id in self._frame_id_to_message:
LOGGER.warning(
"Overwriting message '%s' with '%s' in the frame id to message "
"dictionary because they have identical masked frame ids 0x%x.",
self._frame_id_to_message[masked_frame_id].name,
message.name,
masked_frame_id)
self._name_to_message[message.name] = message
self._frame_id_to_message[masked_frame_id] = message
def as_dbc_string(self):
"""Return the database as a string formatted as a DBC file.
"""
return dbc.dump_string(InternalDatabase(self._messages,
self._nodes,
self._buses,
self._version,
self._dbc))
def as_kcd_string(self):
"""Return the database as a string formatted as a KCD file.
"""
return kcd.dump_string(InternalDatabase(self._messages,
self._nodes,
self._buses,
self._version,
self._dbc))
def get_message_by_name(self, name):
"""Find the message object for given name `name`.
"""
return self._name_to_message[name]
def get_message_by_frame_id(self, frame_id):
"""Find the message object for given frame id `frame_id`.
"""
return self._frame_id_to_message[frame_id & self._frame_id_mask]
def get_node_by_name(self, name):
"""Find the node object for given name `name`.
"""
for node in self._nodes:
if node.name == name:
return node
raise KeyError(name)
def get_bus_by_name(self, name):
"""Find the bus object for given name `name`.
"""
for bus in self._buses:
if bus.name == name:
return bus
raise KeyError(name)
def encode_message(self,
frame_id_or_name,
data,
scaling=True,
padding=False,
strict=True):
"""Encode given signal data `data` as a message of given frame id or
name `frame_id_or_name`. `data` is a dictionary of signal
name-value entries.
If `scaling` is ``False`` no scaling of signals is performed.
If `padding` is ``True`` unused bits are encoded as 1.
If `strict` is ``True`` all signal values must be within their
allowed ranges, or an exception is raised.
>>> db.encode_message(158, {'Bar': 1, 'Fum': 5.0})
b'\\x01\\x45\\x23\\x00\\x11'
>>> db.encode_message('Foo', {'Bar': 1, 'Fum': 5.0})
b'\\x01\\x45\\x23\\x00\\x11'
"""
try:
message = self._frame_id_to_message[frame_id_or_name]
except KeyError:
message = self._name_to_message[frame_id_or_name]
return message.encode(data, scaling, padding, strict)
def decode_message(self,
frame_id_or_name,
data,
decode_choices=True,
scaling=True):
"""Decode given signal data `data` as a message of given frame id or
name `frame_id_or_name`. Returns a dictionary of signal
name-value entries.
If `decode_choices` is ``False`` scaled values are not
converted to choice strings (if available).
If `scaling` is ``False`` no scaling of signals is performed.
>>> db.decode_message(158, b'\\x01\\x45\\x23\\x00\\x11')
{'Bar': 1, 'Fum': 5.0}
>>> db.decode_message('Foo', b'\\x01\\x45\\x23\\x00\\x11')
{'Bar': 1, 'Fum': 5.0}
"""
try:
message = self._frame_id_to_message[frame_id_or_name]
except KeyError:
message = self._name_to_message[frame_id_or_name]
return message.decode(data, decode_choices, scaling)
def refresh(self):
"""Refresh the internal database state.
This method must be called after modifying any message in the
database to refresh the internal lookup tables used when
encoding and decoding messages.
"""
self._name_to_message = {}
self._frame_id_to_message = {}
for message in self._messages:
message.refresh(self._strict)
self._add_message(message)
def __repr__(self):
lines = []
lines.append("version('{}')".format(self._version))
lines.append('')
if self._nodes:
for node in self._nodes:
lines.append(repr(node))
lines.append('')
for message in self._messages:
lines.append(repr(message))
for signal in message.signals:
lines.append(' ' + repr(signal))
lines.append('')
return '\n'.join(lines)
```
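A brief usage sketch for the Database class above, reusing the hypothetical names from its own docstrings ('foo.dbc', message 'Foo', signals 'Bar' and 'Fum'); a sketch, not cantools documentation:

```python
# Round-trip sketch for cantools' Database, mirroring the docstring examples.
# 'foo.dbc', 'Foo', 'Bar' and 'Fum' are placeholder names.
import cantools

db = cantools.database.Database()
db.add_dbc_file('foo.dbc')

encoded = db.encode_message('Foo', {'Bar': 1, 'Fum': 5.0})
decoded = db.decode_message('Foo', encoded)
print(decoded)  # expected: {'Bar': 1, 'Fum': 5.0}
print(db.get_message_by_name('Foo').frame_id)
```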
#### File: cantools/tests/test_monitor.py
```python
import unittest
import curses
try:
from unittest.mock import Mock
from unittest.mock import patch
from unittest.mock import call
except ImportError:
from mock import Mock
from mock import patch
from mock import call
import can
from cantools.subparsers.monitor import Monitor
class Args(object):
def __init__(self,
database,
single_line=False):
self.database = database
self.encoding = None
self.frame_id_mask = None
self.no_strict = False
self.single_line = single_line
self.bit_rate = None
self.bus_type = 'socketcan'
self.channel = 'vcan0'
class StdScr(object):
def __init__(self, user_input=None, resolution=None):
if resolution is None:
resolution = [(30, 64)]
self.getmaxyx = Mock(side_effect=resolution)
self.nodelay = Mock()
self.clear = Mock()
self.addstr = Mock()
self.refresh = Mock()
if user_input is None:
user_input = ['q']
self.getkey = Mock(side_effect=user_input)
self.move = Mock()
class CanToolsMonitorTest(unittest.TestCase):
maxDiff = None
def assert_called(self, mock, expected):
self.assertEqual(mock.call_args_list, expected)
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_immediate_quit(self,
use_default_colors,
curs_set,
init_pair,
is_term_resized,
color_pair,
bus,
notifier):
# Prepare mocks.
stdscr = StdScr()
args = Args('tests/files/motohawk.dbc')
color_pair.side_effect = ['green', 'cyan']
is_term_resized.return_value = False
# Run monitor.
monitor = Monitor(stdscr, args)
monitor.run()
# Check mocks.
self.assert_called(use_default_colors, [call()])
self.assert_called(curs_set, [call(False)])
self.assert_called(
init_pair,
[
call(1, curses.COLOR_BLACK, curses.COLOR_GREEN),
call(2, curses.COLOR_BLACK, curses.COLOR_CYAN)
])
self.assert_called(color_pair, [call(1), call(2)])
self.assert_called(bus, [call(bustype='socketcan', channel='vcan0')])
self.assert_called(
stdscr.addstr,
[
call(0,
0,
'Received: 0, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan')
])
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_display_one_frame(self,
_use_default_colors,
_curs_set,
_init_pair,
is_term_resized,
color_pair,
_bus,
_notifier):
# Prepare mocks.
stdscr = StdScr()
args = Args('tests/files/motohawk.dbc')
color_pair.side_effect = ['green', 'cyan']
is_term_resized.return_value = False
# Run monitor.
monitor = Monitor(stdscr, args)
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00'))
monitor.run()
# Check mocks.
self.assert_called(
stdscr.addstr,
[
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan')
])
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_display_one_frame_single_line(self,
_use_default_colors,
_curs_set,
_init_pair,
is_term_resized,
color_pair,
_bus,
_notifier):
# Prepare mocks.
stdscr = StdScr()
args = Args('tests/files/motohawk.dbc',
single_line=True)
color_pair.side_effect = ['green', 'cyan']
is_term_resized.return_value = False
# Run monitor.
monitor = Monitor(stdscr, args)
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00'))
monitor.run()
# Check mocks.
self.assert_called(
stdscr.addstr,
[
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2,
0,
" 0.000 ExampleMessage(Enable: 'Enabled' -, "
"AverageRadius: 3.2 m, Temperature: 250.55 degK)"),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan')
])
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_display_one_frame_input_twice(self,
_use_default_colors,
_curs_set,
_init_pair,
is_term_resized,
color_pair,
_bus,
_notifier):
# Prepare mocks.
stdscr = StdScr()
args = Args('tests/files/motohawk.dbc')
color_pair.side_effect = ['green', 'cyan']
is_term_resized.return_value = False
# Run monitor.
monitor = Monitor(stdscr, args)
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00',
timestamp=1.0))
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xd0\x00\x00\x00\x00\x00',
timestamp=2.1))
monitor.run()
# Check mocks.
self.assert_called(
stdscr.addstr,
[
call(0, 0, 'Received: 2, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 1.100 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.54 degK'),
call(6, 0, ' )'),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan')
])
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_filter(self,
_use_default_colors,
_curs_set,
_init_pair,
is_term_resized,
color_pair,
_bus,
_notifier):
# Prepare mocks.
stdscr = StdScr(user_input=[
'f', 'Y', '[', '\b', '\n', 'f', '\b', 'E', '\n', 'q'
])
args = Args('tests/files/motohawk.dbc')
color_pair.side_effect = 10 * ['green', 'cyan']
is_term_resized.return_value = False
# Run monitor.
monitor = Monitor(stdscr, args)
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00'))
monitor.run()
# Check mocks.
self.assert_called(
stdscr.addstr,
[
# No filter.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
# 'f' pressed.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'Filter: ',
'cyan'),
# No match on 'Y'.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(29,
0,
'Filter: Y ',
'cyan'),
# Invalid filter 'Y['.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'Filter: Y[ ',
'cyan'),
# No match on 'Y'.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(29,
0,
'Filter: Y ',
'cyan'),
# Hit enter to hide filter.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
# 'f' pressed again.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(29,
0,
'Filter: Y ',
'cyan'),
# Backspace.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'Filter: ',
'cyan'),
# Match on 'E'.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'Filter: E ',
'cyan'),
# Hit enter to hide filter.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan')
])
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_reset(self,
_use_default_colors,
_curs_set,
_init_pair,
is_term_resized,
color_pair,
_bus,
_notifier):
# Prepare mocks.
stdscr = StdScr(user_input=[
'f', 'E', '\n', 'p', ' ', 'r', 'f', '\n', 'q'
])
args = Args('tests/files/motohawk.dbc')
color_pair.side_effect = 10 * ['green', 'cyan']
is_term_resized.return_value = False
# Run monitor.
monitor = Monitor(stdscr, args)
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00',
timestamp=3))
# Discarded.
monitor.on_message_received(can.Message(
arbitration_id=497,
data=b'\xc0\x06\xb0\x00\x00\x00\x00\x00',
timestamp=6))
monitor.tick()
monitor.tick()
monitor.tick()
# Input another before pause.
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xc0\x00\x00\x00\x00\x00',
timestamp=7))
monitor.tick()
# Input when paused. Will not be displayed.
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xd0\x00\x00\x00\x00\x00',
timestamp=10))
monitor.tick()
monitor.tick()
monitor.tick()
# Input after reset.
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\x00\x00\x00\x00\x00\x00',
timestamp=11))
monitor.run()
# Check mocks.
self.assert_called(
stdscr.addstr,
[
# One ok and one with bad frame id.
call(0, 0, 'Received: 2, Discarded: 1, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
# 'f' pressed.
call(0, 0, 'Received: 2, Discarded: 1, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'Filter: ',
'cyan'),
# 'E' pressed.
call(0, 0, 'Received: 2, Discarded: 1, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'Filter: E ',
'cyan'),
# '\n' pressed.
call(0, 0, 'Received: 3, Discarded: 1, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 4.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.54 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
# 'p' pressed. Input frame not displayed.
# 'r' pressed.
call(0, 0, 'Received: 0, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
# Input after reset. 'f' pressed.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.48 degK'),
call(6, 0, ' )'),
call(29,
0, 'Filter: ',
'cyan'),
# '\n' pressed.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.48 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan')
# 'q' pressed, no redraw.
])
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_play_pause(self,
_use_default_colors,
_curs_set,
_init_pair,
is_term_resized,
color_pair,
_bus,
_notifier):
# Prepare mocks.
stdscr = StdScr(user_input=[
' ', ' ', 'p', ' ', ' ', 'p', ' ', ' ', ' ', 'q'
])
args = Args('tests/files/motohawk.dbc')
color_pair.side_effect = 8 * ['green', 'cyan']
is_term_resized.return_value = False
# Run monitor.
monitor = Monitor(stdscr, args)
for timestamp in range(4):
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00',
timestamp=timestamp))
monitor.tick()
# Display most recently received at unpause.
monitor.tick()
monitor.tick()
monitor.tick()
for timestamp in range(5, 7):
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00',
timestamp=timestamp))
monitor.tick()
monitor.run()
# Check mocks.
self.assert_called(
stdscr.addstr,
[
# Received when playing.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
call(0, 0, 'Received: 2, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 1.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
call(0, 0, 'Received: 3, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 2.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
# Received when paused, displayed at unpause.
call(0, 0, 'Received: 4, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 3.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
# Received when playing.
call(0, 0, 'Received: 5, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 5.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan'),
call(0, 0, 'Received: 6, Discarded: 0, Errors: 0'),
call(1,
0,
' TIMESTAMP MESSAGE ',
'green'),
call(2, 0, ' 6.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(29,
0,
'q: Quit, f: Filter, p: Play/Pause, r: Reset ',
'cyan')
])
@patch('can.Notifier')
@patch('can.Bus')
@patch('curses.color_pair')
@patch('curses.is_term_resized')
@patch('curses.init_pair')
@patch('curses.curs_set')
@patch('curses.use_default_colors')
def test_resize(self,
_use_default_colors,
_curs_set,
_init_pair,
is_term_resized,
color_pair,
_bus,
_notifier):
# Prepare mocks.
stdscr = StdScr(user_input=[' ', ' ', 'q'],
resolution=[(30, 40), (25, 35), (25, 35), (20, 30)])
args = Args('tests/files/motohawk.dbc')
color_pair.side_effect = 3 * ['green', 'cyan']
is_term_resized.return_value = True
# Run monitor.
monitor = Monitor(stdscr, args)
monitor.on_message_received(can.Message(
arbitration_id=496,
data=b'\xc0\x06\xe0\x00\x00\x00\x00\x00',
timestamp=1))
monitor.tick()
monitor.run()
# Check mocks.
self.assert_called(
stdscr.addstr,
[
# 25 x 35.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1, 0, ' TIMESTAMP MESSAGE ', 'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(24, 0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset', 'cyan'),
# 25 x 35.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1, 0, ' TIMESTAMP MESSAGE ', 'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(24, 0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset', 'cyan'),
# 20 x 30.
call(0, 0, 'Received: 1, Discarded: 0, Errors: 0'),
call(1, 0, ' TIMESTAMP MESSAGE ', 'green'),
call(2, 0, ' 0.000 ExampleMessage('),
call(3, 0, " Enable: 'Enabled' -,"),
call(4, 0, ' AverageRadius: 3.2 m,'),
call(5, 0, ' Temperature: 250.55 degK'),
call(6, 0, ' )'),
call(19, 0, 'q: Quit, f: Filter, p: Play/Pause, r: Reset', 'cyan')
])
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JGrauPirozzi/robinhood",
"score": 2
}
|
#### File: JGrauPirozzi/robinhood/robinhood.py
```python
import requests
import urllib
class Robinhood(object):
# All known endpoints as of September 5th, 2015
endpoints = {
"accounts": "https://api.robinhood.com/accounts",
"ach_deposit_schedules": "https://api.robinhood.com/ach/deposit_schedules/",
"ach_iav_auth": "https://api.robinhood.com/ach/iav/auth/",
"ach_relationships": "https://api.robinhood.com/ach/relationships/",
"ach_transfers": "https://api.robinhood.com/ach/transfers/",
"applications": "https://api.robinhood.com/applications/",
"dividends": "https://api.robinhood.com/dividends/",
"document_requests": "https://api.robinhood.com/upload/document_requests/",
"edocuments": "https://api.robinhood.com/documents/",
"instruments": "https://api.robinhood.com/instruments/",
"login": "https://api.robinhood.com/api-token-auth/",
"margin_upgrades": "https://api.robinhood.com/margin/upgrades/",
"markets": "https://api.robinhood.com/markets/",
"notifications": "https://api.robinhood.com/notifications/",
"notifications/devices": "https://api.robinhood.com/notifications/devices/",
"orders": "https://api.robinhood.com/orders/",
"password_reset": "https://api.robinhood.com/password_reset/request/",
"quotes": "https://api.robinhood.com/quotes/",
"user": "https://api.robinhood.com/user/",
"user/additional_info": "https://api.robinhood.com/user/additional_info/",
"user/basic_info": "https://api.robinhood.com/user/basic_info/",
"user/employment": "https://api.robinhood.com/user/employment/",
"user/investment_profile": "https://api.robinhood.com/user/investment_profile/",
"watchlists": "https://api.robinhood.com/watchlists/"
}
def __init__(self, username, password):
self.session = requests.session()
self.username = username
self.session.headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"X-Robinhood-API-Version": "1.0.0",
"Connection": "keep-alive",
"User-Agent": "Robinhood/823 (iPhone; iOS 7.1.2; Scale/2.00)"
}
self.session.headers['Authorization'] = 'Token ' + self.login(username, password)
self.account = self.get_account_number()
self.get_user_info()
def login(self, username, password):
data = "username=%s&password=%s" % (username, password)
res = self.session.post(self.endpoints['login'], data = data)
try:
return res.json()['token']
except:
raise Exception("Could not log in: " + res.text)
def get_account_number(self):
''' Returns the brokerage account number of the account logged in.
This is currently only used for placing orders, so you can ignore
this method. '''
res = self.session.get(self.endpoints['accounts'])
if res.status_code == 200:
accountURL = res.json()['results'][0]['url']
account_number = accountURL[accountURL.index('accounts')+9:-1]
return account_number
else:
raise Exception("Could not retrieve account number: " + res.text)
def instrument(self, symbol):
''' Generates an instrument object. Currently this is only used for
placing orders, and generating and using the instrument object are handled
for you, so you can ignore this method'''
res = self.session.get(self.endpoints['instruments'], params={'query':symbol.upper()})
if res.status_code == 200:
return res.json()['results']
else:
raise Exception("Could not generate instrument object: " + res.text)
def get_quote(self, symbol):
''' Returns a quote object for a given symbol, including all data returned
by Robinhood's API'''
data = { 'symbols' : symbol }
res = self.session.get(self.endpoints['quotes'], params=data)
if res.status_code == 200:
return res.json()['results']
else:
raise Exception("Could not retrieve quote: " + res.text)
def quote_price(self, symbol):
''' Returns the most recent price of a given symbol '''
data = { 'symbols' : symbol }
res = self.session.get(self.endpoints['quotes'], params=data)
if res.status_code == 200:
return res.json()['results'][0]['last_trade_price']
else:
raise Exception("Could not retrieve quote: " + res.text)
def place_order(self, instrument, quantity, side, order_type, bid_price=None, time_in_force="gfd", stop_price=None):
''' Places an order with Robinhood. Currently both market and limit orders work,
but stop_limit and stop_loss orders are coming soon.'''
data = """account=%s&instrument=%s&quantity=%d&side=%s&symbol=%s&time_in_force=%s&trigger=immediate&type=%s""" % (
urllib.quote('https://api.robinhood.com/accounts/' + self.account + '/'),
urllib.unquote(instrument['url']), quantity, side, instrument['symbol'], time_in_force, order_type)
if order_type == "market":
data += "&price=%f" % (float(self.get_quote(instrument['symbol'])[0]['bid_price']))
if order_type == "limit":
data += "&price=%f" % (float(bid_price))
##Stop Loss and Stop Limit orders are a work in progress
##if order_type == "stop_loss":
## data += "&stop_price=%f" % (float(stop_price))
##if order_type == "stop_limit":
## data += "&price=%f&stop_price=%f" % (float(bid_price), float(stop_price))
res = self.session.post(self.endpoints['orders'], data = data)
if res.status_code == 201:
res = res.json()
order_ID = res['url'][res['url'].index("orders")+7:-1]
return order_ID
else:
raise Exception("Could not place order: " + res.text)
def place_buy_order(self, symbol, quantity, order_type=None, bid_price=None):
''' Places a buy order '''
i = self.instrument(symbol)[0]
side = "buy"
return self.place_order(i, quantity, side, order_type, bid_price)
def place_sell_order(self, symbol, quantity, order_type=None, bid_price=None):
''' Places a sell order '''
i = self.instrument(symbol)[0]
side = "sell"
return self.place_order(i, quantity, side, order_type, bid_price)
def order_details(self, order_ID):
''' Returns an order object which contains information about an order
and its status'''
res = self.session.get(self.endpoints['orders'] + order_ID + "/")
if res.status_code == 200:
return res.json()
else:
raise Exception("Could not get order status: " + res.text)
def order_status(self, order_ID):
''' Returns an order status string'''
return self.order_details(order_ID)['state']
def advanced_order_status(self, order_ID):
''' Will return number of shares completed, average price ... as a dict '''
def get_order(self, order_ID):
''' Will return a dict of order information for a given order ID '''
def list_orders(self):
''' returns a list of all order_IDs, ordered from newest to oldest '''
res = self.session.get(self.endpoints['orders'])
if res.status_code == 200:
orders = []
for i in res.json()['results']:
URL = i['url']
orders.append(URL[URL.index("orders")+7:-1])
return orders
else:
raise Exception("Could not retrieve orders: " + res.text)
def list_order_details(self):
''' Generates a dictionary where keys are order_IDs and values are
order objects. '''
detailed_orders = {}
for i in self.list_orders():
order = self.order_details(i)
order['symbol'] = self.session.get(order['instrument']).json()['symbol']
detailed_orders[i] = order
return detailed_orders
def cancel_order(self, order_ID):
''' Cancels order with order_ID'''
res = self.session.post(self.endpoints['orders'] + order_ID + "/cancel/")
if res.status_code == 200:
return res
else:
raise Exception("Could not cancel order: " + res.text)
def get_user_info(self):
''' Pulls user info from API and stores it in Robinhood object'''
res = self.session.get(self.endpoints['user'])
if res.status_code == 200:
self.first_name = res.json()['first_name']
self.last_name = res.json()['last_name']
else:
raise Exception("Could not get user info: " + res.text)
res = self.session.get(self.endpoints['user/basic_info'])
if res.status_code == 200:
res = res.json()
self.phone_number = res['phone_number']
self.city = res['city']
self.number_dependents = res['number_dependents']
self.citizenship = res['citizenship']
self.marital_status = res['marital_status']
self.zipcode = res['zipcode']
self.state_residence = res['state']
self.date_of_birth = res['date_of_birth']
self.address = res['address']
self.tax_id_ssn = res['tax_id_ssn']
else:
raise Exception("Could not get basic user info: " + res.text)
```
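A hedged usage sketch for the Robinhood wrapper above; the credentials and ticker are placeholders, and the endpoints listed in the class are assumed to respond as they did when the file was written:

```python
# Illustrative only: exercises the public methods defined above.
# 'USERNAME', 'PASSWORD' and 'AAPL' are placeholders.
from robinhood import Robinhood

rh = Robinhood('USERNAME', 'PASSWORD')
print(rh.quote_price('AAPL'))             # last trade price as a string

order_id = rh.place_buy_order('AAPL', 1, order_type='market')
print(rh.order_status(order_id))          # e.g. 'queued' or 'filled'
rh.cancel_order(order_id)
```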
|
{
"source": "jgravelle-google/it-tools",
"score": 2
}
|
#### File: it-tools/src/adapter.py
```python
import argparse
import os
import sys
import traceback
import itl_parser
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('itl_in')
arg_parser.add_argument('-o', dest='js_out', default=None)
arg_parser.add_argument('--node', dest='do_node', action='store_true')
arg_parser.add_argument('--name', dest='component_name', default=None)
args = arg_parser.parse_args(sys.argv[1:])
# TODO: actually re-support node scripts
itl_path = args.itl_in
itl_filename = os.path.basename(itl_path)
basename, _ = os.path.splitext(itl_filename)
if args.js_out:
js_path = os.path.abspath(args.js_out)
outpath = os.path.dirname(js_path)
else:
outpath = os.path.abspath('out')
js_path = os.path.join(outpath, basename + '.js')
if args.component_name:
component_name = args.component_name
else:
component_name = basename + 'Component'
srcpath = os.path.dirname(__file__)
def main():
contents = open(itl_path).read()
component = itl_parser.parse(contents)
ensure_path(outpath)
write_js_module(component)
def ensure_path(path):
try:
os.makedirs(path)
except:
pass
# NodeJS wrapper module
def write_js_module(component):
def escape(s):
return s.replace('\\', '/')
def function(func, n_indent, is_internal=False):
global num_locals
ret = ''
params = ', '.join(['x' + str(i) for i in range(len(func.params))])
if is_internal:
decl = 'function {}'.format(func.name)
else:
decl = '"{}": function'.format(func.exname)
ret += tab * n_indent + '{}({}) {{\n'.format(decl, params)
for i in range(len(func.body)):
sexpr = func.body[i]
ret += tab * (n_indent + 1)
if func.results and i == len(func.body) - 1:
ret += 'return ' + sexpr.as_js()
else:
ret += sexpr.as_js()
ret += ';\n'
if is_internal:
ret += tab * n_indent + '};\n'
else:
ret += tab * n_indent + '},\n'
return ret
# Paths and setup
template_path = os.path.join(srcpath, 'wrapper_module_template.js')
js_str = open(template_path).read()
tab = ' '
js_str = js_str.replace('/**COMPONENT_NAME**/', component_name)
module_names = ''
load_modules = ''
for mod in component.modules:
name = mod.name
path = mod.path
module_names += tab * 2 + 'let {};\n'.format(name)
load_modules += tab * 2 + '{} = await loadModule("{}", {{\n'.format(name, path)
for imp, funcs in mod.imports.items():
load_modules += tab * 3 + imp + ': {\n'
for func in funcs:
load_modules += function(func, n_indent=4)
load_modules += tab * 3 + '},\n'
load_modules += tab * 2 + '});\n'
# call any static constructors
# TODO: configure this; have a start function in ITL to call these
load_modules += tab * 2 + '{}._initialize();\n'.format(name)
js_str = js_str.replace('/**MODULE_NAMES**/', module_names)
js_str = js_str.replace('/**LOAD_MODULES**/', load_modules)
component_functions = ''
for ty in component.types.values():
component_functions += ty.js_decls(n_indent=2)
for func in component.funcs:
component_functions += function(func, n_indent=2, is_internal=True)
js_str = js_str.replace('/**COMPONENT_FUNCTIONS**/\n', component_functions)
exports = ''
for func in component.exports:
exports += function(func, n_indent=3)
js_str = js_str.replace('/**EXPORTS**/\n', exports)
open(js_path, 'w').write(js_str)
print('Wrote JS module', js_path)
if __name__ == '__main__':
try:
main()
except Exception:
trace = traceback.format_exc()
print(trace)
sys.exit(1)
```
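Since adapter.py parses sys.argv at import time, it is meant to be run as a script. As a sketch of the parser API it builds on (assuming itl_parser is importable and exposes the attributes read above), one could inspect a component without emitting the JS wrapper:

```python
# Sketch: parse an ITL file and list what adapter.py would wrap.
# 'example.itl' is a placeholder path; modules, exports, name, path and
# exname are the attributes adapter.py reads above.
import itl_parser

with open('example.itl') as f:
    component = itl_parser.parse(f.read())

for mod in component.modules:
    print('module', mod.name, '->', mod.path)
for func in component.exports:
    print('export', func.exname)
```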
|
{
"source": "jgraves/rules_ios",
"score": 2
}
|
#### File: rules_ios/rules/transition_support.bzl
```python
def _current_apple_platform(apple_fragment, xcode_config):
"""Returns a struct containing the platform and target os version"""
cpu = apple_fragment.single_arch_cpu
platform = apple_fragment.single_arch_platform
xcode_config = xcode_config[apple_common.XcodeVersionConfig]
target_os_version = xcode_config.minimum_os_for_platform_type(
platform.platform_type,
)
return struct(
platform = platform,
target_os_version = target_os_version,
)
def _cpu_string(platform_type, settings):
"""Generates a <platform>_<arch> string for the current target based on the given parameters."""
# If the cpu value has already been transformed to the correct value, we must not change it anymore.
# Otherwise, we may build for the wrong arch.
cpu_value = settings["//command_line_option:cpu"]
if (platform_type == "macos" and cpu_value.startswith("{}_".format(platform_type))) or cpu_value.startswith("{}_".format(platform_type)):
return cpu_value
if platform_type == "ios":
ios_cpus = settings["//command_line_option:ios_multi_cpus"]
if ios_cpus:
return "ios_{}".format(ios_cpus[0])
return "ios_x86_64"
if platform_type == "macos":
macos_cpus = settings["//command_line_option:macos_cpus"]
if macos_cpus:
return "darwin_{}".format(macos_cpus[0])
return "darwin_x86_64"
if platform_type == "tvos":
tvos_cpus = settings["//command_line_option:tvos_cpus"]
if tvos_cpus:
return "tvos_{}".format(tvos_cpus[0])
return "tvos_x86_64"
if platform_type == "watchos":
watchos_cpus = settings["//command_line_option:watchos_cpus"]
if watchos_cpus:
return "watchos_{}".format(watchos_cpus[0])
return "watchos_i386"
fail("ERROR: Unknown platform type: {}".format(platform_type))
def _min_os_version_or_none(attr, platform, attr_platform_type):
if attr_platform_type != platform:
return None
if hasattr(attr, "platforms"):
platforms = attr.platforms
value = platforms.get(platform)
return value
elif hasattr(attr, "minimum_os_version"):
return attr.minimum_os_version
else:
fail("ERROR: must either specify a single platform/minimum_os_version, or specify a dict via platforms")
def _apple_rule_transition_impl(settings, attr):
"""Rule transition for Apple rules."""
platform_type = str(settings["//command_line_option:apple_platform_type"])
attr_platform_type = getattr(attr, "platform_type", None)
attr_platforms = getattr(attr, "platforms", None)
fail_on_apple_rule_transition_platform_mismatches = getattr(attr, "fail_on_apple_rule_transition_platform_mismatches", False)
if attr_platform_type and attr_platform_type != platform_type:
if fail_on_apple_rule_transition_platform_mismatches:
fail("ERROR: {}: attribute platform_type set to {}, but inferred to be {}".format(attr.name, attr_platform_type, platform_type))
platform_type = attr_platform_type
if attr_platforms and platform_type not in attr_platforms:
if fail_on_apple_rule_transition_platform_mismatches:
fail("ERROR: {}: attribute platforms set to {}, but platform inferred to be {}".format(attr.name, attr_platforms, platform_type))
platform_type = attr_platforms.keys()[0]
ret = {
"//command_line_option:apple configuration distinguisher": "applebin_" + platform_type,
"//command_line_option:apple_platform_type": platform_type,
"//command_line_option:apple_split_cpu": settings["//command_line_option:apple_split_cpu"],
"//command_line_option:compiler": settings["//command_line_option:apple_compiler"],
"//command_line_option:cpu": _cpu_string(platform_type, settings),
"//command_line_option:crosstool_top": (
settings["//command_line_option:apple_crosstool_top"]
),
"//command_line_option:fission": [],
"//command_line_option:grte_top": settings["//command_line_option:apple_grte_top"],
"//command_line_option:ios_minimum_os": _min_os_version_or_none(attr, "ios", platform_type),
"//command_line_option:macos_minimum_os": _min_os_version_or_none(attr, "macos", platform_type),
"//command_line_option:tvos_minimum_os": _min_os_version_or_none(attr, "tvos", platform_type),
"//command_line_option:watchos_minimum_os": _min_os_version_or_none(attr, "watchos", platform_type),
}
return ret
# These flags are a mix of options defined in native Bazel from the following fragments:
# - https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/analysis/config/CoreOptions.java
# - https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/rules/apple/AppleCommandLineOptions.java
# - https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/rules/cpp/CppOptions.java
_apple_rule_transition = transition(
implementation = _apple_rule_transition_impl,
inputs = [
"//command_line_option:apple_compiler",
"//command_line_option:apple_crosstool_top",
"//command_line_option:apple_platform_type",
"//command_line_option:apple_grte_top",
"//command_line_option:cpu",
"//command_line_option:ios_multi_cpus",
"//command_line_option:macos_cpus",
"//command_line_option:tvos_cpus",
"//command_line_option:watchos_cpus",
"//command_line_option:apple_split_cpu",
],
outputs = [
"//command_line_option:apple configuration distinguisher",
"//command_line_option:apple_platform_type",
"//command_line_option:apple_split_cpu",
"//command_line_option:compiler",
"//command_line_option:cpu",
"//command_line_option:crosstool_top",
"//command_line_option:fission",
"//command_line_option:grte_top",
"//command_line_option:ios_minimum_os",
"//command_line_option:macos_minimum_os",
"//command_line_option:tvos_minimum_os",
"//command_line_option:watchos_minimum_os",
],
)
transition_support = struct(
apple_rule_transition = _apple_rule_transition,
current_apple_platform = _current_apple_platform,
)
```
|
{
"source": "jgraving/cne",
"score": 2
}
|
#### File: cne/selfsne/prior.py
```python
import torch
import torch.distributions as D
from torch import nn
from torch.nn import init
from torch import optim
from torch.utils.data import DataLoader
import numpy as np
import pytorch_lightning as pl
from selfsne.kernels import KERNELS
class MixturePrior(pl.LightningModule):
def __init__(
self,
num_dims=2,
num_components=2048,
kernel="normal",
logits="learn",
kernel_scale=1.0,
lr=0.1,
):
super().__init__()
self.save_hyperparameters()
self.kernel = KERNELS[self.hparams.kernel]
if self.hparams.num_components == 1:
locs = torch.zeros((self.hparams.num_components, self.hparams.num_dims))
self.register_buffer("locs", locs)
else:
self.locs = nn.Parameter(
torch.Tensor(self.hparams.num_components, self.hparams.num_dims)
)
init.normal_(self.locs)
if self.hparams.logits == "learn":
self.logits = nn.Parameter(torch.zeros((self.hparams.num_components,)))
init.zeros_(self.logits)
elif self.hparams.logits == "maxent":
logits = torch.zeros((self.hparams.num_components,))
self.register_buffer("logits", logits)
self.watershed_optimized = False
@property
def multinomial(self):
return D.Multinomial(logits=self.logits)
@property
def mixture(self):
return D.Categorical(logits=self.logits)
def sample(self, n_samples):
components = D.Independent(D.Normal(loc=self.locs, scale=1), 1)
normal_mixture = D.MixtureSameFamily(self.mixture, components)
return normal_mixture.sample([n_samples])
@property
def components(self):
return self.kernel(self.locs.unsqueeze(1), self.hparams.kernel_scale)
def entropy(self):
return -(self.mixture.probs * self.log_prob(self.locs)).sum()
def weighted_log_prob(self, x):
return self.components.log_prob(x) + self.mixture.logits.unsqueeze(1)
def log_prob(self, x):
return self.weighted_log_prob(x).logsumexp(0)
def disable_grad(self):
for param in self.parameters():
param.requires_grad = False
def enable_grad(self):
for param in self.parameters():
param.requires_grad = True
def rate(self, x):
self.disable_grad()
rate = self.log_prob(x)
self.enable_grad()
return rate
def assign_modes(self, x):
return self.weighted_log_prob(x).argmax(0)
def quantize(self, x):
return self.locs[self.assign_modes(x)]
def entropy_upper_bound(self):
return -(self.mixture.probs * self.log_prob(self.watershed_locs)).sum()
def configure_optimizers(self):
self.watershed_locs = nn.Parameter(self.locs.clone().detach())
return optim.Adam([self.watershed_locs], lr=self.hparams.lr)
def watershed_labels(self):
# perform sparse watershed assignment for component means
watershed_modes = self.assign_modes(self.watershed_locs)
watershed_assignments = torch.arange(
self.hparams.num_components, device=watershed_modes.device
)
# loop over k_components to ensure all modes are correctly assigned
# hierarchy of clusters cannot be longer than num_components
for _ in range(self.hparams.num_components):
watershed_assignments = watershed_modes[watershed_assignments]
# reindex starting at 0
unique_labels = torch.unique(watershed_assignments)
for idx, label in enumerate(unique_labels):
watershed_assignments[watershed_assignments == label] = idx
return watershed_assignments
def on_train_end(self):
self.watershed_assignments = self.watershed_labels()
self.watershed_optimized = True
def training_epoch_end(self, training_step_outputs):
self.log(
"n_labels_epoch",
self.watershed_labels().max() + 1,
on_step=False,
on_epoch=True,
prog_bar=True,
)
def training_step(self, batch, batch_idx):
entropy = self.entropy_upper_bound()
self.log(
"entropy",
entropy,
on_step=True,
on_epoch=True,
prog_bar=True,
)
return entropy
def assign_labels(self, p):
if not self.watershed_optimized:
self.optimize_watershed()
return self.watershed_assignments[self.assign_modes(p)]
def optimize_watershed(
self,
max_epochs=999,
steps_per_epoch=10,
patience=10,
verbose=True,
gpus=None,
lr=0.1,
):
self.hparams.lr = lr
if verbose:
print("optimizing entropy...")
dummy_loader = DataLoader(np.zeros(steps_per_epoch), batch_size=1)
early_stopping = pl.callbacks.EarlyStopping("entropy", patience=patience)
trainer = pl.Trainer(
max_epochs=max_epochs,
progress_bar_refresh_rate=verbose,
weights_summary=None,
callbacks=[early_stopping],
gpus=gpus,
)
trainer.fit(self, dummy_loader)
```
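A minimal, hypothetical usage sketch of `MixturePrior` on its own, assuming the `selfsne` package is importable and its `KERNELS` registry provides the `"normal"` kernel; the embeddings below are random stand-ins rather than encoder output:
```python
# Hypothetical standalone use of MixturePrior; the embeddings are random stand-ins.
import torch
from selfsne.prior import MixturePrior

prior = MixturePrior(num_dims=2, num_components=64, kernel="normal")
z = torch.randn(128, 2)        # pretend these came from an encoder
log_p = prior.log_prob(z)      # mixture log-density per point
modes = prior.assign_modes(z)  # index of the highest-scoring component per point
print(log_p.shape, modes.shape)
```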
#### File: cne/selfsne/selfsne.py
```python
import pytorch_lightning as pl
import torch.optim as optim
import torch.nn as nn
import copy
from selfsne.prior import MixturePrior
from selfsne.losses import InfoNCE, RedundancyReduction
from selfsne.neighbors import NearestNeighborSampler
from selfsne.utils import stop_gradient
class SelfSNE(pl.LightningModule):
"""Self-Supervised Noise Embedding"""
def __init__(
self,
encoder,
pair_sampler,
projector=nn.Identity(),
prior=MixturePrior(num_dims=2, num_components=1),
similarity_loss=InfoNCE("studentt"),
redundancy_loss=RedundancyReduction(2),
similarity_multiplier=1.0,
redundancy_multiplier=1.0,
rate_multiplier=0.1,
learning_rate=1e-3,
weight_decay=0.01,
):
super().__init__()
self.encoder = encoder
self.projector = projector
self.pair_sampler = pair_sampler
self.prior = prior
self.similarity_loss = similarity_loss
self.redundancy_loss = redundancy_loss
self.save_hyperparameters(
"similarity_multiplier",
"redundancy_multiplier",
"rate_multiplier",
"learning_rate",
"weight_decay",
ignore=[
"encoder",
"projector",
"pair_sampler",
"prior",
"similarity_loss",
"redundancy_loss",
],
)
def forward(self, batch):
return self.projector(self.encoder(batch))
def loss(self, batch, batch_idx, mode=""):
query, key = self.pair_sampler(batch)
query = self(query)
key = self(key)
similarity = self.similarity_loss(query, key).mean()
redundancy = self.redundancy_loss(query, key).mean()
prior_log_prob = -self.prior.log_prob(stop_gradient(query)).mean()
rate = -self.prior.rate(query).mean()
loss = {
mode + "similarity": similarity,
mode + "redundancy": redundancy,
mode + "rate": rate,
mode + "prior_entropy": self.prior.entropy(),
mode
+ "loss": (
prior_log_prob
+ rate * self.hparams.rate_multiplier
+ similarity * self.hparams.similarity_multiplier
+ redundancy * self.hparams.redundancy_multiplier
),
}
for key in loss.keys():
self.log(key, loss[key], prog_bar=True)
return loss
def training_step(self, batch, batch_idx):
return self.loss(batch, batch_idx, mode="")["loss"]
def validation_step(self, batch, batch_idx):
self.loss(batch, batch_idx, mode="val_")
def test_step(self, batch, batch_idx):
self.loss(batch, batch_idx, mode="test_")
def predict_step(self, batch, batch_idx, dataloader_idx=0):
embedded = self(batch)
prior_log_prob = self.prior.log_prob(embedded)
labels = self.prior.assign_labels(embedded)
return {
"embedding": embedded.cpu().numpy(),
"labels": labels.cpu().numpy(),
"prior_log_prob": prior_log_prob.cpu().numpy(),
}
def configure_optimizers(self):
params_list = [
{"params": self.encoder.parameters()},
{"params": self.pair_sampler.parameters()},
{"params": self.similarity_loss.parameters()},
{"params": self.redundancy_loss.parameters()},
{
"params": self.projector.parameters(),
"weight_decay": 0.0,
},
{
"params": self.prior.parameters(),
"weight_decay": 0.0,
"lr": self.prior.hparams.lr,
},
]
return optim.AdamW(
params_list,
lr=self.hparams.learning_rate,
weight_decay=self.hparams.weight_decay,
)
```
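A hypothetical wiring sketch for `SelfSNE`, assuming the `selfsne` package is installed; the encoder and pair sampler below are toy stand-ins, not components shipped with the library:
```python
# Toy wiring sketch; the encoder and pair sampler are illustrative stand-ins.
import torch.nn as nn
import pytorch_lightning as pl
from selfsne.selfsne import SelfSNE

class IdentityPairs(nn.Module):
    """Returns the batch twice; a real sampler would produce distinct (query, key) views."""
    def forward(self, batch):
        return batch, batch

encoder = nn.Sequential(nn.Linear(10, 64), nn.ReLU(), nn.Linear(64, 2))
model = SelfSNE(encoder=encoder, pair_sampler=IdentityPairs())

# trainer = pl.Trainer(max_epochs=10)
# trainer.fit(model, train_dataloader)  # train_dataloader yields batches of shape (N, 10)
```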
|
{
"source": "jgray5/Lego-boost",
"score": 3
}
|
#### File: examples/plotter/imgtracer.py
```python
import json
import logging
import matplotlib.pyplot as plt
import time
from threading import Thread
import numpy
from PIL import Image
class Tracer(object):
def __init__(self, fname):
super(Tracer, self).__init__()
self.threshold = 64
self.orig_fname = fname
self.orig = Image.open(fname)
self.conv1 = self.remove_transparency(self.orig)
self.conv1 = self.conv1.convert("L")
self.src = numpy.asarray(self.conv1)
self.dst = numpy.copy(self.src)
self.dst.fill(False)
self.mark = numpy.copy(self.dst)
# start in center
self.height, self.width = self.dst.shape[0:2]
        self.posy = self.height // 2  # integer division: these values are used as array indices
        self.posx = self.width // 2
self.lines = []
def remove_transparency(self, im, bg_colour=(255, 255, 255)):
# from https://stackoverflow.com/questions/35859140/remove-transparency-alpha-from-any-image-using-pil
# Only process if image has transparency (http://stackoverflow.com/a/1963146)
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
# (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
def trace(self):
while self._has_unchecked_pixels():
# go circles to find a pixel in src
if not self._spiral_till_pixel():
break
# move until we find new pixels
self._move_while_you_can()
logging.info("Done")
with open(self.orig_fname + ".json", "wt") as fhd:
fhd.write(json.dumps(self.lines))
def _has_unchecked_pixels(self):
ix, iy = numpy.where(self.mark == False) # FIXME: highly inefficient
return len(ix) or len(iy)
def is_src(self, posx, posy):
return self.src[posy][posx] < self.threshold
def _spiral_till_pixel(self): # TODO: optimize it, maybe use different algo (not spiral, just walkthrough?)
radius = 1
direction = 0
offset = 0
while self._has_unchecked_pixels():
in_lower = self.posy < self.height and self.posx < self.width
in_upper = self.posy >= 0 and self.posx >= 0
if in_lower and in_upper and not self.mark[self.posy][self.posx]:
if self.is_src(self.posx, self.posy):
return True
self.mark[self.posy][self.posx] = True
if direction == 0:
self.posx += 1
self.posy += 0
elif direction == 1:
self.posx += 0
self.posy += 1
elif direction == 2:
self.posx += -1
self.posy += 0
elif direction == 3:
self.posx += 0
self.posy += -1
else:
raise ValueError()
offset += 1
if offset >= radius:
# time.sleep(0.01)
offset = 0
direction += 1
if direction > 3:
direction = 0
if direction in (0, 2):
radius += 1
return False
def _move_while_you_can(self):
# time.sleep(0.1)
logging.debug("%s:%s=%s", self.posy, self.posx, self.src[self.posy][self.posx])
        dirs = self._check_directions()  # TODO: use a stack of this knowledge to speed up the walkthrough
dx, dy, length = self._get_best_direction(dirs)
self.dst[self.posy][self.posx] = True
self.mark[self.posy][self.posx] = True
line = {
"x1": self.posx, "y1": self.posy,
"x2": self.posx + dx * length, "y2": self.posy + dy * length,
"len": length
}
self.lines.append(line)
logging.info("%s", line)
for n in range(0, length):
self.posy += dy
self.posx += dx
self.dst[self.posy][self.posx] = True
self.mark[self.posy][self.posx] = True
def _check_directions(self):
dirs = {
-1: {-1: 0, 0: 0, 1: 0},
0: {-1: 0, 0: 0, 1: 0},
1: {-1: 0, 0: 0, 1: 0},
}
for dy in (-1, 0, 1):
for dx in (-1, 0, 1):
if dy == 0 and dx == 0:
continue
length = 1
while True:
cx = self.posx + length * dx
cy = self.posy + length * dy
if not (0 <= cx < self.width) or not (0 <= cy < self.height):
break
if not self.is_src(cx, cy) or self.mark[cy][cx]:
break
dirs[dy][dx] = length
length += 1
return dirs
def _get_best_direction(self, dirs):
bestlen = 0
bestx = 0
besty = 0
for y in dirs:
for x in dirs[y]:
if dirs[y][x] > bestlen:
bestlen = dirs[y][x]
bestx = x
besty = y
return bestx, besty, bestlen
class TracerVisualizer(object):
def __init__(self, tracer):
"""
:type tracer: Tracer
"""
self.tracer = tracer
def run(self):
tracer = self.tracer
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
ax1.imshow(tracer.orig)
ax2.imshow(tracer.src, cmap='binary')
plt.show(block=False)
thr = Thread(target=tracer.trace)
        thr.daemon = True  # setDaemon() is deprecated; the daemon attribute is the idiomatic form
thr.start()
while plt.get_fignums(): # weird trick to react on close
ax3.set_title("%s:%s" % (tracer.posx, tracer.posy))
ax3.imshow(tracer.mark, cmap='gray')
ax4.imshow(tracer.dst, cmap='gray')
plt.pause(1)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
trc = Tracer("test3.png")
TracerVisualizer(trc).run()
time.sleep(5)
```
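A minimal non-interactive sketch of the tracer, assuming the module is importable as `imgtracer` and that an input image such as `test3.png` (the filename used in `__main__` above) exists:
```python
# Headless usage sketch: trace an image and inspect the resulting line segments.
import logging
from imgtracer import Tracer

logging.basicConfig(level=logging.INFO)
tracer = Tracer("test3.png")  # same sample filename the module's __main__ uses
tracer.trace()                # also writes the segments to test3.png.json
print(len(tracer.lines), "line segments traced")
```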
#### File: Lego-boost/pylgbst/constants.py
```python
import binascii
import struct
import sys
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue as queue
queue = queue # just to use it
def str2hex(data):
return binascii.hexlify(data).decode("utf8")
def usbyte(seq, index):
return struct.unpack("<B", seq[index:index + 1])[0]
def ushort(seq, index):
return struct.unpack("<H", seq[index:index + 2])[0]
# GENERAL
MOVE_HUB_HARDWARE_HANDLE = 0x0E
MOVE_HUB_HARDWARE_UUID = '00001624-1212-efde-1623-785feabcd123'
PACKET_VER = 0x01
# PORTS
PORT_C = 0x01
PORT_D = 0x02
PORT_LED = 0x32
PORT_A = 0x37
PORT_B = 0x38
PORT_AB = 0x39
PORT_TILT_SENSOR = 0x3A
PORT_AMPERAGE = 0x3B
PORT_VOLTAGE = 0x3C
PORTS = {
PORT_A: "A",
PORT_B: "B",
PORT_AB: "AB",
PORT_C: "C",
PORT_D: "D",
PORT_LED: "LED",
PORT_TILT_SENSOR: "TILT_SENSOR",
PORT_AMPERAGE: "AMPERAGE",
PORT_VOLTAGE: "VOLTAGE",
}
# PACKET TYPES
MSG_DEVICE_INFO = 0x01
# 0501010305 gives 090001030600000010
MSG_DEVICE_SHUTDOWN = 0x02 # sent when hub shuts down by button hold
MSG_PING_RESPONSE = 0x03
MSG_PORT_INFO = 0x04
MSG_PORT_CMD_ERROR = 0x05
MSG_SET_PORT_VAL = 0x81
MSG_PORT_STATUS = 0x82
MSG_SENSOR_SUBSCRIBE = 0x41
MSG_SENSOR_SOMETHING1 = 0x42 # it is seen close to sensor subscribe commands. Subscription options? Initial value?
MSG_SENSOR_DATA = 0x45
MSG_SENSOR_SUBSCRIBE_ACK = 0x47
# DEVICE TYPES
DEV_VOLTAGE = 0x14
DEV_AMPERAGE = 0x15
DEV_LED = 0x17
DEV_DCS = 0x25
DEV_IMOTOR = 0x26
DEV_MOTOR = 0x27
DEV_TILT_SENSOR = 0x28
DEVICE_TYPES = {
DEV_DCS: "DISTANCE_COLOR_SENSOR",
DEV_IMOTOR: "IMOTOR",
DEV_MOTOR: "MOTOR",
DEV_TILT_SENSOR: "TILT_SENSOR",
DEV_LED: "LED",
DEV_AMPERAGE: "AMPERAGE",
DEV_VOLTAGE: "VOLTAGE",
}
# NOTIFICATIONS
STATUS_STARTED = 0x01
STATUS_CONFLICT = 0x05
STATUS_FINISHED = 0x0a
STATUS_INPROGRESS = 0x0c # FIXME: not sure about description
STATUS_INTERRUPTED = 0x0e # FIXME: not sure about description
# COLORS
COLOR_BLACK = 0x00
COLOR_PINK = 0x01
COLOR_PURPLE = 0x02
COLOR_BLUE = 0x03
COLOR_LIGHTBLUE = 0x04
COLOR_CYAN = 0x05
COLOR_GREEN = 0x06
COLOR_YELLOW = 0x07
COLOR_ORANGE = 0x09
COLOR_RED = 0x09
COLOR_WHITE = 0x0a
COLOR_NONE = 0xFF
COLORS = {
COLOR_BLACK: "BLACK",
COLOR_PINK: "PINK",
COLOR_PURPLE: "PURPLE",
COLOR_BLUE: "BLUE",
COLOR_LIGHTBLUE: "LIGHTBLUE",
COLOR_CYAN: "CYAN",
COLOR_GREEN: "GREEN",
COLOR_YELLOW: "YELLOW",
COLOR_ORANGE: "ORANGE",
COLOR_RED: "RED",
COLOR_WHITE: "WHITE",
COLOR_NONE: "NONE"
}
# DEVICE INFO
INFO_DEVICE_NAME = 0x01
INFO_BUTTON_STATE = 0x02
INFO_FIRMWARE_VERSION = 0x03
INFO_SOME4 = 0x04
INFO_SOME5_JITTERING = 0x05
INFO_SOME6 = 0x06
INFO_SOME7 = 0x07
INFO_MANUFACTURER = 0x08
INFO_HW_VERSION = 0x09
INFO_SOME10 = 0x0a
INFO_SOME11 = 0x0b
INFO_SOME12 = 0x0c
INFO_ACTION_SUBSCRIBE = 0x02
INFO_ACTION_UNSUBSCRIBE = 0x03
INFO_ACTION_GET = 0x05
```
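A quick sketch of the byte helpers above under Python 3, assuming the package is importable as `pylgbst`; the packet bytes are hand-made for illustration, not a captured hub message:
```python
# Illustrative use of the byte helpers on a hand-built packet (not a real capture).
from pylgbst.constants import PORTS, PORT_LED, str2hex, usbyte, ushort

packet = bytes([0x08, 0x00, 0x81, PORT_LED, 0x11, 0x51, 0x00, 0x03])
print(str2hex(packet))           # hex string of the raw bytes
print(hex(usbyte(packet, 3)))    # 0x32, the port byte
print(PORTS[usbyte(packet, 3)])  # 'LED'
print(ushort(packet, 0))         # little-endian unsigned short from bytes 0..1
```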
|
{
"source": "jgreat/mobilecoin",
"score": 2
}
|
#### File: mobilecoind/strategies/accounts.py
```python
import grpc
import json
import logging
import time
import os
import sys
import uuid
import mobilecoind_api_pb2
import mobilecoind_api_pb2_grpc
from collections import namedtuple
from enum import Enum
from random import randint
from google.protobuf.empty_pb2 import Empty
logging.basicConfig(stream = sys.stdout, level = logging.INFO, format="%(levelname)s:%(module)s:%(lineno)s: %(message)s")
AccountData = namedtuple("AccountData",
["account_key", "monitor_id", "public_address"])
class TransferStatus(Enum):
pending = 0
success = 1
tombstoned = 2
failed = 3
def connect(host, port):
# Set Up GRPC connection to wallet
if port == '443':
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel('{}:{}'.format(host, port), credentials)
else:
channel = grpc.insecure_channel('{}:{}'.format(host, port))
return mobilecoind_api_pb2_grpc.MobilecoindAPIStub(channel)
def register_account(key_data, stub) -> AccountData:
# Generate an account key from this root entropy
resp = stub.GetAccountKeyFromRootEntropy(
mobilecoind_api_pb2.GetAccountKeyFromRootEntropyRequest(
root_entropy=bytes(key_data['root_entropy'])))
account_key = resp.account_key
# Add this account to the wallet
resp = stub.AddMonitor(
mobilecoind_api_pb2.AddMonitorRequest(account_key=account_key, first_subaddress=0, num_subaddresses=1))
monitor_id = resp.monitor_id
resp = stub.GetPublicAddress(
mobilecoind_api_pb2.GetPublicAddressRequest(monitor_id=monitor_id, subaddress_index=0))
public_address = resp.public_address
return AccountData(account_key, monitor_id, public_address)
def load_key_and_register(keyfile, stub) -> AccountData:
# Load the account key from file
with open(keyfile, 'r') as f:
key_data = json.load(f)
# Remove discovery fqdn, as this causes InvalidTxOutMembershipProof
key_data['acct_fqdn'] = None
return register_account(key_data, stub)
def register_random_key(stub, outdir) -> AccountData:
entropy = [randint(0, 255) for i in range(32)]
logging.debug("entropy = %s", entropy)
data = {"root_entropy": entropy}
outfile = 'account_keys_{}.json'.format(uuid.uuid4())
with open(os.path.join(outdir, outfile), 'w') as out:
json.dump(data, out)
return register_account(data, stub)
def wait_for_accounts_sync(stub, accounts, wait_secs):
logging.debug("accounts = %s", accounts[0])
block_count = stub.GetLedgerInfo(Empty()).block_count
synced_ids = {a: False for a in accounts}
while not all(synced_ids.values()):
logging.info("Waiting for accounts to sync")
for a in synced_ids:
request = mobilecoind_api_pb2.GetMonitorStatusRequest(monitor_id=a)
monitor_block = stub.GetMonitorStatus(request).status.next_block
if monitor_block == block_count:
synced_ids[a] = True
time.sleep(wait_secs)
logging.info("All accounts synced")
def get_synced_accounts(stub, accounts):
block_count = stub.GetLedgerInfo(Empty()).block_count
synced = {a: False for a in accounts}
while not any(synced.values()):
logging.info("Waiting for accounts to sync")
for a in synced:
request = mobilecoind_api_pb2.GetMonitorStatusRequest(monitor_id=a)
monitor_block = stub.GetMonitorStatus(request).status.next_block
if monitor_block == block_count:
synced[a] = True
return {a for a in synced if synced[a] == True}
def poll_mitosis(starting_balance, account_data, tx_stats, stub):
complete = {t: False for t in tx_stats.keys()}
pending = complete.keys()
while not all(complete.values()):
for tx_id in pending:
try:
resp = stub.GetBalance(
mobilecoind_api_pb2.GetBalanceRequest(
monitor_id=account_data.monitor_id))
if resp.balance == starting_balance and resp.account_block_height == resp.ledger_num_blocks:
complete[tx_id] = True
tx_stats[tx_id]['time_delta'] = time.time(
) - tx_stats[tx_id]['start']
tx_stats[tx_id][
'block_delta'] = resp.ledger_num_blocks - tx_stats[
tx_id]['block_start']
# FIXME: don't know status currently...see below in poll
tx_stats[tx_id]['status'] = TransferStatus.success
except Exception as exc:
logging.error("Got Balance exception: %s", repr(exc))
pending = [k for k in complete if not complete[k]]
logging.info("Still pending: %s", len(pending))
time.sleep(2)
logging.info("All accounts transfers complete")
logging.debug(tx_stats)
return tx_stats
def poll(monitor_id, tx_stats, stub):
complete = {t: False for t in tx_stats.keys()}
receipts = {t: tx_stats[t] for t in tx_stats.keys()}
pending = complete.keys()
while not all(complete.values()):
for tx_id in pending:
try:
resp = stub.GetTxStatusAsSender(
mobilecoind_api_pb2.SubmitTxResponse(
sender_tx_receipt=receipts[tx_id]["receipt"].sender_tx_receipt,
receiver_tx_receipt_list=receipts[tx_id]["receipt"].receiver_tx_receipt_list
))
if resp.status == mobilecoind_api_pb2.TxStatus.TombstoneBlockExceeded:
logging.warning("Transfer did not complete in time: %s", tx_id)
complete[tx_id] = True
tx_stats[tx_id]['time_delta'] = time.time(
) - tx_stats[tx_id]['start']
tx_stats[tx_id]['status'] = TransferStatus.tombstoned
elif resp.status == mobilecoind_api_pb2.TxStatus.Verified:
logging.info("Transfer complete %s", tx_id)
complete[tx_id] = True
tx_stats[tx_id]['time_delta'] = time.time(
) - tx_stats[tx_id]['start']
tx_stats[tx_id]['status'] = TransferStatus.success
else:
logging.warning("Transfer status unknown: %s", resp.status)
except Exception as e:
logging.error("TransferStatus exception: %s", repr(e))
pending = [k for k in complete if not complete[k]]
time.sleep(0.25)
logging.info("All accounts transfers complete")
logging.debug(tx_stats)
return tx_stats
```
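A hypothetical driver sketch for the helpers above, assuming it runs from the `strategies` directory (so the module imports as `accounts`), the generated `mobilecoind_api_pb2` modules are on the path, and a mobilecoind instance is reachable; the host, port, and keyfile path are placeholders:
```python
# Placeholder host/port and keyfile; requires a reachable mobilecoind instance.
from accounts import connect, load_key_and_register, wait_for_accounts_sync

stub = connect("localhost", "4444")
account = load_key_and_register("keys/account_keys_0.json", stub)
wait_for_accounts_sync(stub, [account.monitor_id], wait_secs=5)
print("registered monitor:", account.monitor_id)
```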
|
{
"source": "jgreat/tf-rancher-2.0",
"score": 2
}
|
#### File: aws-ha-asg/lambda/lambda_function.py
```python
import json
import boto3
import botocore
import logging
import os
import paramiko
import socket
import subprocess
import warnings
import yaml
from random import randint
from time import sleep
from paramiko.ssh_exception import BadHostKeyException, AuthenticationException, SSHException
warnings.filterwarnings(action='ignore',module='.*paramiko.*')
print('Loading function')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
sleep(randint(1,15))
base_cluster_path = '/tmp/base_cluster.yml'
cluster_path = '/tmp/cluster.yml'
json_vars_path = '/tmp/vars.json'
kubeconfig_path = '/tmp/kube_config_cluster.yml'
snapshot_path = '/tmp/snapshots'
s3_bucket = os.environ['S3_BUCKET']
rke_version = os.environ['RKE_VERSION']
ssh_user = os.environ.get('SSH_USER', default='rancher')
rke_path = '/tmp/rke'
state_path = '/tmp/cluster.rkestate'
ssh_key_path = '/tmp/rsa_id'
logger.info(json.dumps(event, indent=4, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
if 'LifecycleTransition' not in message:
        logger.info('Not an autoscale transition event. Doing nothing.')
return
transition = message['LifecycleTransition']
lifecycle_action_token = message['LifecycleActionToken']
metadata = json.loads(message['NotificationMetadata'])
lb = metadata['lb']
ec2 = boto3.resource('ec2')
instance = ec2.Instance(message['EC2InstanceId'])
if instance.public_ip_address:
ip = instance.public_ip_address
internal_ip = instance.private_ip_address
else:
ip = instance.private_ip_address
internal_ip = ""
logger.info(message)
logger.info('Instance ID: {}'.format(message['EC2InstanceId']))
logger.info('Transition: {}'.format(transition))
logger.info('Address: {}'.format(ip))
logger.info('Internal Address: {}'.format(internal_ip))
logger.info('LB Endpoint: {}'.format(lb))
try:
# Get instance info
logger.info('Waiting for instance to be ready')
instance.wait_until_running()
logger.info('Instance is in Running state')
# Download RKE
get_rke(version=rke_version, path=rke_path)
# Get SSH key
get_ssh_private_key(bucket=s3_bucket, path=ssh_key_path)
# Test docker ready
wait_for_docker(private_key_path=ssh_key_path, ip=ip)
# Set Lock
set_lock(bucket=s3_bucket, ip=ip)
# Get state files
get_kubeconfig(bucket=s3_bucket, path=kubeconfig_path)
get_state(bucket=s3_bucket, path=state_path)
get_base_cluster(bucket=s3_bucket, path=base_cluster_path)
get_cluster(bucket=s3_bucket, path=cluster_path)
# Take a snapshot if the cluster exists.
if os.path.isfile(kubeconfig_path):
snapshot_name = 'asg-{}-{}'.format(message['LifecycleHookName'], message['RequestId'])
local_snapshot_path = '{}/{}'.format(snapshot_path, snapshot_name)
remote_snapshot_path = '/opt/rke/etcd-snapshots/{}'.format(snapshot_name)
take_snapshot(name=snapshot_name, rke_path=rke_path, cluster_path=cluster_path)
if not os.path.isdir(snapshot_path):
os.mkdir(snapshot_path)
copy_snapshot(cluster_path=cluster_path, local_path=local_snapshot_path, remote_path=remote_snapshot_path)
upload_snapshot(bucket=s3_bucket, name=snapshot_name, path=local_snapshot_path)
# update cluster.yml
node = {
'address': ip,
'internal_address': internal_ip,
'user': ssh_user,
'role': [ 'controlplane', 'etcd', 'worker' ],
'ssh_key_path': ssh_key_path
}
if transition == 'autoscaling:EC2_INSTANCE_LAUNCHING':
add_node(base_cluster_path=base_cluster_path, cluster_path=cluster_path, node=node)
elif transition == 'autoscaling:EC2_INSTANCE_TERMINATING':
remove_node(path=cluster_path, node=node)
else:
raise Exception('Unknown transition, run away!')
# run rke
        # note: '2>&1' would be passed to rke as a literal argument (no shell here), so it is omitted
        cmd = [rke_path, 'up', '--config', cluster_path]
        response = subprocess.run(cmd)
response.check_returncode()
# Add ELB endpoint to kube_config_cluster.yml
add_lb_to_kubeconfig(path=kubeconfig_path, lb=lb)
json_vars_file(kubeconfig_path=kubeconfig_path, json_vars_path=json_vars_path)
# Update files
upload_files(bucket=s3_bucket, cluster_path=cluster_path, kubeconfig_path=kubeconfig_path, state_path=state_path, json_vars_path=json_vars_path)
# Remove lock
remove_lock(bucket=s3_bucket, ip=ip)
# Send ASG complete
complete_lifecycle(message, 'CONTINUE')
except Exception as e:
if lifecycle_action_token:
complete_lifecycle(message, 'ABANDON')
if ip:
remove_lock(bucket=s3_bucket, ip=ip)
raise e
else:
logger.info('rke up Success')
def add_lb_to_kubeconfig(path, lb):
logger.info('Updating Kubeconfig with LB endpoint: {}'.format(lb))
kube_config = {}
with open(path, 'r') as k:
kube_config = yaml.safe_load(k.read())
kube_config['clusters'][0]['cluster']['server'] = lb
with open(path, 'w') as k:
k.writelines(yaml.dump(kube_config, default_flow_style=False))
def json_vars_file(kubeconfig_path, json_vars_path):
logger.info('Creating json_vars for TF to ingest')
kube_config = {}
with open(kubeconfig_path, 'r') as k:
kube_config = yaml.safe_load(k.read())
json_vars = {
'host': kube_config['clusters'][0]['cluster']['server'],
'username': kube_config['users'][0]['name'],
'client_certificate_data': kube_config['users'][0]['user']['client-certificate-data'],
'client_key_data': kube_config['users'][0]['user']['client-key-data'],
'certificate_authority_data': kube_config['clusters'][0]['cluster']['certificate-authority-data']
}
with open(json_vars_path, 'w') as j:
j.writelines(json.dumps(json_vars))
def take_snapshot(name, rke_path, cluster_path):
logger.info('Taking snapshot of current cluster. ' + name)
cmd = [ rke_path, 'etcd', 'snapshot-save', '--config', cluster_path, '--name', name ]
response = subprocess.run(cmd)
response.check_returncode()
def copy_snapshot(cluster_path, local_path, remote_path):
# Pull snapshot from the first host in the cluster.yml
cluster = {}
with open(cluster_path, 'r') as c:
cluster = yaml.safe_load(c.read())
ip = cluster['nodes'][0]['address']
user = cluster['nodes'][0]['user']
ssh_key_path = cluster['nodes'][0]['ssh_key_path']
logger.info('Copy snapshot from {} to localhost'.format(ip))
logger.info('Remote path: {}'.format(remote_path))
    logger.info('Local path: {}'.format(local_path))
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
private_key = paramiko.RSAKey.from_private_key_file(ssh_key_path)
ssh.connect(hostname=ip, username=user, pkey=private_key)
ftp_client=ssh.open_sftp()
ftp_client.get(remote_path, local_path)
ftp_client.close()
def upload_snapshot(bucket, name, path):
# upload to s3:// /snapshots
logger.info('Uploading snapshot: ' + path)
with open(path, 'rb') as k:
s3 = boto3.client('s3')
s3.upload_fileobj(k, bucket, 'snapshots/{}'.format(name), ExtraArgs={'ContentType': 'text/yaml'})
def get_state(bucket, path):
s3 = boto3.client('s3')
logger.info('Downloading cluster.rkestate from ' + bucket)
try:
s3.download_file(bucket, 'cluster.rkestate', path)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logger.info('no existing state file')
else:
raise e
def get_kubeconfig(bucket, path):
s3 = boto3.client('s3')
logger.info('Downloading kube_config_cluster.yml from ' + bucket)
try:
s3.download_file(bucket, 'kube_config_cluster.yml', path)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logger.info('no existing kubeconfig file')
else:
raise e
def get_cluster(bucket, path):
s3 = boto3.client('s3')
logger.info('Downloading cluster.yml from ' + bucket)
try:
s3.download_file(bucket, 'cluster.yml', path)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logger.info('cluster.yml not found creating new')
open(path, 'a').close()
else:
raise e
def get_base_cluster(bucket, path):
s3 = boto3.client('s3')
logger.info('Downloading base_cluster.yml from ' + bucket)
try:
s3.download_file(bucket, 'base_cluster.yml', path)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logger.info('base_cluster.yml not found creating new')
open(path, 'a').close()
else:
raise e
def add_node(cluster_path, base_cluster_path, node):
cluster = {}
base_cluster = {}
with open(cluster_path, 'r') as c:
cluster = yaml.safe_load(c.read())
# This seems dumb, but an empty document doesn't return an empty dict
if not cluster:
cluster = {}
with open(base_cluster_path, 'r') as c:
base_cluster = yaml.safe_load(c.read())
if not base_cluster:
base_cluster = {}
# merge base_cluster over values in cluster
new_cluster = {**cluster, **base_cluster}
# update cluster with new node
if new_cluster:
if 'nodes' in new_cluster:
if node in new_cluster['nodes']:
logger.info('found node entry in cluster.yml')
else:
logger.info('appending node entry to cluster.yml')
new_cluster['nodes'].append(node)
else:
logger.info('adding node entry to cluster.yml')
new_cluster['nodes'] = [ node ]
else:
logger.info('adding nodes entry to cluster.yml')
new_cluster = {
'nodes': [
node
]
}
with open(cluster_path, 'w') as c:
c.writelines(yaml.dump(new_cluster, default_flow_style=False))
def remove_node(path, node):
cluster = {}
with open(path, 'r') as c:
cluster = yaml.safe_load(c.read())
if cluster:
if 'nodes' in cluster:
if node in cluster['nodes']:
cluster['nodes'].remove(node)
with open(path, 'w') as c:
c.writelines(yaml.dump(cluster, default_flow_style=False))
def upload_files(bucket, cluster_path, kubeconfig_path, state_path, json_vars_path):
s3 = boto3.client('s3')
if os.path.isfile(cluster_path):
logger.info('Uploading cluster.yml')
with open(cluster_path, 'rb') as c:
s3.upload_fileobj(c, bucket, 'cluster.yml', ExtraArgs={'ContentType': 'text/yaml'})
if os.path.isfile(kubeconfig_path):
logger.info('Uploading kube_config_path.yml')
with open(kubeconfig_path, 'rb') as k:
s3.upload_fileobj(k, bucket, 'kube_config_cluster.yml', ExtraArgs={'ContentType': 'text/yaml'})
if os.path.isfile(state_path):
logger.info('Uploading cluster.rkestate')
with open(state_path, 'rb') as s:
s3.upload_fileobj(s, bucket, 'cluster.rkestate')
if os.path.isfile(json_vars_path):
logger.info('Uploading vars.json')
with open(json_vars_path, 'rb') as j:
s3.upload_fileobj(j, bucket, 'vars.json', ExtraArgs={'ContentType': 'application/json'})
def set_lock(bucket, ip):
s3 = boto3.client('s3')
logger.info('Checking for lock file')
# retry every 10 seconds, stop after 10 min
for attempt in range(60):
try:
s3.head_object(Bucket=bucket, Key='rke_lock')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logger.info("The lock_file does not exist. Setting now.")
with open('/tmp/rke_lock', 'w') as lock:
lock.write(ip)
with open('/tmp/rke_lock', 'rb') as lock:
s3.upload_fileobj(lock, bucket, 'rke_lock')
break
else:
raise e
else:
logger.info('Lock file exists. Waiting for lock to clear - ' + str(attempt))
sleep(10)
continue
else:
url = 's3://' + bucket + '/rke_tmp'
raise Exception('Time out waiting for lock to clear. ' + url)
def remove_lock(bucket, ip):
s3 = boto3.client('s3')
logger.info('Removing Lock File')
try:
s3.download_file(bucket, 'rke_lock', '/tmp/tmp_lock')
with open('/tmp/tmp_lock') as t:
if t.read() == ip:
s3.delete_object(Bucket=bucket, Key='rke_lock')
else:
logger.info('Not my lock file')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logger.info('lock file is gone?')
else:
raise e
def get_rke(version, path):
logger.info('Downloading RKE version ' + version)
url = 'https://github.com/rancher/rke/releases/download/' + version + '/rke_linux-amd64'
cmd = [ 'curl', '-fLSs', '-o', path, url ]
subprocess.check_call(cmd)
subprocess.check_call(['chmod', '+x', path])
def get_ssh_private_key(bucket, path):
s3 = boto3.client('s3')
logger.info('Downloading private key from ' + bucket)
s3.download_file(bucket, 'id_rsa', path)
def wait_for_docker(private_key_path, ip):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
private_key = paramiko.RSAKey.from_private_key_file(private_key_path)
logger.info('Waiting for Docker to be ready')
# retry every 10 seconds, stop after 5 min
for attempt in range(30):
try:
ssh.connect(hostname=ip, username='rancher', pkey=private_key)
stdin, stdout, stderr = ssh.exec_command('docker ps')
stdin.flush()
data = stdout.read().splitlines()
logger.debug(data)
logger.debug('Return: ' + str(stdout.channel.recv_exit_status()))
if stdout.channel.recv_exit_status() > 0:
raise SSHException('Command Failed')
logger.info('Docker ready')
ssh.close()
except (BadHostKeyException, AuthenticationException) as e:
raise e
except (SSHException, socket.error) as e:
ssh.close()
logger.info('Docker not ready ' + str(attempt))
sleep(10)
continue
else:
break
else:
raise Exception('Wait for docker timed out')
def complete_lifecycle(message, result):
client = boto3.client('autoscaling')
client.complete_lifecycle_action(
LifecycleHookName=message['LifecycleHookName'],
AutoScalingGroupName=message['AutoScalingGroupName'],
LifecycleActionToken=message['LifecycleActionToken'],
LifecycleActionResult=result
)
```
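A local smoke-test sketch that hand-builds an SNS record with the fields `lambda_handler` reads, assuming `boto3`, `paramiko`, and `PyYAML` are installed; every identifier below is a placeholder, and the actual call stays commented out because it needs AWS credentials plus the `S3_BUCKET` and `RKE_VERSION` environment variables:
```python
# Hand-built event mimicking the ASG lifecycle notification; all values are placeholders.
import json
from lambda_function import lambda_handler

message = {
    "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING",
    "LifecycleActionToken": "token-placeholder",
    "LifecycleHookName": "rke-node-launch",
    "AutoScalingGroupName": "rke-ha-asg",
    "RequestId": "request-placeholder",
    "EC2InstanceId": "i-0123456789abcdef0",
    "NotificationMetadata": json.dumps({"lb": "https://rke.example.com:6443"}),
}
event = {"Records": [{"Sns": {"Message": json.dumps(message)}}]}
# lambda_handler(event, None)
```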
|
{
"source": "JG-Redeux/genomicdata",
"score": 3
}
|
#### File: JG-Redeux/genomicdata/pdm.py
```python
from PyQt5 import QtCore
import pandas as pd
class DataFrameModel(QtCore.QAbstractTableModel):
"""[Class which interfaces QT and Pandas dataframes]
Args:
QtCore ([QtCore]): [Inherits QAbstractTableModel from QtCore]
Returns:
        [dataframe]: [dataframe output to QTableView]
"""
DtypeRole = QtCore.Qt.UserRole + 1000
ValueRole = QtCore.Qt.UserRole + 1001
def __init__(self, df=pd.DataFrame(), parent=None):
"""[init the instance]
Args:
df ([dataframe], optional): [dataframe to act as model]. Defaults to pd.DataFrame().
parent ([object], optional): [parent object]. Defaults to None.
"""
super(DataFrameModel, self).__init__(parent)
self._dataframe = df
def setDataFrame(self, dataframe):
"""[set dataframe as the model]
Args:
dataframe ([dataframe]): [dataframe to be set as model]
"""
self.beginResetModel()
self._dataframe = dataframe.copy()
self.endResetModel()
def dataFrame(self):
"""[method to output the class dataframe]
Returns:
[dataframe]: [returns the dataframe]
"""
return self._dataframe
dataFrame = QtCore.pyqtProperty(pd.DataFrame, fget=dataFrame, fset=setDataFrame)
@QtCore.pyqtSlot(int, QtCore.Qt.Orientation, result=str)
def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = QtCore.Qt.DisplayRole):
"""[get header data from dataframe]
Args:
section (int): [column index]
orientation (QtCore.Qt.Orientation): [orientation from Qt5]
role (int, optional): [role from Qt5]. Defaults to QtCore.Qt.DisplayRole.
Returns:
[QtCore.QVariant]: [Qt5 construct]
"""
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self._dataframe.columns[section]
else:
return str(self._dataframe.index[section])
return QtCore.QVariant()
def rowCount(self, parent=QtCore.QModelIndex()):
"""[get model row count]
Args:
parent ([object], optional): [object to inherit]. Defaults to QtCore.QModelIndex().
Returns:
[int]: [row count]
"""
if parent.isValid():
return 0
return len(self._dataframe.index)
def columnCount(self, parent=QtCore.QModelIndex()):
"""[get model column count]
Args:
parent ([object], optional): [object to inherit]. Defaults to QtCore.QModelIndex().
Returns:
[int]: [columns size (count)]
"""
if parent.isValid():
return 0
return self._dataframe.columns.size
def data(self, index, role=QtCore.Qt.DisplayRole):
"""[define model data]
Args:
index ([int]): [dataframe index number]
role (int, optional): [role from Qt5]. Defaults to QtCore.Qt.DisplayRole.
Returns:
[objects]: [dataframe objects]
"""
if not index.isValid() or not (0 <= index.row() < self.rowCount() and 0 <= index.column() < self.columnCount()):
return QtCore.QVariant()
row = self._dataframe.index[index.row()]
col = self._dataframe.columns[index.column()]
dt = self._dataframe[col].dtype
val = self._dataframe.iloc[row][col]
if role == QtCore.Qt.DisplayRole:
return str(val)
elif role == DataFrameModel.ValueRole:
return val
if role == DataFrameModel.DtypeRole:
return dt
return QtCore.QVariant()
def roleNames(self):
"""[get role names]
Returns:
[dict]: [dict with roles]
"""
roles = {
QtCore.Qt.DisplayRole: b'display',
DataFrameModel.DtypeRole: b'dtype',
DataFrameModel.ValueRole: b'value'
}
return roles
def sort(self, column):
"""[sort dataframe by column]
Args:
column ([string]): [column to sort by to]
"""
colname = self._dataframe.columns.tolist()[column]
self.layoutAboutToBeChanged.emit()
self._dataframe.sort_values(colname, ascending=QtCore.Qt.AscendingOrder, inplace=True)
self._dataframe.reset_index(inplace=True, drop=True)
self.layoutChanged.emit()
def get_value(self, row, col):
"""[get value from dataframe[row,col]]
Args:
row ([int]): [row number]
col ([int]): [col number]
Returns:
[object]: [value from dataframe[row,col]]
"""
return self._dataframe.iloc[row, col]
```
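A minimal sketch showing the model attached to a `QTableView`, assuming PyQt5 is installed and the module is importable as `pdm`:
```python
# Display a small DataFrame in a QTableView through DataFrameModel.
import sys
import pandas as pd
from PyQt5 import QtWidgets
from pdm import DataFrameModel

app = QtWidgets.QApplication(sys.argv)
df = pd.DataFrame({"sample": [1, 2, 3], "result": ["A", "B", "C"]})
view = QtWidgets.QTableView()
view.setModel(DataFrameModel(df))
view.show()
sys.exit(app.exec_())
```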
#### File: JG-Redeux/genomicdata/sqlmng.py
```python
from errorex import gd_errors
import psycopg2
from sqlalchemy import (MetaData, Table, create_engine, Column, Integer, String, Date,
exists, Boolean, Float, exc, func, ForeignKey, select, text,
or_, and_, literal, schema, inspect, DateTime)
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker, relationship, mapper
from sqlalchemy.sql.expression import false
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import create_database, database_exists
from sqlalchemy.dialects import postgresql
# import data_import as itapi
import pandas as pd
import logging
import datetime
logger = logging.getLogger(__name__)
Base = declarative_base()
gerrors = gd_errors()
# todo: update user info
class SQL(object):
"""[manages sessions, connections and other database metainfo]
Args:
object ([object]): [parent object]
"""
default_tables = ["users_table", "patients_table", "samples_table", "exams_table"]
# the init's parameters will be used to create the engine, it will be set in the main app
def __init__(self, server_login, server_pw, hostname, database):
"""[init the sql class]
Args:
server_login ([string]): [server login]
server_pw ([string]): [server password]
hostname ([string]): [hostname]
database ([string]): [target database]
"""
self.server_login = server_login
self.server_pw = server_pw
self.hostname = hostname
self.database = database
        logger.info("SQLMNG - Postgres instance initialized.")
    # create the sqlalchemy engine using Postgres and psycopg2 (the driver module);
    # the other connection info comes from the SQL class __init__ parameters
def set_engine(self):
"""[set database engine]
Returns:
[engine]: [database engine]
"""
db_adress = 'postgresql+psycopg2://{}:{}@{}/{}'.format(self.server_login,
self.server_pw,
self.hostname,
self.database)
self.engine = create_engine(db_adress)
if database_exists(self.engine.url) is False:
logger.warn("SQLMNG - DB not found, creating new DB")
logger.debug("SQLMNG - DB {} created".format(self.engine.url))
create_database(self.engine.url)
logger.info("SQLMNG - DB found, connecting.")
logger.debug(self.engine.url)
self.metadata = MetaData(self.engine)
return self.engine
# this method sets the session using the engine defined previously
def set_session(self, engine):
"""[define session from engine]
Args:
engine ([engine]): [sql engine]
Returns:
[session]: [database session from engine]
"""
Session = sessionmaker(bind=engine)
self.session = Session()
logger.debug(self.session)
logger.info("SQLMNG - Session created.")
return self.session
def check_db_info(self):
"""[get database information]
Returns:
[list]: [list with database metadata]
"""
info_list = [list(Base.metadata.tables.keys()), self.metadata]
return info_list
'''
def _create_default_user_db(self):
###
#May use in future
###
users_dict = {"login": String, "password": String,
"name": String, "surname": String,
"nvl": String, "other_spec": String,
"email": String, "date": Date}
badwords_dict = {"badwords": String}
self._create_db_table("db_user_schema", "users", **users_dict)
self._create_db_table("db_user_schema", "badwords", **badwords_dict)
self.commit_new_tables()
self.check_db_info()
'''
def _create_db_table(self, schema, tablename, **table_info):
"""[create table on schema.tablename with table_info]
Args:
schema ([string]): [schema name]
tablename ([string]): [table name]
"""
###
# May use in future
###
self.table = Table(tablename, self.metadata,
Column('id', Integer, primary_key=True),
*(Column(key, value) for key, value in table_info.items()),
schema=schema)
# self.commit_new_tables()
# self.class_mapper(schema, tablename)
def commit_new_table(self, schema, table):
"""[commit table creation]
Args:
schema ([string]): [schema name]
tablename ([string]): [table name]
"""
print("sqlmng: ", schema, table)
table = str_to_table(schema, table)
table.__table__.create(self.engine)
def schema_exists(self, schema_name, create=False):
"""[check if schema already exist]
Args:
schema_name ([string]): [schema name]
create (bool, optional): [create table if not exist]. Defaults to False.
Returns:
[bool]: [True if schema exists]
"""
ret = self.engine.dialect.has_schema(self.engine, schema_name)
if not ret:
if create is True:
self.engine.execute(schema.CreateSchema(schema_name))
return ret
def table_exists(self, name, schema=None):
"""[check if table name exist]
Args:
name ([string]): [table name]
schema ([string], optional): [schema name]. Defaults to None.
Returns:
[bool]: [True if table name exist]
"""
ret = inspect(self.engine).has_table(name, schema)
return ret
def class_mapper(self, schema, tablename):
"""[dynamically creates tables, not in use]
Args:
schema ([string]): [schema name]
tablename ([string]): [table name]
"""
if tablename == "users":
mydict = {'__table__': '{}.{}'.format(schema, tablename),
'__table_args__': ({'autoload': True, 'autoload_with': self.engine},)}
elif tablename == "badwords":
mydict = {'__table__': '{}.{}'.format(schema, tablename),
'__table_args__': ({'autoload': True, 'autoload_with': self.engine},)}
else:
mydict = {'__table__': '{}.{}'.format(schema, tablename),
'__table_args__': ({'autoload': True, 'autoload_with': self.engine},)}
cls = type('{}'.format(tablename), (Base,), mydict)
mapper(cls, self.table)
def detect_schema(self, schema, create_schema=False):
"""[Another implementation to check if schema exist on database]
Args:
schema ([string]): [schema name]
create_schema (bool, optional): [if schema should be created]. Defaults to False.
Returns:
[bool]: [True if exists]
"""
fix_schema = str(schema)
print("schema: ", fix_schema)
flag = exists(select([(text("schema_name"))]).select_from(text("information_schema.schemata"))
.where(text("schema_name = '{}'".format(fix_schema))))
if self.session.query(flag).scalar():
return True
# self.engine.execute("SHOW CREATE SCHEMA {}".format(fix_schema)).scalar()
# create the user on the database, the user_info should be a dict containing
# login, pw, name, surname, email and datetime
    def add_user(self, session, user_info):
        """[add a new user row to the users table]
Args:
session ([session]): [connection session]
user_info ([list]): [list with info to add to server table]
"""
new_user = User(**user_info)
session.add(new_user)
session.commit()
logger.info("SQLMNG - User added and changes commited.")
# this method finds the target username and change the pw on the database for
# the new one, the pw parameter
def change_user_pw(self, session, username, pw):
"""[change user password on table]
Args:
session ([session]): [connection session]
username ([string]): [username]
pw ([string]): [password]
"""
session.query(User).filter_by(login=username).update({"password": pw})
session.commit()
logging.info("SQLMNG - {} changed password.".format(username))
# this one deletes the target (id/login) entry from the database
# the ident params change if the code searchs for id or login
def delete_user(self, session, target, ident=True):
"""[delete user on table]
Args:
session ([session]): [connection session]
target ([string]): [user to be deleted]
ident (bool, optional): [if should be queried by id or login column]. Defaults to True.
"""
if ident:
ident = session.query(User).filter_by(id=target).delete()
else:
ident = session.query(User).filter_by(login=target).delete()
session.commit()
logging.info("SQLMNG - {} deleted and changes commited.".format(target))
def query_values(self, session, column=None, schema="db_user_schema",
target=None, table="users", _type="all", _pd=False):
"""[query values from table]
Args:
session ([session]): [connection session]
column ([list or string], optional): [description]. Defaults to None.
schema (str, optional): [schema name]. Defaults to "db_user_schema".
target ([string], optional): [what should be queried]. Defaults to None.
table (str, optional): [table to look into]. Defaults to "users".
_type (str, optional): [query with .all(), .last(), .first(), .scalar() or .one()]. Defaults to "all".
_pd (bool, optional): [guide the flow of the method]. Defaults to False.
Raises:
ValueError: [In case the column name doesn't exist]
Returns:
[object]: [return the query result]
"""
logging.debug("SQLMNG - {}".format(":".join(str(x) for x in [session,
column, table, _type])))
table_name = str_to_table(schema, table)
base_query = self.session.query(table_name)
if table == "users":
col = str_to_column(table_name, column)
return self.session.query(col).all()
if _pd is True:
if column is not None and target is not None:
try:
col_obj = str_to_column(table_name, column)
q = pd.read_sql(self.session.query(table_name).filter(col_obj == target).statement, self.session.bind)
return q
except:
return pd.read_sql(self.session.query(table_name).statement, self.session.bind)
elif column is not None:
if _type is None:
try:
col_obj = str_to_column(table_name, column)
q = pd.read_sql(self.session.query(table_name).order_by(column.asc()).statement, self.session.bind)
return q
except:
return pd.read_sql(self.session.query(table_name).statement, self.session.bind)
elif _type == "last":
try:
col_obj = str_to_column(table_name, column)
q = pd.read_sql(self.session.query(table_name).order_by(column.id.asc()).limit(20).statement, self.session.bind)
return q
except:
return pd.read_sql(self.session.query(table_name).statement, self.session.bind)
else:
return pd.read_sql(self.session.query(table_name).statement, self.session.bind)
if column:
if type(column) == str:
col_obj = str_to_column(table_name, column)
col_query = base_query.filter(col_obj == target)
if _type == "first":
return base_query.filter(col_obj == target).first()
elif type(column) == list:
col_obj_list = [str_to_column(table_name, col) for col in column]
col_query = base_query.with_entities(*col_obj_list).filter(text(target))
else:
raise ValueError("Column must be either str or list")
if _type == "all":
query = col_query.all()
elif _type == "first":
query = col_query.first()
# #TODO next two elifs will not work with multiple columns, fix it
elif _type == "scalar":
query = base_query.filter(col_obj == target).first() is not None
elif _type == "one":
query = col_query.one()
return query
else:
if _type == "all":
query = base_query.all()
elif _type == "first":
query = base_query.first()
else:
query = base_query.first()
return query
# the query method makes query (on the user table) based on the target param
# if the badword param is set to true the query will be made in the badword tables
# if the user is set to true it will look for the login column, otherwise it will look at the email column
# if scalar is set to true the output will be either True or False
    def query_user(self, session, target, badword=False, user=True, scalar=True):
        """[query the users table (or the badwords table) for target]
Args:
session ([session]): [connection session]
target ([string]): [target to be queried for]
badword (bool, optional): [look in badword table or not]. Defaults to False.
user (bool, optional): [return the query result as a list]. Defaults to True.
scalar (bool, optional): [return the query result as a bool]. Defaults to True.
Returns:
[object]: [query]
"""
if badword is True:
query = session.query(exists().where(Badwords.badword == target)).scalar()
if target:
logging.info("SQLMNG - Badword {} queried.".format(
target.replace(target, "*" * len(target))))
return query
try:
if scalar:
query = session.query(exists().where(User.login == target)).scalar()
else:
if user:
query = session.query(User).filter_by(login=target).first()
else:
query = session.query(User).filter_by(email=target).first()
logging.info("SQLMNG - {} queried.".format(target))
return query
except exc.SQLAlchemyError:
return exc.SQLAlchemyError.__name__
# similar to the add_user and change password, it looks for the username and
# update the row with the information inside new_info dict
def update_user(self, session, username, new_info):
"""[update user in table users with new info]
Args:
session ([session]): [connection session]
username ([string]): [user username to be updated]
new_info ([dict]): [info to be updated]
"""
session.query(User).filter_by(login=username).update(new_info)
session.commit()
logging.debug("SQLMNG - Update <{}> requested.".format(new_info))
logging.info("SQLMNG - Update commited.")
    def upsert(self, schema, table_name, records={}):
        """[insert records into schema.table_name, updating existing rows on primary-key conflict]
Args:
schema ([string]): [schema name]
table_name ([string]): [table name]
records (dict, optional): [records to be update]. Defaults to {}.
Returns:
[execute]: [return the execution of the upsert]
"""
metadata = MetaData(schema=schema)
metadata.bind = self.engine
table = Table(table_name, metadata, schema=schema, autoload=True)
# get list of fields making up primary key
primary_keys = [key.name for key in inspect(table).primary_key]
# assemble base statement
stmt = postgresql.insert(table).values(records)
# define dict of non-primary keys for updating
update_dict = {
c.name: c
for c in stmt.excluded
if not c.primary_key
}
# assemble new statement with 'on conflict do update' clause
update_stmt = stmt.on_conflict_do_update(
index_elements=primary_keys,
set_=update_dict,
)
# execute
with self.engine.connect() as conn:
try:
result = conn.execute(update_stmt)
return result
except exc.IntegrityError:
return gerrors.fk_error()
    def add_rows_sampat(self, session, rows_info, schema, table):
        """[add a single row built from rows_info to schema.table]
Args:
session ([session]): [connection session]
rows_info ([dict]): [rows info to be added]
schema ([string]): [schema name]
table ([string]): [table name]
Raises:
            ValueError: [In case the rows_info keys don't match the table]
"""
table = str_to_table(schema, table)
new_row = table(**rows_info)
session.add(new_row)
try:
session.commit()
except exc.ProgrammingError:
raise ValueError
logger.info("SQLMNG - {} rows added to {} table.".format(len(rows_info), table))
def delete_entry(self, session, schema, table, target):
"""[delete target entry from the table schema.table ]
Args:
session ([session]): [connection session]
schema ([string]): [schema name]
table ([string]): [table name]
target ([string]): [id to be queried]
"""
true_table = str_to_table(schema, table)
true_col = str_to_column(true_table, 'id')
session.query(true_table).filter(true_col == target).delete()
session.commit()
def update_table(self, session, schema, table, column, target, new_entry):
"""[update table with new entry]
Args:
session ([session]): [connection session]
schema ([string]): [schema name]
table ([string]): [table name]
column ([string]): [column to query the target]
target ([string]): [which to be queried]
new_entry ([dict]): [dict with info to be updated]
"""
true_table = str_to_table(schema, table)
true_col = str_to_column(true_table, column)
session.query(true_table).filter(true_col == target).update(new_entry)
session.commit()
    def update_rows_sampat(self, session, rows_info, schema, table):
        """[merge (insert or update) a row built from rows_info into schema.table]
Args:
session ([session]): [connection session]
schema ([string]): [schema name]
table ([string]): [table name]
rows_info ([dict]): [dict with info to be updated]
"""
table_obj = str_to_table(schema, table)
new_row = table_obj(**rows_info)
session.merge(new_row)
session.commit()
logging.info('SQLMNG - Update commited')
def pat_flow(self, rows_info, schema, table, verbose=False):
"""[specific flow to add entries into patients table]
Args:
rows_info ([dict]): [dict with info to be updated]
schema ([string]): [schema name]
table ([string]): [table name]
verbose (bool, optional): [to print or not the content of rows_info]. Defaults to False.
Raises:
ValueError: [In case the entry weren't added to the database]
"""
        # TODO: handle duplicate entries
logging.debug("SQLMNG - Insert entry <{}> requested.".format(rows_info))
for entry in rows_info:
target = int(entry["old_id"])
part = entry["particular"]
check = self.session.query(Patient).filter(Patient.old_id == target,
Patient.particular == part)
check_bool = check.first() is not None
if not check_bool:
if verbose:
print(entry)
try:
self.add_rows_sampat(self.session, entry, schema, table)
except:
logging.info('SQLMNG - Entry not added')
                    raise ValueError("New entries not inserted on DB")
else:
pass
def samp_flow(self, rows_info, schema, table, verbose=False):
"""[specific flow to add entries into samples table]
Args:
rows_info ([dict]): [dict with info to be updated]
schema ([string]): [schema name]
table ([string]): [table name]
verbose (bool, optional): [to print or not the content of rows_info]. Defaults to False.
Raises:
ValueError: [In case the entry weren't added to the database]
"""
for entry in rows_info:
target = int(entry["samp_serial"])
old_id = int(entry["old_id"])
part = True if not entry["sample_group"] == "G" else False
query = self.session.query(Patient).filter(Patient.old_id == old_id,
Patient.particular == part)
check = self.query_values(self.session, target=target,
column='samp_serial', schema=schema,
table=table, _type="scalar")
if not check:
entry["sample_owner"] = query.first()
if verbose:
print(entry)
try:
self.add_rows_sampat(self.session, entry, schema, table)
except:
logging.info('SQLMNG - Entry not added')
                    raise ValueError("New entries not inserted on DB")
else:
pass
def exams_flow(self, rows_info, schema, table, verbose=False):
"""[specific flow to add entries into exams table]
Args:
rows_info ([dict]): [dict with info to be updated]
schema ([string]): [schema name]
table ([string]): [table name]
verbose (bool, optional): [to print or not the content of rows_info]. Defaults to False.
Raises:
ValueError: [In case the entry weren't added to the database]
"""
for entry in rows_info:
target = int(entry["exam_serial"])
old_id = int(entry["old_id"])
part = entry["sample_group"]
query = self.session.query(Samples).filter(Samples.old_id == old_id,
Samples.sample_group == part)
check = self.query_values(self.session, target=target,
column='exam_serial', schema=schema,
table=table, _type="scalar")
if not check:
entry["master_sample"] = query.first()
entry.pop("sample_group", None)
entry.pop("old_id", None)
if verbose:
print(entry)
try:
self.add_rows_sampat(self.session, entry, schema, table)
except:
logging.info('SQLMNG - Entry not added')
                    raise ValueError("New entries not inserted on DB")
else:
pass
def row_count(self, session, table="users_table"):
"""[get row count of table table]
Args:
session ([session]): [connection session]
table (str, optional): [table name]. Defaults to "users_table".
Returns:
[int]: [row number]
"""
if table == "users_table":
rows = session.query(func.count(User.id)).scalar()
elif table == "Badword":
rows = session.query(func.count(Badwords.id)).scalar()
elif table == "samples_table":
rows = session.query(func.count(Samples.id)).scalar()
elif table == "patients_table":
rows = session.query(func.count(Patient.id)).scalar()
elif table == "exams_table":
rows = session.query(func.count(Exams.id)).scalar()
return rows
def col_info(self, session, schema="db_sampat_schema", table="patients_table"):
"""[get columns info from table]
Args:
session ([session]): [connection session]
schema (str, optional): [schema name]. Defaults to "db_sampat_schema".
table (str, optional): [table name]. Defaults to "patients_table".
Returns:
[list]: [columns from table]
"""
insp = reflection.Inspector.from_engine(self.engine)
col_info = insp.get_columns(table, schema)
return col_info
# back-end method used to add entries to the badword table, it accepts both lists and strings
def populate_badword(self, session, badword):
"""[add badwords to table badwords]
Args:
session ([session]): [connection session]
badword ([list, str]): [badword to be added]
"""
if type(badword) == list:
for item in badword:
new_bad = Badwords(badword=item)
session.add(new_bad)
else:
print(badword)
new_bad = Badwords(badword=badword)
session.add(new_bad)
session.commit()
def close_conn(self, session):
"""[close the session]
Args:
session ([session]): [connection session]
"""
logging.info("SQL Session closed.")
session.close()
# class that defines the User table in the database, it follows the sqlalchemy guidelines
class User(Base):
"""[define User table on database]
"""
__tablename__ = 'users_table'
__table_args__ = {'schema': "db_user_schema"}
id = Column(Integer, primary_key=True)
login = Column(String)
password = Column(String)
name = Column(String)
surname = Column(String)
nvl = Column(String)
other_spec = Column(String)
email = Column(String)
date = Column(Date)
def __repr__(self):
return "login={},password={},name={},surname={},nvl={},"\
"other_spec={},email={},date={}".format(self.login, self.password, self.name,
self.surname, self.nvl, self.other_spec,
self.email, self.date)
# class that defines the Badword table in the database, it follows the sqlalchemy guidelines
class Badwords(Base):
"""[define Badwords table on database]
"""
__tablename__ = 'badwords'
__table_args__ = {'schema': "db_user_schema"}
id = Column(Integer, primary_key=True)
badword = Column(String)
def __repr__(self):
return "badword={}".format(self.badword)
# class that defines the main db in the server, it follows the sqlalchemy guidelines
class Patient(Base):
"""[define Patient table on database]
"""
__tablename__ = 'patients_table'
__table_args__ = {'schema': "db_sampat_schema"}
id = Column(Integer, primary_key=True, unique=True)
# old_id = Column("barcode", Integer, unique=False)
samples = relationship("Samples", backref='sample_owner')
particular = Column(Boolean, default=False, unique=False, nullable=False)
first_name = Column(String, default=None)
second_name = Column(String, default=None)
surname = Column(String, default=None)
rn = Column(Boolean, default=False, unique=False)
nt = Column(Boolean, default=False, unique=False)
rg = Column(String, default=None)
registry = Column(String, default=None)
birth_date = Column(Date, default=None)
register_date = Column(Date, default=None)
pat_origin = Column(String, default=None)
doctor = Column(String, default=None)
parent_type = Column(String, default=None)
parent = Column(String, default=None)
lib = Column(Boolean, default=False, unique=False, nullable=False)
diag_hipt = Column(String, default=None)
term = Column(Boolean, default=False, unique=False, nullable=False)
gen = Column(String, default=None)
karyotype = Column(String, default=None)
obs = Column(String, default=None)
updated = Column(DateTime, default=datetime.datetime.now())
def __repr__(self):
return rep_gen(self)
class Samples(Base):
"""[define Samples table on database]
"""
__tablename__ = 'samples_table'
__table_args__ = {'schema': "db_sampat_schema"}
id = Column(Integer, primary_key=True)
# old_id = Column(Integer, unique=False)
sample_group = Column(String, default=None)
samp_serial = Column(Integer, default=None, unique=True)
patient_id = Column(Integer, ForeignKey('db_sampat_schema.patients_table.id'), nullable=False)
exams = relationship('Exams', backref='master_sample')
sample_orign = Column(String, default=None)
cap_color = Column(String, default=None)
material_type = Column(String, default=None)
material_details = Column(String, default=None)
material_quantity = Column(Float, default=None)
main_tube = Column(Boolean, default=False, unique=False, nullable=False)
aliquot = Column(Boolean, default=False, unique=False, nullable=False)
aliquot_id = Column(Integer, default=None, nullable=True)
extracted = Column(Boolean, default=False, unique=False, nullable=False)
processed = Column(Boolean, default=False, unique=False, nullable=False)
arquived = Column(Boolean, default=False, unique=False, nullable=False)
arquiv_date = Column(Date, default=None, nullable=True)
arq_position = Column(String, default=None)
sample_register_date = Column(Date, default=None)
sample_extraction_date = Column(Date, default=None)
sample_process_date = Column(Date, default=None)
sample_aliquot_date = Column(Date, default=None)
sample_dna_concentration = Column(Float, default=None)
sample_dna_quality = Column(Float, default=None)
recall = Column(Boolean, default=False, nullable=False)
recall_date = Column(Date, default=None, nullable=True)
recall_sample_id = Column(Integer, default=None, nullable=True)
recall_register_date = Column(Date, default=None, nullable=True)
lib_date = Column(Date, default=None)
lib = Column(Boolean, default=False, unique=False, nullable=False)
obs = Column(String, default=None)
    updated = Column(DateTime, default=datetime.datetime.now)  # pass the callable so the default is evaluated per insert
def __repr__(self):
return rep_gen(self)
class Exams(Base):
"""[define Exams table on database]
"""
__tablename__ = 'exams_table'
__table_args__ = {'schema': "db_sampat_schema"}
id = Column(Integer, primary_key=True)
sample_id = Column(Integer, ForeignKey('db_sampat_schema.samples_table.id'), nullable=False)
exam_serial = Column(Integer, default=None, unique=True)
sample_exam = Column(String, default=None)
run_number = Column(Integer, default=None)
seq_number = Column(Integer, default=None)
run_letter = Column(String, default=None, unique=False)
kit = Column(String, default=None)
kit_lot = Column(String, default=None)
platform = Column(String, default=None)
results = Column(String, default=None)
lib_date = Column(Date, default=None)
lib = Column(Boolean, default=False, unique=False, nullable=False)
obs = Column(String, default=None)
    updated = Column(DateTime, default=datetime.datetime.now)  # pass the callable so the default is evaluated per insert
def __repr__(self):
return rep_gen(self)
# helper functions that map table and column name strings to their SQLAlchemy objects
def str_to_table(schema, table):
"""[transform string table into class object table]
Args:
schema ([string]): [schema name]
table ([string]): [table name]
Returns:
[object]: [table object]
"""
    # Legacy lookup kept for reference (SQLAlchemy < 1.4 exposed Base._decl_class_registry):
    # for item in Base._decl_class_registry.values():
    #     if hasattr(item, '__table__') and item.__table__.fullname == "{}.{}".format(schema, table):
    #         return item
for item in Base.registry.mappers:
if item.class_.__tablename__ == table.lower():
return item.class_
def str_to_column(table, column):
"""[transform string column into class object table.column]
Args:
table ([string]): [table name]
column ([string]): [column name]
Returns:
[object]: [column object]
"""
return getattr(table, column)
# a plain function that initializes the SQL class and outputs the instance and session
def sql_init(login, upw, hn, db):
psql = SQL(login, upw, hn, db)
try:
engine = psql.set_engine()
except TimeoutError as TE_ERROR:
return TE_ERROR
finally:
sess = psql.set_session(engine)
return psql, sess
def rep_gen(ncls):
"""[method to generate how the table classes generate its __repr__ method]
Args:
ncls ([list]): [class keys]
Raises:
NameError: [In case the class doesn't exist]
Returns:
[string]: [__repr__ string]
"""
try:
if type(ncls) == str:
_cls = eval(ncls)
else:
_cls = ncls
except:
raise NameError("Class not found")
    cl_keys = [key for key in _cls.__dict__.keys() if "_" not in key]
repr_str = "={},".join(cl_keys) + "={}"
cls_attr = [getattr(_cls, key) for key in cl_keys]
return repr_str.format(*cls_attr)
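# Illustrative usage sketch (not part of the original module; names and
# credentials are placeholders): how the helpers above might be combined.
#
#     psql, sess = sql_init("user", "password", "localhost", "mydb")
#     patient_cls = str_to_table("db_sampat_schema", "patients_table")
#     id_col = str_to_column(patient_cls, "id")
#     rows = sess.query(patient_cls).order_by(id_col).all()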
```
|
{
"source": "jgreener64/allopred",
"score": 2
}
|
#### File: allopred/functions/nma_methods.py
```python
import numpy as np
from prody import *
class GammaResidue(Gamma):
"""Return force constant for a particular pair of residues.
Returns normal force constant apart from at the residues being tested for allosteric character.
"""
# see ProDy documentation for template
def __init__(self, atoms, res_nos, frac_change, gamma=1):
rnum = atoms.getResindices()
# residue indices
self._rnum = rnum
self._res_nos = res_nos
self._frac_change = float(frac_change)
self._gamma = float(gamma)
def gamma(self, dist2, i, j):
rnum = self._rnum
res_nos = self._res_nos
gamma = self._gamma
# if residue number is one of the input residues returns the modified force constant
if rnum[i] in res_nos or rnum[j] in res_nos:
return gamma * self._frac_change
# otherwise return the normal force constant
else:
return gamma
def quantify_difference(anm_active, anm_mod_active, no_modes):
"""Compares standard to perturbed normal modes at the active site residues and returns a single perturbation value."""
evals = anm_active.getEigvals()[:no_modes]
# average magnitude difference for each normal mode
mags_overall = []
# iterate over low-frequency modes
for mode_no in range(no_modes):
# get mode and modified mode
mode = anm_active[mode_no].getEigvec()
mode_mod = anm_mod_active[mode_no].getEigvec()
# sometimes the modified mode is reversed, so also compare the mode with the reversed modified mode
mode_mod_alt = -anm_mod_active[mode_no].getEigvec()
        # compute the average magnitude difference for both orientations
av_mags = vector_difference(mode, mode_mod)
av_mags_alt = vector_difference(mode, mode_mod_alt)
# add the minimum of the differences, which will correspond to the correct orientation
site_av = min(av_mags, av_mags_alt)
mags_overall.append(site_av)
av = weight_by_eval(evals, mags_overall)
return av
def vector_difference(mode1, mode2):
"""Returns average magnitude of vector difference between two eigenvectors."""
mags = []
# iterate over each atom
for atom_index in range(0,len(mode1),3):
# get normal and perturbed vectors
vec1 = mode1[atom_index:atom_index+3]
vec2 = mode2[atom_index:atom_index+3]
# calculate magnitude of vector linking vectors
diff = vec1 - vec2
mag = np.sqrt(diff.dot(diff))
mags.append(mag)
return np.average(mags)
def weight_by_eval(evals, diffs):
"""Returns the average perturbation across the normal modes weighted by eigenvalue."""
weightings = 1 / np.sqrt(evals)
sum_weightings = sum(weightings)
mags_weighted = weightings*diffs
weighted_av = sum(mags_weighted) / sum_weightings
return weighted_av
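# Minimal self-check of the two pure helpers above (not part of the original
# file; the numbers are illustrative only): two 3-atom eigenvectors that differ
# only at the first atom, and a toy eigenvalue weighting.
if __name__ == "__main__":
    m1 = np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0])
    m2 = np.array([0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0])
    print(vector_difference(m1, m2))  # sqrt(2)/3, approx. 0.4714
    print(weight_by_eval(np.array([1.0, 4.0]), [0.3, 0.6]))  # 0.4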
```
|
{
"source": "jgreer013/pymcc",
"score": 2
}
|
#### File: jgreer013/pymcc/play.py
```python
import pyxinput
import torch
import numpy as np
import time
import datetime
import utils
import neural_net
from neural_net import Net
import torchvision.models as models
import torch.nn as nn
from torchvision import transforms
PATH = "WGANGP_withPrevAction_NEW_was_mse_30_sgd_16_0_01_e50.pt"
class XController(pyxinput.vController):
leftJoyX = 'AxisLx'
leftJoyY = 'AxisLy'
rightJoyX = 'AxisRx'
rightJoyY = 'AxisRy'
leftTrigger = 'TriggerL'
rightTrigger = 'TriggerR'
leftBumper = 'BtnShoulderL'
rightBumper = 'BtnShoulderR'
a = 'BtnA'
x = 'BtnX'
y = 'BtnY'
b = 'BtnB'
leftThumb = 'BtnThumbL'
rightThumb = 'BtnThumbR'
back = 'BtnBack'
start = 'BtnStart'
dpad = 'Dpad'
current_state = np.random.rand(20) * 2 - 1
def get_controller_state(self):
return self.current_state
def update_controller_state(self, input_vector):
self.current_state = input_vector
self.set_value(self.leftJoyX, input_vector[0])
self.set_value(self.leftJoyY, input_vector[1])
self.set_value(self.rightJoyX, input_vector[2])
self.set_value(self.rightJoyY, input_vector[3])
self.set_value(self.leftTrigger, input_vector[4])
self.set_value(self.rightTrigger, input_vector[5])
self.set_value(self.leftBumper, int(input_vector[6]))
self.set_value(self.rightBumper, int(input_vector[7]))
self.set_value(self.a, int(input_vector[8]))
self.set_value(self.x, int(input_vector[9]))
self.set_value(self.y, int(input_vector[10]))
self.set_value(self.b, int(input_vector[11]))
self.set_value(self.leftThumb, int(input_vector[12]))
self.set_value(self.rightThumb, int(input_vector[13]))
#self.set_value(self.back, int(input_vector[14]))
#self.set_value(self.start, int(input_vector[15]))
dpad = input_vector[16:]
if dpad[0] == 1:
self.set_value(self.dpad, self.DPAD_LEFT)
elif dpad[1] == 1:
self.set_value(self.dpad, self.DPAD_RIGHT)
elif dpad[2] == 1:
self.set_value(self.dpad, self.DPAD_UP)
elif dpad[3] == 1:
self.set_value(self.dpad, self.DPAD_DOWN)
else:
self.set_value(self.dpad, self.DPAD_OFF)
class ReadController(pyxinput.rController):
test = "test"
def init_network():
#net = Net()
#net = neural_net.StickNet()
net = neural_net.GeneratorWithAction()
gpu = None
if torch.cuda.is_available():
gpu = torch.device("cuda:0")
net.load(PATH, gpu=gpu)
return net
def get_frame_as_tensor():
sct_img = utils.get_frame()
sct_img.resize((960, 540))
np_img = np.asarray(sct_img)
np_img = np.require(np_img, dtype='f4', requirements=['O', 'W'])
np_img.setflags(write=True)
image_tensor = torch.from_numpy(np_img)
image_tensor = image_tensor.permute(2, 0, 1)
image_tensor = image_tensor / 255.0
#normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
#image_tensor = normalize(image_tensor)
image_tensor = image_tensor.unsqueeze(0)
return image_tensor
def get_current_controller_state_as_tensor(controller):
current_controller_state = torch.from_numpy(controller.get_controller_state())
current_controller_state = current_controller_state.unsqueeze(0).float()
return current_controller_state
def fix_action(action):
action[4:] = (action[4:] + 1) / 2.0
action[4:] = np.clip(action[4:], 0, 1)
action[4:] = np.rint(action[4:])
return action
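# Illustrative note (not part of the original file): fix_action keeps the four
# stick axes (indices 0-3) in [-1, 1] and remaps every later entry from
# [-1, 1] to {0.0, 1.0}; e.g. a trigger value of 0.9 becomes 1.0 and -0.9
# becomes 0.0.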
if __name__ == "__main__":
controller = XController()
r_controller = ReadController(2)
net = init_network()
net.eval()
cpu = torch.device('cpu')
gpu = torch.device('cuda:0')
print("Plugging in controller in 5 seconds")
time.sleep(5)
print("Plugging in controller")
controller.PlugIn()
print("Begin playing in 5 seconds (have to give time for controller to be plugged in and to allow you to bring focus to main window")
time.sleep(5)
while True:
try:
input_img = get_frame_as_tensor()
input_img = input_img.to(gpu)
current_controller_state = get_current_controller_state_as_tensor(controller)
current_controller_state = current_controller_state.to(gpu)
#stick_l_lr, stick_l_ud, stick_r_lr, stick_r_ud, buttons, stick_l_lr_probs, stick_l_ud_probs, stick_r_lr_probs, stick_r_ud_probs, buttons_probs = net(input_img)
#sticks, buttons, button_probs = net(input_img, current_controller_state)
#sticks, button_probs = net(input_img, current_controller_state)
action = net(input_img, current_controller_state)
#action = net(input_img)
action = action.to(cpu).detach().numpy()[0]
#sticks, buttons = torch.split(action, [4, 16], dim=1)
#buttons = button_probs.to(cpu).detach().numpy()[0]
#sticks = sticks.to(cpu).detach().numpy()[0]
#action = np.concatenate((sticks, buttons))
#sticks, buttons = net(input_img)
#action = torch.cat((sticks, buttons), 1).to(cpu).detach().numpy()[0]
action = fix_action(action)
#print(buttons)
print(action)
controller.update_controller_state(action)
except KeyboardInterrupt:
break
print("UnPlugging controller")
controller.UnPlug()
```
|
{
"source": "jgreer013/re-qomp",
"score": 3
}
|
#### File: jgreer013/re-qomp/main.py
```python
import sys
import file_parse as fp
import story_importance as si
import distance_metric as dm
import read_answer_set as ras
import tensor_functionality as tenf
usage = """Usage: python main.py <userStoryFileName.txt> <filterWord> <resultsFileName>
<userStoryFileName.txt> is the name of the user story file to be read.
<filterWord> is the word used to show user stories pertinent to that word.
<resultsFileName> is the name of the result file."""
def main():
fName = ""
filterWord = ""
#print "Reading Answer Set Files"
answer_set = ras.getAnswerSet("AnswerSetMetadata.txt")
answer_set.extend(ras.getAnswerSet("AnswerSetWorkgroup.txt"))
#print "Reading Input Files"
# Attempt to read argument
try:
fName = sys.argv[1]
except:
print("Error: No filename given")
print(usage)
return
try:
filterWord = sys.argv[2]
except:
filterWord = ""
userStories = fp.parseFile(fName)
userStories = si.getRelevantStories(userStories, filterWord)
orderedStories = []
for story in sorted(userStories.keys()):
orderedStories.append(userStories[story])
#print "Generating TFIDF Vectors"
tf_mat = dm.getTFIDFVectors(orderedStories)
dist_mat = dm.getDistanceMatrix(tf_mat)
#print "Generating Training Sets"
training_set = ras.generateTrainingSets(userStories, tf_mat, answer_set)
test_set = ras.generateTrainingSets(userStories, tf_mat, answer_set)
#print "Training Sets Generated: " + str(len(training_set))
#final_predictions = tenf.trainTF(training_set, training_set)
threshold = 0.9
# Print out vals
for i in xrange(len(test_set)):
a = i / (len(orderedStories)-1)
b = i % (len(orderedStories)-1)
if (b >= a):
b = b + 1
val = test_set[i][2]
if (val[1] == 1 and b > a):
print str(a+1) + "," + str(b+1)
# Rebuild results from one dimensional array
"""
for i in xrange(len(orderedStories)):
for j in xrange(len(orderedStories)):
count = i*46+j-1
if (i != j):
print str(i) + ' ' + str(j) + ' ' + str(count)"""
if __name__ == "__main__":
main()
```
|
{
"source": "jgregoriods/quaesit",
"score": 3
}
|
#### File: quaesit/quaesit/agent.py
```python
import inspect
from math import hypot, sin, asin, cos, radians, degrees
from abc import ABCMeta, abstractmethod
from random import randint, choice
from typing import Dict, List, Tuple, Union
class Agent(metaclass=ABCMeta):
"""
Class to represent an agent in an agent-based model.
"""
_id = 0
colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange',
'pink', 'purple', 'red', 'yellow']
def __init__(self, world, coords: Tuple = None):
self._id = Agent._id
Agent._id += 1
self.world = world
self.coords = coords or (randint(0, self.world.width - 1),
randint(0, self.world.height - 1))
self.direction = 90
self.breed = self.__class__.__name__.lower()
self.icon = '.'
self.color = choice(self.colors)
self.world.add_agent(self)
def die(self):
"""
Remove the agent from the world.
"""
del self.world.agents[self._id]
self.world.grid[self.coords]['agents'].remove(self)
del self
def hatch(self):
"""
Creates an agent and initializes it with the same parameters as
oneself.
"""
sig = inspect.signature(self.__init__)
filter_keys = [param.name for param in sig.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
filtered_dict = {filter_key: self.__dict__[filter_key]
for filter_key in filter_keys}
return self.__class__(**filtered_dict)
def move_to(self, coords: Tuple):
"""
Places the agent in a different cell of the world grid.
"""
self.world.remove_from_grid(self)
self.coords = coords
self.world.place_on_grid(self)
def cell_here(self, layer = None):
"""
Returns the value of a layer in the model's grid for the cell
where the agent is. If no layer is specified, the values of all
layers are returned.
"""
if layer is not None:
return self.world.grid[self.coords][layer]
else:
return self.world.grid[self.coords]
def get_distance(self, coords: Tuple) -> int:
"""
Returns the distance (in cells) from the agent to a pair of
coordinates.
"""
x, y = coords
return round(hypot((x - self.coords[0]), (y - self.coords[1])))
def cells_in_radius(self, radius: int) -> Dict:
"""
Returns all cells and respective attributes within a distance
of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if self.get_distance((x, y)) <= radius}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid)}
return neighborhood
def empty_cells_in_radius(self, radius: int) -> Dict:
"""
Returns all empty cells (with no agents on them) and respective
attributes within a distance of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and not
self.world.grid[self.world.to_torus((x, y))]
['agents'])}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid and not
self.world.grid[(x, y)]['agents'])}
return neighborhood
def nearest_cell(self, cells: Union[List, Dict]) -> Tuple:
"""
Given a list or dictionary of cells, returns the coordinates of
the cell that is nearest to the agent.
"""
dists = {cell: self.get_distance(cell) for cell in cells}
return min(dists, key=dists.get)
def agents_in_radius(self, radius: int):
"""
Returns all agents within a distance of oneself.
"""
neighborhood = self.cells_in_radius(radius)
neighbors = [agent for coords in neighborhood
for agent in self.world.grid[coords]['agents']
if agent is not self]
return neighbors
def agents_here(self) -> List:
"""
Returns all agents located on the same cell as oneself.
"""
return [agent for agent in self.world.grid[self.coords]['agents']
if agent is not self]
def nearest_agent(self, agents: List = None):
"""
Given a list of agents, returns the agent that is nearest to
oneself. If no list is provided, all agents are evaluated.
"""
if agents is None:
agents = [self.world.agents[_id] for _id in self.world.agents]
dists = {agent: self.get_distance(agent.coords)
for agent in agents if agent is not self}
return min(dists, key=dists.get)
def turn_right(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the right.
"""
self.direction = round((self.direction - angle) % 360)
def turn_left(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the left.
"""
self.direction = round((self.direction + angle) % 360)
def forward(self, n_steps: int = 1):
"""
Moves the agent a number of cells forward in the direction it
is currently facing.
"""
x = round(self.coords[0] + cos(radians(self.direction)) * n_steps)
y = round(self.coords[1] + sin(radians(self.direction)) * n_steps)
if self.world.torus:
self.move_to(self.world.to_torus((x, y)))
elif (x, y) in self.world.grid:
self.move_to((x, y))
def face_towards(self, coords: Tuple):
"""
Turns the agent's direction towards a given pair of coordinates.
"""
if coords != self.coords:
xdif = coords[0] - self.coords[0]
ydif = coords[1] - self.coords[1]
dist = hypot(xdif, ydif)
angle = degrees(asin(ydif / dist))
if xdif < 0:
self.direction = round(180 - angle)
else:
self.direction = round((360 + angle) % 360)
def random_walk(self, n_steps: int = 1):
"""
Moves the agent one cell forward in a random direction for a
number of times.
"""
for i in range(n_steps):
self.turn_right(randint(0, 360))
self.forward()
@abstractmethod
def step(self):
"""
Methods to be performed by the agent at each step of the
simulation.
"""
raise NotImplementedError
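# Illustrative subclass (not part of the original module): a concrete agent only
# needs to implement step(); the World instance is assumed to come from
# quaesit.world.
#
#     class Walker(Agent):
#         def step(self):
#             self.random_walk()
#             for other in self.agents_here():
#                 self.face_towards(other.coords)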
```
#### File: quaesit/quaesit/world.py
```python
import numpy as np
import rasterio as rio
from abc import ABCMeta, abstractmethod
from random import shuffle, randint, choice
from scipy.interpolate import interp2d
from statistics import mean
from tqdm import tqdm
from typing import Dict, Tuple
class World(metaclass=ABCMeta):
"""
Class to represent the environment or world in an agent-based model.
"""
def __init__(self, width: int, height: int, tracking: Dict = None,
torus: bool = True):
self.width = width
self.height = height
self.grid = self.init_grid()
self.torus = torus
self.agents = {}
self.tick = 0
self.display_layer = None
self.tracking = tracking
self.globals = {}
if self.tracking:
self.track = {agent: {param: [] for param in tracking[agent]}
for agent in tracking}
def init_grid(self) -> Dict:
"""
Creates the world grid with a layer to keep track of agents in
each cell.
"""
grid = {}
for i in range(self.width):
for j in range(self.height):
grid[(i, j)] = {'agents': []}
return grid
def add_layer(self, layer_name: str, file: str = None, array=None,
value: int = 0, display: bool = False):
"""
Adds a new layer to the grid. Layer can be initialized with a
given value or can be generated from a raster file or from a
numpy array. In the latter cases, the layer is resampled to the
world's dimensions.
"""
if file is not None:
with rio.open(file) as layer:
array = layer.read(1)
self.interp_to_grid(array, layer_name)
elif array is not None:
self.interp_to_grid(array, layer_name)
else:
for cell in self.grid:
self.grid[cell][layer_name] = value
if display:
self.display_layer = layer_name
def interp_to_grid(self, array, layer_name):
"""
Bilinear interpolation of an array to the world's dimensions.
"""
height, width = array.shape
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(width), xrange(height), array, kind='linear')
new_arr = f(xrange(self.width), xrange(self.height))
for i in range(self.width):
for j in range(self.height):
self.grid[(i, j)][layer_name] = new_arr[self.height - 1 - j, i]
def to_torus(self, coords: Tuple) -> Tuple:
"""
In case world is toroidal, converts coordinates that exceed its
limits back to the grid.
"""
x, y = coords
return (x % self.width, y % self.height)
def add_agent(self, agent):
"""
Adds a newly-created agent to the dictionary of agents and to
the grid.
"""
self.agents[agent._id] = agent
self.place_on_grid(agent)
def remove_from_grid(self, agent):
"""
Removes an agent from the grid.
"""
self.grid[agent.coords]['agents'].remove(agent)
def place_on_grid(self, agent):
"""
Places an agent on the grid's layer that keeps track of where
agents are.
"""
self.grid[agent.coords]['agents'].append(agent)
def random_cell(self):
"""
Returns the coordinates of a random grid cell.
"""
return (randint(0, self.width - 1), randint(0, self.height - 1))
def random_empty_cell(self):
"""
Returns the coordinates of a random grid cell with no agents
on it.
"""
empty_cells = [cell for cell in self.grid
if not self.grid[cell]['agents']]
return choice(empty_cells)
def save(self):
"""
Stores the variables to be tracked at each step of the model.
"""
for agent in self.tracking:
if agent == 'global':
for param in self.tracking[agent]:
self.track['global'][param].append(
self.globals[param])
elif agent[:5] == 'grid_':
layer = np.reshape([self.grid[(i, j)][agent[5:]]
for j in range(self.height)
for i in range(self.width)],
(self.height, self.width))
for param in self.tracking[agent]:
if param[:6] == 'count_':
val = param[6:]
if val.isdigit():
val = int(val)
self.track[agent][param].append(
np.count_nonzero(layer == val))
elif param == 'avg':
self.track[agent][param].append(
np.average(layer))
elif param == 'sum':
self.track[agent][param].append(
np.sum(layer))
elif param == 'min':
self.track[agent][param].append(
np.min(layer))
elif param == 'max':
self.track[agent][param].append(
np.max(layer))
else:
for param in self.tracking[agent]:
if param == 'count':
self.track[agent][param].append(
len([self.agents[_id] for _id in self.agents
if self.agents[_id].breed == agent]))
elif param[:4] == 'avg_':
self.track[agent][param].append(
mean([getattr(self.agents[_id], param[4:])
for _id in self.agents
if self.agents[_id].breed == agent] or [0]))
elif param[:4] == 'sum_':
self.track[agent][param].append(
sum([getattr(self.agents[_id], param[4:])
for _id in self.agents
if self.agents[_id].breed == agent]))
elif param[:4] == 'min_':
self.track[agent][param].append(
min([getattr(self.agents[_id], param[4:])
for _id in self.agents
if self.agents[_id].breed == agent] or [0]))
elif param[:4] == 'max_':
self.track[agent][param].append(
max([getattr(self.agents[_id], param[4:])
for _id in self.agents
if self.agents[_id].breed == agent] or [0]))
@abstractmethod
def setup(self):
"""
Actions to be executed to prepare the model before it starts to
run.
"""
raise NotImplementedError
def step(self):
"""
At each step of the model, each agent performs the actions
defined in their own step method. Agents' actions are not
parallel, but the order of the agents is shuffled at every step
of the model. If keeping track of variables, they are saved at
every step.
"""
agent_ids = list(self.agents.keys())
shuffle(agent_ids)
for _id in agent_ids:
if _id in self.agents:
self.agents[_id].step()
if self.tracking:
self.save()
self.tick += 1
def iterate(self, n_steps: int):
"""
Runs the model for a number of steps.
"""
for i in tqdm(range(n_steps)):
self.step()
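# Illustrative sketch (not part of the original module; Walker refers to the
# hypothetical Agent subclass sketched in agent.py): a concrete World implements
# setup(), after which iterate() drives every agent's step() once per tick.
#
#     class MyWorld(World):
#         def setup(self):
#             self.add_layer('elevation', value=0)
#             for _ in range(10):
#                 Walker(self)
#
#     world = MyWorld(50, 50, tracking={'walker': ['count']})
#     world.setup()
#     world.iterate(100)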
```
|
{
"source": "jgrembi/nL-qPCR_PathogenChip",
"score": 3
}
|
#### File: nL-qPCR_PathogenChip/nlpr/pcr_tools.py
```python
primer3_path = '/usr/bin/' #/usr/bin/primer3_core
fuzznuc_path = '/usr/bin/'
def hello_pcr_tools():
print "## \nYou are using a version pcr_tools.py last updated on Sept 20, 2012 \n##"
def file_to_list(sys_index = 1,line_index = 0):
L = list()
import sys
fn = sys.argv[sys_index]
fh = open(fn , 'r')
for line in fh:
L.append(line.strip().split()[line_index])
return L
def generate_seq_dictionary(x):
# arg: a <filename.fna>
# returns: a dictionary <record_dict>
# USE ME IF YOU WANT TO GENERATE A DICTIONARY OF SEQUENCE
# example: master_dictionary = generate_seq_dictionary(sys.argv[1])
from Bio import SeqIO
fh = open(x, "rU")
record_dict = SeqIO.to_dict(SeqIO.parse(fh, "fasta"))
fh.close()
print "## \nYou generated a sequence dictionary with %i entries \n##"%(len(record_dict.keys()))
return record_dict
def write_subfasta(L,D,filename):
# args:
# (1) a List of the accession you want to create
# (2) Dictionary to search
# (3) filename to write output file
# returns
# write the output file
# USE ME WITH THE ABOVE generate_seq_dictionary TO MAKE A SUB FASTA FILE FROM THE MASTER DICTIONARY
# example:
# master_dictionary = generate_seq_dictionary(sys.argv[1])
# write_subfasta(list_of_accessions, master_dictionary, 'output.fna')
fh = open(filename, "w")
for l in L:
#print "> %s \n%s\n" %(D[l].id,str(D[l].seq))
try:
fh.write("> %s \n%s\n" %(D[l].id,str(D[l].seq)))
except KeyError:
continue
fh.close()
print "## \nYou wrote a subfasta with with %i sequence(s)\n##"%(len(L))
def write_full_subfasta(L,D,filename):
# args:
# (1) a List of the accession you want to create
# (2) Dictionary to search
# (3) filename to write output file
# returns
# write the output file
# USE ME WITH THE ABOVE generate_seq_dictionary TO MAKE A SUB FASTA FILE FROM THE MASTER DICTIONARY
# example:
# master_dictionary = generate_seq_dictionary(sys.argv[1])
# write_subfasta(list_of_accessions, master_dictionary, 'output.fna')
fh = open(filename, "w")
for l in L:
#print "> %s \n%s\n" %(D[l].id,str(D[l].seq))
try:
fh.write(">%s \n%s\n" %(D[l].description,str(D[l].seq)))
except KeyError:
continue
fh.close()
print "## \nYou wrote a subfasta with with %i sequence(s)\n##"%(len(L))
def fna_to_pr3_input(In, Out):
# For now the parameters are hardcoded, but this takes a fasta file and outputs .pr3in file
import sys
from Bio import SeqIO
file_handle = open(In, 'r')
output_handle = open(Out, "w")
#This block writes the parameters to be use by primer3. Version 1.4
output_handle.write("PRIMER_NUM_RETURN=500\n")
output_handle.write("PRIMER_MIN_TM=54\n")
output_handle.write("PRIMER_OPT_TM=55\n")
output_handle.write("PRIMER_MAX_TM=56\n")
#output_handle.write("PRIMER_OPT_SIZE=17\n")
output_handle.write("PRIMER_MIN_SIZE=15\n")
output_handle.write("PRIMER_MAX_SIZE=26\n")
output_handle.write("PRIMER_NUM_NS_ACCEPTED=1\n")
output_handle.write("PRIMER_PRODUCT_SIZE_RANGE=75-200\n")
output_handle.write("PRIMER_GC_CLAMP=0\n")
output_handle.write("PRIMER_FILE_FLAG=1\n")
output_handle.write("PRIMER_EXPLAIN_FLAG=1\n")
#This block writes the sequences in sys.argv[1] to the primer3input file
counter = 0
for seq_record in SeqIO.parse(file_handle, 'fasta'):
output_handle.write("PRIMER_SEQUENCE_ID=" + str(seq_record.id) +"\n")
output_handle.write("SEQUENCE=" + str(seq_record.seq) + "\n")
output_handle.write("="+ "\n")
counter = counter + 1
print "## \nYou generated a primer3 input file with %i sequence(s)\n##"%(counter)
def fna_to_pr3_v2_3_4_input(In, Out, number_of_primers_to_generate):
# For now the parameters are hardcoded, but this takes a fasta file and outputs .pr3in file
import sys
from Bio import SeqIO
file_handle = open(In, 'r')
output_handle = open(Out, "w")
#This block writes the parameters to be use by primer3. Version 1.4
output_handle.write("PRIMER_NUM_RETURN=%i\n"%(number_of_primers_to_generate))
output_handle.write("PRIMER_MIN_TM=59\n")
output_handle.write("PRIMER_OPT_TM=60\n")
output_handle.write("PRIMER_MAX_TM=61\n")
output_handle.write("PRIMER_MIN_SIZE=15\n")
output_handle.write("PRIMER_MAX_SIZE=28\n")
output_handle.write("PRIMER_NUM_NS_ACCEPTED=1\n")
output_handle.write("PRIMER_PRODUCT_SIZE_RANGE=45-200\n") # CHANGED FOR tRNA study Feb 10, 2014
output_handle.write("PRIMER_GC_CLAMP=0\n")
output_handle.write("PRIMER_FILE_FLAG=1\n")
output_handle.write("PRIMER_EXPLAIN_FLAG=1\n")
output_handle.write('PRIMER_TM_FORMULA=1\n')
output_handle.write('PRIMER_SALT_CORRECTIONS=1\n')
output_handle.write('PRIMER_THERMODYNAMIC_ALIGNMENT=1\n')
output_handle.write('PRIMER_SALT_DIVALENT=3\n')
output_handle.write('PRIMER_DNTP_CONC=0.6\n')
output_handle.write('PRIMER_LIB_AMBIGUITY_CODES_CONSENSUS=0\n')
#output_handle.write('PRIMER_THERMODYNAMIC_PARAMETERS_PATH=' + primer3_path + 'primer3_config/\n')
# output_handle.write('PRIMER_THERMODYNAMIC_PARAMETERS_PATH=/Users/koshlan/primer3-2.3.4/src/primer3_config/\n') #!UPDATE ME!#
#output_handle.write('PRIMER_THERMODYNAMIC_PARAMETERS_PATH=/Users/JGrembi/primer3-2.3.4/src/primer3_config/\n')
#This block writes the sequences in sys.argv[1] to the primer3input file
counter = 0
for seq_record in SeqIO.parse(file_handle, 'fasta'):
output_handle.write("SEQUENCE_ID=" + str(seq_record.id) +"\n")
output_handle.write("SEQUENCE_TEMPLATE=" + str(seq_record.seq) + "\n")
output_handle.write("="+ "\n")
counter = counter + 1
print "## \nYou generated a primer3v2.3.4 input file with %i sequence(s)\n##"%(counter)
def write_check_p3(p1,p2,Out):
oh = open(Out, 'w')
oh.write("PRIMER_TASK=check_primers\n")
oh.write("SEQUENCE_PRIMER=%s\n"%(p1))
oh.write("SEQUENCE_PRIMER_REVCOMP=%s\n"%(p2))
oh.write("PRIMER_MIN_TM=55\n")
oh.write("PRIMER_MAX_TM=70\n")
oh.write("PRIMER_FILE_FLAG=1\n")
oh.write("PRIMER_EXPLAIN_FLAG=1\n")
oh.write("PRIMER_TM_FORMULA=1\n")
oh.write("PRIMER_SALT_CORRECTIONS=1\n")
oh.write("PRIMER_THERMODYNAMIC_ALIGNMENT=1\n")
oh.write("PRIMER_SALT_DIVALENT=3\n")
oh.write("PRIMER_DNTP_CONC=0.6\n")
oh.write("PRIMER_LIB_AMBIGUITY_CODES_CONSENSUS=0\n")
#oh.write("PRIMER_THERMODYNAMIC_PARAMETERS_PATH=" + primer3_path + "primer3_config/\n")
# oh.write("PRIMER_THERMODYNAMIC_PARAMETERS_PATH=/Users/JGrembi/primer3-2.3.4/src/primer3_config/\n")#!UPDATE ME!#
# oh.write("PRIMER_THERMODYNAMIC_PARAMETERS_PATH=/Users/koshlan/primer3-2.3.4/src/primer3_config/\n")
oh.write("=\n")
def pr3_modern(In, Out, pr3_path):
import os
os.system(pr3_path + "primer3_core <" + In + ">" + Out)
def pr3(In, Out):
import os
os.system(primer3_path + "primer3_core <" + In + ">" + Out)
#os.system("/Users/JGrembi/primer3-2.3.4/src/primer3_core <" + In + ">" + Out)
# os.system("/Users/koshlan/primer3-2.3.4/src/primer3_core <" + In + ">" + Out) #!UPDATE ME!#
# JULY 24, 2014 (I MADE THIS CHAGEN SO JESS COULD RUN ON HER MACHINE)#os.system("/Users/koshlan/primer3-2.3.4/src/primer3_core <" + In + ">" + Out)
#os.system("primer3_core <" + In + ">" + Out)
def parse_pr3out(In, Out):
# This was used from January 15, call_primer3_v3.py should work fine
primer3output = open(In, 'r')
output_handle = open(Out, 'w')
primers_dictionary = {}
PRIMER_PAIR = '0'
#######################################################
#### II.i if statements to give each sequence an ID ###
#######################################################
for line in primer3output:
# if the line starts with the sequence identifier (typically first line), we store that information as a variable
if line.startswith("PRIMER_SEQUENCE_ID"):
#define the parent sequence name as the second element after line is split it into 2 elements on either side of the "=" symbol and strip away the newline
PRIMER_SEQUENCE_ID = line.split("=")[1].strip("\n")
continue
#as we parse each pair begins with a PRIMER_PAIR_PENALTY_ Designation
# for the first case
if line.startswith("PRIMER_PAIR_PENALTY") and line.split('=')[0] == "PRIMER_PAIR_PENALTY":
PRIMER_PAIR = '0'
PRIMER_PAIR_PENALTY = line.split('=')[1].strip('\n')
continue
if line.startswith("PRIMER_PAIR_PENALTY_"):
# strip away the Primer_Pair_Penalty portion of the string leaving just the number which will be use to identify the pair
try:
PRIMER_PAIR = line.split("=")[0]
PRIMER_PAIR = PRIMER_PAIR.split("PRIMER_PAIR_PENALTY_")[1]
PRIMER_PAIR_PENALTY = line.split('=')[1].strip('\n')
except IndexError:
continue
#######################################################
#### II.ii if statements to get sequence and make dictionary
#######################################################
if "PRIMER_LEFT" in line and "SEQUENCE" in line:
PRIMER_LEFT_SEQUENCE = line.split("=")[1].strip('\n')
primers_dictionary[PRIMER_SEQUENCE_ID + "_" + PRIMER_PAIR] = [PRIMER_LEFT_SEQUENCE]
if "PRIMER_RIGHT" in line and "SEQUENCE" in line:
PRIMER_RIGHT_SEQUENCE = line.split("=")[1].strip('\n')
primers_dictionary[PRIMER_SEQUENCE_ID + "_" + PRIMER_PAIR].append(PRIMER_RIGHT_SEQUENCE)
#######################################################
#### II.iii if statements to parse Tm, primer_start_positions, and penalty_score and PRINT
#######################################################
#SPECIAL if Statments for the first case of primer position
if line.split("=")[0] == "PRIMER_LEFT":
LEFT_START = line.split("=")[1].split(',')[0]
if line.split("=")[0] == "PRIMER_RIGHT":
RIGHT_START = line.split("=")[1].split(',')[0]
#SPECIAL if Statments for 2nd through nth case for primer position
if line.split('=')[0] == "PRIMER_LEFT_" + PRIMER_PAIR:
LEFT_START = line.split("=")[1].split(',')[0]
if line.split('=')[0] == "PRIMER_RIGHT_" + PRIMER_PAIR:
RIGHT_START = line.split("=")[1].split(',')[0]
#if Statments for 1st through nth case for TM
if "PRIMER_LEFT" in line and 'TM' in line:
LEFT_TM = line.split("=")[1].strip('\n')
if "PRIMER_RIGHT" in line and 'TM' in line:
RIGHT_TM = line.split("=")[1].strip('\n')
if "PRIMER_PRODUCT_SIZE" in line and not("RANGE" in line):
PRIMER_PRODUCT_SIZE = line.split("=")[1].strip('\n')
#######################################################
#### II.iii PRINT and WRITE OUTPUT
#######################################################
#print ">" + PRIMER_SEQUENCE_ID + "_" + PRIMER_PAIR + "\t"+ PRIMER_PAIR_PENALTY + "\t" + PRIMER_LEFT_SEQUENCE + "\t" + PRIMER_RIGHT_SEQUENCE +"\t"+ LEFT_TM + "\t" + RIGHT_TM + "\t" + LEFT_START + "\t" + RIGHT_START + "\t"+ PRIMER_PRODUCT_SIZE
output_handle.write(">" + PRIMER_SEQUENCE_ID + "_" + PRIMER_PAIR + "\t"+ PRIMER_PAIR_PENALTY + "\t" + PRIMER_LEFT_SEQUENCE + "\t" + PRIMER_RIGHT_SEQUENCE +"\t"+ LEFT_TM + "\t" + RIGHT_TM + "\t" + LEFT_START + "\t" + RIGHT_START + "\t"+ PRIMER_PRODUCT_SIZE + "\n")
def parse_pr3_v2_3_4_out(In, Out):
# This was used from January 15, call_primer3_v3.py should work fine
primer3output = open(In, 'r')
output_handle = open(Out, 'w')
primers_dictionary = {}
PRIMER_PAIR = '0'
#######################################################
#### II.i if statements to give each sequence an ID ###
#######################################################
import re
for line in primer3output:
# if the line starts with the sequence identifier (typically first line), we store that information as a variable
if line.startswith("SEQUENCE_ID"):
#define the parent sequence name as the second element after line is split it into 2 elements on either side of the "=" symbol and strip away the newline
PRIMER_SEQUENCE_ID = line.split("=")[1].strip("\n")
continue
if re.search('PRIMER_PAIR_([\d]+)_PENALTY', line): # SEARCH FOR "PRIMER_PAIR_1_PENALTY"
matchObj = re.search('PRIMER_PAIR_([\d]+)_PENALTY', line)
try:
PRIMER_PAIR = matchObj.group(1)
PRIMER_PAIR_PENALTY = line.split('=')[1].strip('\n')
except IndexError:
continue
#######################################################
#### II.ii if statements to get sequence and make dictionary
#######################################################
if re.search('PRIMER_LEFT_([\d]+)_SEQUENCE', line):
PRIMER_LEFT_SEQUENCE = line.split("=")[1].strip('\n')
continue
if re.search('PRIMER_RIGHT_([\d]+)_SEQUENCE', line):
PRIMER_RIGHT_SEQUENCE = line.split("=")[1].strip('\n')
continue
if re.search('PRIMER_LEFT_([\d]+)=', line):
LEFT_START = line.split("=")[1].split(',')[0]
continue
if re.search('PRIMER_RIGHT_([\d]+)=', line):
RIGHT_START = line.split("=")[1].split(',')[0]
continue
if re.search('PRIMER_LEFT_([\d]+)_TM', line):
LEFT_TM = line.split("=")[1].strip('\n')
continue
if re.search('PRIMER_RIGHT_([\d]+)_TM', line):
RIGHT_TM = line.split("=")[1].strip('\n')
continue
if re.search('PRIMER_PAIR_([\d]+)_PRODUCT_SIZE', line):
PRIMER_PRODUCT_SIZE = line.split("=")[1].strip('\n')
output_handle.write(">" + PRIMER_SEQUENCE_ID + "_" + PRIMER_PAIR + "\t"+ PRIMER_PAIR_PENALTY + "\t" + PRIMER_LEFT_SEQUENCE + "\t" + PRIMER_RIGHT_SEQUENCE +"\t"+ LEFT_TM + "\t" + RIGHT_TM + "\t" + LEFT_START + "\t" + RIGHT_START + "\t"+ PRIMER_PRODUCT_SIZE + "\n")
continue
def write_fuznuc_pattern(p1,p2, Out, mismatches = 4):
#args:
# p1 (string) first primer sequence
# p2 (string) second primer sequence
# mismatches (int) number of acceptable mismatches per primer (DEFAULT = 4)
# Out (string) name of temporary file to write the pattern file
# results: writes a temporary file
# >pat1 <mismatch=4>
# GTTAGTCCCTTGGTGGCT
# >pat2 <mismatch=4>
# CGGGTCTAAAGCCTTTTC
fh = open(Out, 'w')
fh.write(">pat1 <mismatch=%i>\n%s\n>pat2 <mismatch=%i>\n%s\n"%(mismatches,p1,mismatches,p2))
def execute_fuznuc(pattern_file,target_seqs, Out):
# Args:
# pattern_file (string): name of the temporary pattern file .pat
# target (string): name of the sequences file .fna
# Out (sting): name of the temporary output file
#NOTE: ENSURE THE FOLLOWING IS INSTALLED in ~/EMBOSS-6.4.0/emboss/fuzznuc
#NOTE: http://emboss.sourceforge.net/docs/themes/ReportFormats.html
import os
os.system(fuzznuc_path + 'fuzznuc %s %s -pattern @%s -complement Y -rformat excel'%(target_seqs, Out, pattern_file))
#os.system('~/EMBOSS-6.5.7/emboss/fuzznuc %s %s -pattern @%s -complement Y -rformat excel'%(target_seqs, Out, pattern_file))
#print "## \nFrom %s and %s You generated a fuznuc output file: %s\n##"%(target_seqs, pattern_file,Out)
#EXAMPLE:
#os.system('/Users/koshlan/EMBOSS-6.4.0/emboss/fuzznuc fuzzIn.fasta fuzzOut.pat -pattern @oligo_nucseq.pat -complement Y')
def parse_fzout(In):
# args: In (string) f
# Returns Dictionary D[SeqName] { { 'AB301952.1': { 'negative': '1', 'positive': '0', 'total': 0},
fh = open(In, 'r')
D={}
for line in fh:
# SKIP HEADER LINES
if line.startswith("SeqName"):
continue
else:
line = line.strip()
L = line.split()
(SeqName,Start,End,Score,Strand,Pattern_Seq,Mismatch) = line.split()
Pattern = Pattern_Seq.split(':')[0] # KMB JAN 23, 2013, fixed the fact that pat:seq
Seq = Pattern_Seq.split(':')[1]
            Mismatch = Mismatch.replace('.', '0') # Changed out the . for a zero mismatch
if SeqName not in D.keys():
D[SeqName] = dict()
D[SeqName]['pat1'] = 99
D[SeqName]['pat2'] = 99
if Pattern == 'pat1': # KMB JAN 27, 2013 CHANGED to 'pat1:' to 'pat1'
D[SeqName]['pat1'] = int(Mismatch)
elif Pattern == "pat2": # KMB JAN 27, 2013 CHANGED to 'pat1:' to 'pat1'
D[SeqName]['pat2'] = int(Mismatch)
L = D.keys()
for l in L:
try:
X = int(D[l]['pat1']) + int(D[l]['pat2'])
except KeyError:
D[l]['total'] = 99
else:
D[l]['total'] = X
#print "## \nThe function <parse_fzout> recieved %s, and returned a dictionary"%(In)
# import pprint
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(D)
return D
def hit_percentage_calc(L,D,acceptable_mismatches):
hit_count = 0
for l in L:
try:
(l in D.keys()) & (D[l]['total'] < acceptable_mismatches)
except KeyError:
continue
else:
if (l in D.keys()) & (D[l]['total'] < acceptable_mismatches):
hit_count = hit_count + 1
hit_percentage = round(float(hit_count)/float(len(L)), 3) # (1)
return hit_percentage
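# Illustrative example (not part of the original file): with a fuzznuc result
# dictionary D in the shape produced by parse_fzout, the fraction of targets in
# L with fewer than the allowed total mismatches is:
#     hit_percentage_calc(['AB301952.1'],
#                         {'AB301952.1': {'pat1': 0, 'pat2': 1, 'total': 1}}, 2)
#     # -> 1.0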
def hit_percentage_calc_plus_discovery(L,D,acceptable_mismatches):
hit_count = 0
Lout = []
for l in L:
try:
(l in D.keys()) & (D[l]['total'] < acceptable_mismatches)
except KeyError:
continue
else:
if (l in D.keys()) & (D[l]['total'] < acceptable_mismatches):
Lout.append(l)
hit_count = hit_count + 1
hit_percentage = round(float(hit_count)/float(len(L)),3) # (1)
#print hit_percentage
#print Loutl
#print L
#print D
return {'hit_percentage' : hit_percentage, 'hits': Lout}
def hit_percentage_calc_plus_number_of_mismatch(L,D,acceptable_mismatches):
hit_count = 0
Lout = []
Lout_mismatch = []
for l in L:
try:
(l in D.keys()) & (D[l]['total'] < acceptable_mismatches)
except KeyError:
continue
else:
if (l in D.keys()) & (D[l]['total'] < acceptable_mismatches):
Lout.append(l)
Lout_mismatch.append(D[l]['total'])
hit_count = hit_count + 1
hit_percentage = round(float(hit_count)/float(len(L)),3) # (1)
#print hit_percentage
#print Loutl
#print L
#print D
return {'hit_percentage' : hit_percentage, 'hits': Lout, 'number_mismatched': Lout_mismatch }
def sort_single_capture_primers(In,Out,number_of_primers_to_review):
# this finds the primers that best capture the whole cluster, return the 100 best.
import os
os.system("sort -k 10,10nr -k 2,2n %s > tempsort.txt" %(In)) # SORT COMMAND, SORT FIRST ON THE 10th column numerically, next sort on the 2nd colum
os.system('head -%i tempsort.txt > %s' %(number_of_primers_to_review, Out))
# sort -t$'\t' -k 10,10nr -k 2,2n
def sort_single_capture_primers_by_2_columns(In,Out,number_of_primers_to_review, column, column2):
# this finds the primers that best capture the whole cluster, return the 100 best.
import os
os.system("sort -k %i,%inr -k %i,%inr -k 2,2n %s > tempsort.txt" %(column, column, column2, column2,In)) # SORT COMMAND, SORT FIRST ON THE 10th column numerically, next sort on the 2nd colum
os.system('head -%i tempsort.txt > %s' %(number_of_primers_to_review, Out))
# sort -t$'\t' -k 10,10nr -k 2,2n
def sort_by_priority_columns(In,Out,priority_column, second_priority_column, number_of_primers_to_review, direction):
# this finds the primers that best capture the whole cluster, return the 100 best.
import os
priority_column = int(priority_column)
second_priority_column = int(second_priority_column)
if direction is 'F':
os.system("sort -k %i,%in -k %i,%in %s > tempsort.txt" %(priority_column, priority_column, second_priority_column, second_priority_column, In)) # SORT COMMAND, SORT FIRST ON THE priority column numerically, next sort on the 2nd priority column
os.system('head -%i tempsort.txt > %s' %(number_of_primers_to_review, Out))
elif direction is 'R':
os.system("sort -k %i,%inr -k %i,%in %s > tempsort.txt" %(priority_column, priority_column, second_priority_column, second_priority_column, In)) # SORT COMMAND, SORT FIRST ON THE priority column numerically, next sort on the 2nd priority column
os.system('head -%i tempsort.txt > %s' %(number_of_primers_to_review, Out))
# sort -t$'\t' -k 10,10nr -k 2,2n
def sort_by_3_priority_columns(In,Out,priority_column, second_priority_column, third_priority_column, number_of_primers_to_review, direction):
# this finds the primers that best capture the whole cluster, return the 100 best.
import os
priority_column = int(priority_column)
second_priority_column = int(second_priority_column)
third_priority_column = int(third_priority_column)
if direction is 'F':
os.system("sort -k %i,%in -k %i,%inr -k %i,%in %s > tempsort.txt" %(priority_column, priority_column, second_priority_column, second_priority_column,third_priority_column,third_priority_column, In)) # SORT COMMAND, SORT FIRST ON THE priority column numerically, next sort on the 2nd priority column
os.system('head -%i tempsort.txt > %s' %(number_of_primers_to_review, Out))
elif direction is 'R':
os.system("sort -k %i,%inr -k %i,%inr -k %i,%in %s > tempsort.txt" %(priority_column, priority_column, second_priority_column, second_priority_column,third_priority_column,third_priority_column, In)) # SORT COMMAND, SORT FIRST ON THE priority column numerically, next sort on the 2nd priority column
os.system('head -%i tempsort.txt > %s' %(number_of_primers_to_review, Out))
# sort -t$'\t' -k 10,10nr -k 2,2n
def sort_exclusion_table(In, Out):
import os
os.system("sort -k ")
def probe_plotter(line, fn_blastout, count):
# ARGS
# A LINE FROM MY BLAST OUT (NOTE: COLUMN INDEX 12 and -1 MUST CONTAIN THE RELEVANT FIELDS)
# fn - filename : FOR THE BLASTOUT PUT
# DEPDENDS ON grab_sub_blastout and R_probe_plotter.R
# RETURNS
# EXECUTES R, WHICH WRITES A PDF SHOWNING THE PERFORMANCE OF THE ASSAY
# RUN ME: probe_plotter(line, '../3_Run/PF13486_Full_Length350to700AA.ncbi.aa.gbk.faa.blastresult')
# line = ">BAE84628.1_0 0.054721 GGCTATTATGCAGCGCCGTG CAGAACTCGCGTACCCCGAA 63.949 64.004 1062 1258 197 1.0 1.0 1.0 BAE84628.1|CAD28792.1 0.005 0.052 0.054 ACH87596.1|CAR57929.1|CAR57926.1|ACH87599.1|ACH87597.1|BAF57046.1|CAR57927.1|AAO60101.1|CAR57937.1|CAR57932.1|CAJ75430.1|CAJ75435.1|CAR57933.1|ACH87594.1|CAR57934.1|ACH87598.1|CAR57936.1|CAR57931.1|CAD28790.2|CAR57935.1|CAR57930.1"
import os
line = line.strip()
primary_seq = line.split()[0]
assay_id = primary_seq.replace(">","")
assay_id = str(count) + "_" + assay_id # THIS ALLOWS COUNTING
primary_seq = primary_seq.split("_")[0].replace(">","")
inclusion_hit_list = line.split()[12].split("|")
exclusion_hit_list = line.split()[16].split("|")
from pcr_tools import grab_sub_blastout
grab_sub_blastout(primary_seq, fn_blastout, 'BLASTout.temp') # Grab the sub-section of the blast that you need, next you will add a column with hits and non hits
fh = open('BLASTout.temp', 'r')
oh = open('BLASToutScored.temp', 'w')
for line in fh:
line = line.strip()
subject = line.split()[1]
if subject in inclusion_hit_list:
oh.write(line + "\tI\n")
elif subject in exclusion_hit_list:
oh.write(line + "\tE\n")
else:
oh.write(line + "\tN\n")
fh.close()
oh.close()
# CALL R
path = os.getcwd() + "/"
os.system("./R_probe_plotter.R %s %s %s"%(assay_id, path, 'BLASToutScored.temp'))
def advanced_probe_plotter(full_line, fn_blastout, count):
# ARGS
# A LINE FROM MY BLAST OUT (NOTE: COLUMN INDEX 12 and -1 MUST CONTAIN THE RELEVANT FIELDS)
    # fn - filename : FOR THE BLAST OUTPUT
    # DEPENDS ON grab_sub_blastout and R_complex_probe_plotter.R
    # RETURNS
    # EXECUTES R, WHICH WRITES A PDF SHOWING THE PERFORMANCE OF THE ASSAY
# RUN ME:
import os
full_line = full_line.strip()
inclusion_hit_list = full_line.split()[10].split("|")
thermo_penalty = full_line.split()[1] # This is the primer3 penalty
inclusivity = full_line.split()[9] # This is the inclusivity of the desired sequences
primary_seq = full_line.split()[0] # >BAE84628.1_0
original_seq = primary_seq.split("_")[0].replace(">","")
assay_id = primary_seq.replace(">","") #BAE84628.1_0
assay_id = str(count) + "_" + assay_id # THIS ALLOWS COUNTING
primary_seq = primary_seq.split("_")[0].replace(">","") #BAE84628.1
match_info = full_line.split()[9::2]
match_hits = full_line.split()[10::2]
exclusion_hits= match_hits[5:]
from pcr_tools import list_partition
non_redundant_hits = list_partition(exclusion_hits, "|", "No_Hits")
my_dict = dict(enumerate(non_redundant_hits))
new_dict = {} # Contains the index position of every sequence (in this case 0 corresponds with zero mismatchs, 1 with (1,2 mismatchs), 2 (3,4 mismatches))
for k in my_dict.keys():
L = my_dict[k]
for l in L:
new_dict[l] = k
# NOW WE GO THROUGH THE ACTUAL BLAST AND MARK EACH SEQUENCE WITH LOOKUPS.
from pcr_tools import grab_sub_blastout
grab_sub_blastout(primary_seq, fn_blastout, 'BLASTout.temp') # Grab the sub-section of the blast that you need, next you will add a column with hits and non hits
fh = open('BLASTout.temp', 'r')
oh = open('BLASToutScored.temp', 'w')
for line in fh:
line = line.strip()
subject = line.split()[1]
if subject in inclusion_hit_list:
oh.write(line + "\tI" + "\tNA\n")
continue
else:
oh.write(line + "\tE")
if subject in new_dict.keys():
x = new_dict[subject]
oh.write("\t%s\n" %(x))
else:
oh.write("\tNA\n")
fh.close()
oh.close()
path = os.getcwd() + "/"
os.system("../scripts_used/R_complex_probe_plotter.r %s %s %s %s"%(assay_id, path, 'BLASToutScored.temp', original_seq))
def grab_sub_blastout(seq_id, fn_blastout, Out):
import os
os.system("grep -E '^%s' %s > %s" %(seq_id, fn_blastout, Out))
def list_partition(L, deliminator, empty):
    # THIS TAKES A LIST OF STRINGS JOINED BY A DELIMINATOR, BREAKS THEM OPEN AND PARTITIONS THEM INTO A LIST OF LISTS, WHERE EACH SUBSEQUENT OBJECT HAS NO ENTRIES FROM THE PREVIOUS SET. THIS HAS A PRACTICAL IMPLICATION WHEN TAKING OUR LIST OF HITS GIVEN AN INCREASING NUMBER OF ACCEPTABLE MISMATCHES.
# TEST IT WITH:
#mega = ["A|B|C","A|B|C|D","A|B|C|D|E","A|B|C|D|E|F"]
#print list_partition(mega, "|", "No_Hits")
# SHOULD GET : [['A', 'C', 'B'], ['D'], ['E'], ['F']]
def no_hits_check(L, string):
if string in L :
return [ ]
else:
return L
accumulated_set = set()
output = []
while len(L) > 0:
my_list = L.pop(0).split(deliminator)
my_list = no_hits_check(my_list, empty)
my_set = set(my_list)
my_set = my_set - accumulated_set
accumulated_set = accumulated_set | my_set
output.append(list(my_set))
return output
```
#### File: nlpr/utils/nL_nr_assay_check.py
```python
import sys
fn = sys.argv[1]
fh = open(fn, 'r')
def plus_or_minus(x,h):
L = []
for i in range(h):
L.append(int(x-i))
L.append(int(x+i))
return list(set(L))
def lists_overlap3(a, b):
return bool(set(a) & set(b))
forbidden_range_F = []
forbidden_range_R = []
forbidden_range_F2 = []
forbidden_range_R2= []
forbidden_range_F3 = []
forbidden_range_R3 = []
# Take the best hit
line = fh.readline()
line = line.strip()
print line
for line in fh:
forbidden_range_F3 = list(forbidden_range_F2)
forbidden_range_R3 = list(forbidden_range_R2)
forbidden_range_F2 = list(forbidden_range_F)
forbidden_range_R2 = list(forbidden_range_R)
#print "#####"
#print forbidden_range_F2
#print forbidden_range_F3
#print "#####"
line = line.strip()
start = int(line.split()[6])
end = int(line.split()[7])
forbidden_range_F.append(start)
forbidden_range_R.append(end)
test_F = plus_or_minus(int(start),4)
test_R = plus_or_minus(int(end),4)
if lists_overlap3(test_F, forbidden_range_F2) and lists_overlap3(test_R,forbidden_range_R2):
pass
else:
print line
fh.close()
```
|
{
"source": "jgrenadier/ui",
"score": 2
}
|
#### File: jgrenadier/ui/main.py
```python
import json
import re
import pandas as pd
from sqlalchemy import create_engine
import requests
import param
import parambokeh
import holoviews as hv
import geoviews as gv
import datashader as ds
import dask.dataframe as dd
from cartopy import crs
from bokeh.models import WMTSTileSource
from holoviews.operation.datashader import datashade
# Get renderer and set global display options
hv.extension('bokeh')
hv.opts("RGB [width=1200 height=682 xaxis=None yaxis=None show_grid=False]")
hv.opts("Polygons (fill_color=None line_width=1.5) [apply_ranges=False tools=['tap']]")
hv.opts("Points [apply_ranges=False] WMTS (alpha=0.5)")
# Load census data
df = dd.io.parquet.read_parquet('data/census.snappy.parq').persist()
census_points = gv.Points(df, kdims=['easting', 'northing'], vdims=['race'])
# Declare colormapping
color_key = {'w': 'white', 'b': 'green', 'a': 'red',
'h': 'orange', 'o': 'saddlebrown'}
races = {'w': 'White', 'b': 'Black', 'a': 'Asian',
'h': 'Hispanic', 'o': 'Other'}
color_points = hv.NdOverlay({races[k]: gv.Points([0,0], crs=crs.PlateCarree())(style=dict(color=v))
for k, v in color_key.items()})
# Apply datashading to census data
x_range, y_range = ((-13884029.0, -7453303.5), (2818291.5, 6335972.0)) # Continental USA
shade_defaults = dict(x_range=x_range, y_range=y_range, x_sampling=10,
y_sampling=10, width=1200, height=682,
color_key=color_key, aggregator=ds.count_cat('race'),)
shaded = datashade(census_points, **shade_defaults)
shapefile = {'state_house': 'cb_2016_48_sldl_500k',
'state_senate': 'cb_2016_48_sldu_500k',
'us_house': 'cb_2015_us_cd114_5m'}
# Define tile source
tile_url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'
tiles = gv.WMTS(WMTSTileSource(url=tile_url))
DIVISION_ID_RE = {
'state_house': re.compile(r'ocd-division/country:us/state:[a-z]{2}/sldl:([0-9]+)'),
'state_senate': re.compile(r'ocd-division/country:us/state:[a-z]{2}/sldu:([0-9]+)'),
'us_house': re.compile(r'ocd-division/country:us/state:[a-z]{2}/cd:([0-9]+)'),
'county': re.compile(r'ocd-division/country:us/state:[a-z]{2}/county:[^\/]+/council_district:([0-9]+)'),
'city_council': re.compile(r'ocd-division/country:us/state:[a-z]{2}/place:[^\/]+/council_district:([0-9]+)'),
}
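# Example (not part of the original file): the state_house pattern matches an
# OCD division id such as 'ocd-division/country:us/state:tx/sldl:47' and
# captures the district number '47'.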
# engine = create_engine('mysql+mysqlconnector://atxhackathon:atxhackathon@atxhackathon.chs2sgrlmnkn.us-east-1.rds.amazonaws.com:3306/atxhackathon', echo=False)
# cnx = engine.raw_connection()
# vtd_data = pd.read_sql('SELECT * FROM vtd2016preselection', cnx)
def address_latlon_lookup(address, api_key):
json_response = requests.get(
'https://maps.googleapis.com/maps/api/geocode/json?address={address}&key={api_key}'.format(
address=address, api_key=api_key))
# TODO: error handling for not found addresses
# result comes out looking like {"lat" : 30.2280933, "lng" : -97.8503729}
    location = json.loads(json_response.text)['results'][0]['geometry']['location']
return location['lat'], location['lng']
def address_district_lookup(address, district_type, api_key):
json_response = requests.get(
'https://www.googleapis.com/civicinfo/v2/representatives?address={address}&key={api_key}'.format(
address=address, api_key=api_key)
)
# TODO: error handling for not found addresses
    divisions = json.loads(json_response.text)['divisions']
for key in divisions:
match = DIVISION_ID_RE[district_type].match(key)
if match:
district = match.group(1)
# TODO: error handling for no matching RE (maybe due to different state expression)
return district
def load_district_shapefile(district_type, **kwargs):
district_type = '_'.join([part.lower() for part in district_type.split()])
shape_path = 'data/{0}/{0}.shp'.format(shapefile[district_type])
districts = gv.Shape.from_shapefile(shape_path, crs=crs.PlateCarree())
districts = gv.operation.project_shape(districts)
districts = hv.Polygons([gv.util.geom_to_array(dist.data) for dist in districts])
# districts.opts(plot=dict(fill_color=None, line_width=1.5))
return districts
class DistrictExplorer(hv.streams.Stream):
district_type = param.ObjectSelector(objects=('US House',
'State House',
'State Senate'),
default='US House')
def make_view(self, **kwargs):
districts = hv.DynamicMap(load_district_shapefile, streams=[self])
options = dict(width=1000, height=600, xaxis=None, yaxis=None,
show_grid=False)
tiles.opts(plot=options)
return tiles * shaded * color_points * districts
# def event(self, **kwargs):
# if not self.output or any(k in kwargs for k in ['District type']):
# self.output = hv.DynamicMap(self.view, streams=[self])
# else:
# super(DistrictExplorer, self).event(**kwargs)
explorer = DistrictExplorer(name="District explorer")
dmap = explorer.make_view()
plot = hv.renderer('bokeh').instance(mode='server').get_plot(dmap)
parambokeh.Widgets(explorer, continuous_update=True, callback=explorer.event,
on_init=True, plots=[plot.state], mode='server')
```
|
{
"source": "jgressmann/sc2links",
"score": 2
}
|
#### File: jgressmann/sc2links/addon.py
```python
__author__ = 'jgressmann'
from datetime import date
import pickle
import sys
import traceback
import urllib
import urlparse
#import urlresolver
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import zlib
import resources.lib.sc2links as sc2links
addon = xbmcaddon.Addon()
#__addonname__ = addon.getAddonInfo('name')
addonid = addon.getAddonInfo('id')
def debug(val):
if isinstance(val, str) or isinstance(val, unicode):
pass
else:
val = repr(val)
message = u'%s: %s' % (addonid, val)
xbmc.log(message.encode('utf-8'), xbmc.LOGDEBUG)
def build_url(query):
return sys.argv[0] + '?' + urllib.urlencode(query)
handle = int(sys.argv[1])
args = dict(urlparse.parse_qsl(sys.argv[2][1:]))
debug("url args: " + repr(args))
revealMatches = addon.getSetting('reveal_matches') == 'true'
debug('reveal matches: ' + str(revealMatches))
lastNYears = 0
try:
lastNYears = int(addon.getSetting('last_n_years_to_fetch'))
except:
pass
debug('last_n_years_to_fetch: ' + repr(lastNYears))
def get_youtube_info(url):
parsed = urlparse.urlparse(url)
args = urlparse.parse_qs(parsed.query)
id = None
time = None
# 'https://www.youtube.com/embed/TdjhjhbT3eA'
if parsed.path.startswith('/embed/'):
id = parsed.path[7:]
else:
# parse something like https://www.youtube.com/watch?v=XqywDF675kQ
#debug(str(args))
time = args.get('t', [''])[0]
id = args.get('v', [''])[0]
if not id:
# parse something like https://youtu.be/3A3guAd42Dw?t=9
if parsed.hostname == 'youtu.be':
pathParts = (parsed.path or '').split('/')
if len(pathParts) == 2:
id = pathParts[1]
if id:
return (id, time)
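# Worked examples (editor's note, not part of the original addon):
#   get_youtube_info('https://www.youtube.com/watch?v=XqywDF675kQ&t=42')
#       -> ('XqywDF675kQ', '42')
#   get_youtube_info('https://www.youtube.com/embed/TdjhjhbT3eA')
#       -> ('TdjhjhbT3eA', None)
# If no video id can be extracted, the function falls through and returns None.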
def get_youtube_plugin_url(web_url):
data = get_youtube_info(web_url)
if data:
id = data[0]
time = data[1]
if id:
args = {'play': 'plugin://plugin.video.youtube/play/?video_id={}'.format(id)}
if time:
args['time'] = time
return build_url(args)
debug('failed to get youtube id for ' + repr(web_url))
def get_twitch_info(url):
# parse something like https://www.twitch.tv/videos/161472611?t=07h49m09s
def _twitch_time_to_seconds(t):
seconds = 0
buf = ''
for c in t:
if c == 'h':
if len(buf):
seconds += int(buf) * 3600
buf = ''
elif c == 'm':
if len(buf):
seconds += int(buf) * 60
buf = ''
elif c == 's':
if len(buf):
seconds += int(buf)
buf = ''
elif c.isdigit():
buf += c
else:
# oh well
pass
return seconds
id = None
time = None
parsed = urlparse.urlparse(url)
args = urlparse.parse_qs(parsed.query)
#debug('path: ' + str(parsed.path))
if parsed.path.find('/videos/') == 0:
id = parsed.path[8:]
#debug('id: ' + str(id))
if id and id.isdigit():
time = args.get('t', [None])[0]
if time:
time = _twitch_time_to_seconds(time)
else:
# https://player.twitch.tv/?video=v187746182&autoplay=false&time=
args = urlparse.parse_qs(parsed.query)
id = args.get('video', [None])[0]
if id:
id = id[1:]
time = args.get('time', [None])[0]
if time:
time = _twitch_time_to_seconds(time)
return (id, time)
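# Worked example (editor's note): for
# 'https://www.twitch.tv/videos/161472611?t=07h49m09s' this returns
# ('161472611', 28149), since 7*3600 + 49*60 + 9 = 28149 seconds.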
def get_twitch_plugin_url(web_url):
data = get_twitch_info(web_url)
if data:
id = data[0]
time = data[1]
if id:
#@dispatcher.register(MODES.PLAY, kwargs=['seek_time', 'channel_id', 'video_id', 'slug', 'ask', 'use_player', 'quality'])
args = {'play': 'plugin://plugin.video.twitch/?mode=play&video_id={}'.format(id)}
if time:
args['time'] = time
return build_url(args)
debug('failed to get twitch id for ' + repr(web_url))
compress = True
def by_name(lhs, rhs):
return cmp(lhs.name, rhs.name)
def build():
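    # Menu structure (editor's summary of the levels handled below):
    #   level 0 - choose ordering: 'By Name' or 'By Year'
    #   level 1 - list tournament names or years, optionally limited by the
    #             'last_n_years_to_fetch' setting (with a 'Load all' escape)
    #   level 2 - drill into the other dimension (years for a name, or names
    #             for a year)
    #   level 3 - list the stages of the selected event
    #   level 4 - list the VODs of the selected stage as playable items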
level = int(args.get('level', 0))
debug("level " + repr(level))
args.update({'level': level+1})
data0 = args.get('data0', None)
if data0:
if compress:
#debug("data1z " + repr(data0))
data0 = zlib.decompress(data0)
#debug("data1p " + repr(data0))
data0 = pickle.loads(data0)
#debug("data0 " + repr(data0))
data1 = args.get('data1', None)
if data1:
if compress:
#debug("data1z " + repr(data1))
data1 = zlib.decompress(data1)
#debug("datap " + repr(data1))
data1 = pickle.loads(data1)
#debug("data1 " + repr(data1))
year = args.get('year', None)
if year:
year = int(year)
debug("year " + repr(year))
name = args.get('name', None)
debug("name " + repr(name))
stage_name = args.get('stage_name', None)
debug("stage_name " + repr(stage_name))
overrideFilter = args.get('override_filter', False)
debug("overrideFilter " + repr(overrideFilter))
# yearsFiltered = args.get('years_filtered', None)
# debug("yearsFiltered " + repr(yearsFiltered))
if level == 0:
args.update({'order': 0})
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem('By Name'), isFolder=1)
args.update({'order': 1})
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem('By Year'), isFolder=1)
elif level == 1:
order = int(args.get('order', 0))
# want only the last n years worth of shows?
yearsFiltered = not overrideFilter and lastNYears >= 1
if yearsFiltered:
currentYear = date.today().year + 1
years = [x for x in range(currentYear-(lastNYears), currentYear)]
#debug("years: " + repr(years))
sc2 = sc2links.Sc2Links(years=years)
else:
sc2 = sc2links.Sc2Links()
children = sc2.children
# debug("children: " + repr(children))
data = pickle.dumps(children)
if compress:
data = zlib.compress(data)
args.update({'data0': data})
if order == 1:
years = [x.year for x in children]
years = set(years)
years = sorted(years, reverse=True)
#debug('years: ' + repr(years))
for year in years:
displayYear = str(year or 'Other')
args.update({'year': year or -1})
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(displayYear), isFolder=1)
else:
names = [x.name for x in children]
names = set(names)
names = sorted(names)
#debug('names: ' + repr(names))
for name in names:
args.update({'name': name})
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(name), isFolder=1)
if yearsFiltered: # load all item
args.update({'override_filter': True, 'level': level})
debug('args ' + repr(args))
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem("Load all"), isFolder=1)
elif level == 2:
children = data0
if year is None:
filtered = [child for child in children if child.name == name]
years = [x.year for x in filtered]
years = set(years)
years = sorted(years, reverse=True)
#debug('# children by name' + repr(len(years)))
for year in years:
displayYear = str(year or 'Other')
args.update({'year': year or -1})
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(displayYear), isFolder=1)
else:
filtered = [child for child in children if child.year == year]
sortedByName = sorted(filtered, cmp=by_name)
#debug('# children by year' + repr(len(sortedByName)))
for child in sortedByName:
args.update({'name': child.name})
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(child.name), isFolder=1)
elif level == 3:
item = None
for child in data0:
if child.name == name and child.year == year:
item = child
break
if item:
children = item.children
data = pickle.dumps(children)
if compress:
data = zlib.compress(data)
args.update({'data1': data})
for child in children:
args.update({'stage_name': child.name})
url = build_url(args)
xbmcplugin.addDirectoryItem(handle, url, xbmcgui.ListItem(child.name), isFolder=1)
elif level == 4:
item = None
for child in data1:
if child.name == stage_name:
item = child
break
if item:
vods = item.children
for vod in vods:
url = vod.url
debug('vod url' + repr(url))
if not url: # match didn't take place
continue
plugin_url = get_youtube_plugin_url(url) or get_twitch_plugin_url(url)
debug('plugin url:' + repr(plugin_url))
if not plugin_url: # couldn't resolve vod url
continue
label = 'Match ' + str(vod.match_number)
if revealMatches:
if len(vod.side2):
label += u' {} - {}'.format(vod.side1, vod.side2)
else:
label += ' ' + vod.side1
xbmcplugin.addDirectoryItem(handle, plugin_url, xbmcgui.ListItem(label), False)
xbmcplugin.endOfDirectory(handle)
def play(url, args):
time = args.get('time', None)
# BROKEN URL RESOLVER
# media_url = urlresolver.resolve('https://www.youtube.com/watch?v=7OXVPgu6urw')
# # Create a playable item with a path to play.
# play_item = xbmcgui.ListItem(path=url)
# play_item.setProperty('StartOffset', time)
# # Pass the item to the Kodi player.
# xbmcplugin.setResolvedUrl(handle, True, listitem=play_item)
# return
# stop whatever is playing
player = xbmc.Player()
player.stop()
# launch youtube plugin
xbmc.executebuiltin('PlayMedia({})'.format(url))
# seek?
if time:
delay = 5
try:
delay = int(addon.getSetting('youtube_seek_delay_s'))
except:
pass
timeout = 20
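        # note: this reads the same 'youtube_seek_delay_s' setting as the
        # delay above; no separate timeout setting is read here.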
try:
timeout = int(addon.getSetting('youtube_seek_delay_s'))
except:
pass
# xbmcgui.Dialog().ok(addonname, "have time: " + time)
# wait for playback
if timeout > 0:
for i in range(0, timeout):
if player.isPlaying():
debug('player is playing')
break
xbmc.sleep(1000)
# seek
if player.isPlaying() and delay > 0:
xbmc.sleep(delay * 1000)
player.seekTime(int(time))
__run = 0
try:
url = args.get('play', '')
if url:
play(url, args)
else:
build()
debug("run: " + repr(__run))
__run += 1
except Exception as e:
debug(u'Exception: ' + str(e))
map(debug, str(traceback.format_exc()).splitlines())
```
|
{
"source": "jgresty/snyk-threadfix",
"score": 3
}
|
#### File: snyk-threadfix/snyk_threadfix/utils.py
```python
import json
import os
import sys
from pathlib import Path
import requests
def get_default_token_path():
home = str(Path.home())
default_token_path = "%s/.config/configstore/snyk.json" % home
return default_token_path
def get_token_from_file(token_file_path):
path = token_file_path
with open(path, "r") as f:
json_obj = json.load(f)
token = json_obj["api"]
return token
def get_token_by_env_var():
return os.environ.get("SNYK_TOKEN")
def get_token():
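    # Prefer the SNYK_TOKEN environment variable; otherwise fall back to the
    # token file written by `snyk auth` (~/.config/configstore/snyk.json).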
t = get_token_by_env_var()
if not t:
token_file_path = get_default_token_path()
t = get_token_from_file(token_file_path)
return t
def get_snyk_api_headers(snyk_token):
snyk_api_headers = {"Authorization": "token %s" % snyk_token}
return snyk_api_headers
def validate_token(snyk_token):
h = get_snyk_api_headers(snyk_token)
full_api_url = "https://snyk.io/api/v1/"
resp = requests.get(full_api_url, headers=h)
return resp.ok
```
#### File: snyk-threadfix/tests/test_tokenparsing.py
```python
import json
import tempfile
import pytest
import requests_mock
from mock import patch
from snyk_threadfix import main, utils
def test_get_token_from_file_fails_if_token_file_not_found():
with pytest.raises(FileNotFoundError) as pytest_wrapped_exception:
t = utils.get_token_from_file("/some/path/that/does/not/exist/snyk.json")
assert pytest_wrapped_exception.type == FileNotFoundError
assert pytest_wrapped_exception.value.args[1] == "No such file or directory"
def test_get_token_from_file_fails_if_token_file_cant_be_parsed():
"""Build a temp file with an invalid spec and make sure it fails"""
obj_token_json = {"some-invalid-key": "test-token"}
with tempfile.NamedTemporaryFile() as temp_token_file:
with open(temp_token_file.name, "w") as temp_token_file_write:
json.dump(obj_token_json, temp_token_file_write, indent=2)
with pytest.raises(KeyError) as pytest_wrapped_exception:
temp_filename = temp_token_file.name
returned_token = utils.get_token_from_file(temp_filename)
assert pytest_wrapped_exception.type == KeyError
assert pytest_wrapped_exception.value.args[0] == "api"
def test_get_token_works_with_well_formed_token_file():
obj_token_json = {"api": "test-token"}
with tempfile.NamedTemporaryFile() as temp_token_file:
with open(temp_token_file.name, "w") as temp_token_file_write:
json.dump(obj_token_json, temp_token_file_write, indent=2)
temp_filename = temp_token_file.name
returned_token = utils.get_token_from_file(temp_filename)
assert returned_token == "test-token"
def test_snyk_auth_header_is_correct():
    token = "test-token"
auth_headers = utils.get_snyk_api_headers(token)
assert auth_headers["Authorization"] == "token test-token"
def test_main_fails_if_token_not_set_in_file_or_env_var(monkeypatch, capsys):
monkeypatch.delenv(
"SNYK_TOKEN", raising=False
) # raising=True means raise an exception if this var doesn't exist to delete
t = utils.get_token_by_env_var() # should return None if not set
assert t is None
with patch(
"snyk_threadfix.utils.get_default_token_path",
return_value="/some/path/that/does/not/exist/snyk.json",
):
with pytest.raises(SystemExit) as pytest_wrapped_exception:
main.main(["--org-id", "abc123", "--project-ids", "123"])
captured_out = capsys.readouterr()
assert (
"Error fetching Snyk token. Set SNYK_TOKEN env var or run `snyk auth <your-token>` (see https://github.com/snyk/snyk#installation)."
in captured_out.err
)
assert pytest_wrapped_exception.type == SystemExit
def test_main_fails_if_token_file_cant_be_parsed(monkeypatch, capsys):
"""Build a temp file with an invalid spec and make the main fails properly"""
# make sure SNYK_TOKEN appears to not be set for this test (in case this test is run in an env where it is set)
monkeypatch.delenv(
"SNYK_TOKEN", raising=False
) # raising=True means raise an exception if this var doesn't exist to delete
t = utils.get_token_by_env_var() # should return None if not set
assert t is None
obj_token_json = {"some-invalid-key": "test-token"}
with tempfile.NamedTemporaryFile() as temp_token_file:
with open(temp_token_file.name, "w") as temp_token_file_write:
json.dump(obj_token_json, temp_token_file_write, indent=2)
with patch(
"snyk_threadfix.utils.get_default_token_path",
return_value=temp_token_file.name,
):
with pytest.raises(SystemExit) as pytest_wrapped_exception:
main.main(["--org-id", "abc123", "--project-ids", "123"])
captured_out = capsys.readouterr()
assert (
"Error fetching Snyk token. Set SNYK_TOKEN env var or run `snyk auth <your-token>` (see https://github.com/snyk/snyk#installation)."
in captured_out.err
)
assert pytest_wrapped_exception.type == SystemExit
def test_validate_token():
with requests_mock.mock() as m:
m.get(
"https://snyk.io/api/v1/",
status_code=200,
text='{"what orgs can the current token access?":"https://snyk.io/api/v1/orgs","what projects are owned by this org?":"https://snyk.io/api/v1/org/:id/projects","test a package for issues":"https://snyk.io/api/v1/test/:packageManager/:packageName/:packageVersion"}',
)
is_valid = utils.validate_token("test-token")
assert is_valid
def test_validate_token_fails_for_invalid_token():
with requests_mock.mock() as m:
m.get("https://snyk.io/api/v1/", status_code=401)
is_valid = utils.validate_token("test-token")
assert not is_valid
def test_main_fails_if_validate_token_fails():
with patch("snyk_threadfix.utils.get_token_from_file", return_value="test-token"):
with patch("snyk_threadfix.main.validate_token", return_value=False):
with pytest.raises(Exception) as pytest_wrapped_exception:
main.main(["--org-id", "abc123", "--project-ids", "123"])
assert pytest_wrapped_exception.type == main.SnykTokenInvalidError
def test_get_token_by_env_var_works(monkeypatch):
monkeypatch.setenv("SNYK_TOKEN", "SOME_TOKEN", prepend=False)
t = utils.get_token_by_env_var()
assert t == "SOME_TOKEN"
monkeypatch.delenv("SNYK_TOKEN", raising=True)
t = utils.get_token_by_env_var() # should return None if not set
assert t is None
def test_verify_token_comes_from_env_var_rather_than_file_if_both_set(monkeypatch):
with patch(
"snyk_threadfix.utils.get_token_from_file", return_value="token-from-file"
) as get_token_from_file_mock:
monkeypatch.setenv("SNYK_TOKEN", "SOME_TOKEN_FROM_ENV_VAR", prepend=False)
t = utils.get_token()
assert t == "SOME_TOKEN_FROM_ENV_VAR"
assert get_token_from_file_mock.call_count == 0
```
|
{
"source": "jgresula/jagpdf",
"score": 3
}
|
#### File: apitest/py/annots.py
```python
import jagpdf
import sys
import os
import jag.testlib as testlib
g_font = testlib.EasyFontTTF()
#raw_input("attach")
def do_some(doc):
doc.page_start(5.9*72, 5.9*72)
page=doc.page().canvas()
page.color("fs", 0.75)
page.rectangle(20, 20, 72, 72)
page.path_paint('s')
page.text_font(g_font(8))
page.text(25, 57, "a gray rectangle")
page.text(25, 45, "www.boost.org")
doc.page().annotation_uri(20, 20, 72, 72, "http://www.boost.org")
doc.page_end()
def do_goto_each_other(doc):
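    # Destinations are reserved up front because the pages link to each other;
    # they are resolved to concrete pages at the end via
    # destination_define_reserved().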
def page_vis(this,other):
page=doc.page().canvas()
page.text_font(g_font(8))
page.color("fs", 0.75)
page.rectangle(20, 20, 72, 72)
page.path_paint('s')
page.text(25, 57, "This is Page *%d*" % this)
page.text(25, 45, "Click for page %d" % other)
return page
dest_1st = doc.destination_reserve()
page_nr = doc.page_number()
doc.page_start(5.9*72, 5.9*72)
dest_1st_direct = doc.destination_define("mode=XYZ") #should get page automatically
dest_2nd = doc.destination_reserve()
page = page_vis(page_nr+1, page_nr+2)
doc.page().annotation_goto(20, 20, 72, 72, dest_2nd)
doc.page_end()
doc.page_start(5.9*72, 5.9*72)
page = page_vis(page_nr+2, page_nr+1)
doc.page().annotation_goto(20, 20, 72, 72, dest_1st)
doc.page_end()
doc.page_start(5.9*72, 5.9*72)
page = page_vis(page_nr+3, page_nr+1)
doc.page().annotation_goto(20, 20, 72, 72, dest_1st_direct)
doc.page_end()
doc.destination_define_reserved(dest_1st, "page=%d;mode=XYZ" % page_nr)
doc.destination_define_reserved(dest_2nd, "page=%d;mode=XYZ" % (page_nr+1))
def do_outline_and_goto(doc):
def make_goto(x, y, dest, txt):
page.color("fs", 0.75)
page.rectangle(x, y, 36, 36)
page.path_paint('s')
page.text(x+10, y+10, txt)
doc.page().annotation_goto(x, y, 36, 36, dest)
page_nr = doc.page_number()
p25 = doc.destination_define("mode=XYZ;zoom=0.25")
doc.page_start(5.9*72, 5.9*72)
page = doc.page().canvas()
p50 = doc.destination_define("mode=XYZ;zoom=0.50")
p75 = doc.destination_reserve()
doc.outline().item("Page %d" % (page_nr+1))
doc.outline().level_down()
doc.outline().item("zoom 25%", p25)
doc.outline().item("zoom 50%", p50)
doc.outline().item("zoom 75%", p75)
doc.outline().level_up()
page.text_font(g_font(8))
make_goto(20, 20, p25, "25%")
make_goto(20, 60, p50, "50%")
make_goto(20, 100, p75, "75%")
doc.page_end()
doc.destination_define_reserved(p75, "page=%d;mode=XYZ;zoom=0.75" % page_nr)
def do_goto_single_page(doc):
pheight = 5.9*72
doc.page_start(5.9*72, pheight)
page = doc.page().canvas()
page.color("fs", 0.75)
page.rectangle(20, 20, 72, 72)
page.rectangle(20, pheight-92, 72, 72)
page.path_paint('s')
page.text_font(g_font(8))
page.text(25, 57, "click to zoom")
page.text(25, 45, "to 200%")
page.text(25, pheight-47, "click to fit height")
doc.page().annotation_goto(20, pheight-92, 72, 72, "page=%d;mode=FitV" % doc.page_number())
doc.page().annotation_goto(20, 20, 72, 72, "page=%d;mode=XYZ;left=0;top=%lf;zoom=2" % (doc.page_number(),pheight))
doc.page_end()
def test_main(argv=None):
cfg = testlib.test_config()
#cfg.set("doc.trace_level", "5")
doc = testlib.create_test_doc(argv, 'annots.pdf', cfg)
g_font.set_writer(doc)
do_some(doc)
do_goto_single_page(doc)
do_goto_each_other(doc)
do_outline_and_goto(doc)
doc.finalize()
if __name__ == "__main__":
test_main()
```
#### File: apitest/py/colorspaces.py
```python
import jagpdf
import os
import sys
import jag.testlib as testlib
## ===============================================
## GENERAL
## ===============================================
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
class Bind:
def __init__(self, fn, *args):
self.args = args
self.fn = fn
def __call__(self, *args):
return self.fn(*(self.args + args))
g_font = testlib.EasyFont()
def draw_rect_grid(page, spec, it):
# spec.x0,y0,w,h,nx,ny,step_x, step_y
for yc in range(spec.num_y):
for xc in range(spec.num_x):
x = spec.x0 + xc*spec.w + xc*spec.step_x
y = spec.y0 + yc*spec.h + yc*spec.step_y
it(xc,yc)
page.rectangle(x, y, spec.w, spec.h)
page.path_paint("fs")
def grid1(page, rng, x, y):
page.color("fs", float(x)/rng)
def grid3(page, rng, fix, x, y):
if fix == 0:
page.color("fs", float(x)/rng, float(y)/rng, 0.0)
elif fix == 1:
page.color("fs", float(x)/rng, 0.0, float(y)/rng)
elif fix == 2:
page.color("fs", 0.0, float(x)/rng, float(y)/rng)
else:
assert(0)
def grid4(page, rng, fix, x, y):
if fix == 0:
page.color("fs", float(x)/rng, float(y)/rng, 0.0, 0.0)
elif fix == 1:
page.color("fs", float(x)/rng, 0.0, float(y)/rng, 0.0)
elif fix == 2:
page.color("fs", 0.0, float(x)/rng, float(y)/rng, 0.0)
else:
assert(0)
## ===============================================
## DEVICE SPACES
## ===============================================
def do_device_spaces(writer):
writer.page_start(5.9*72, 6.2*72)
page = writer.page().canvas()
page.text_font(g_font(12))
page.text(20, 20, "DeviceGray")
page.state_save()
page.color_space("fs", jagpdf.CS_DEVICE_GRAY)
spec = Bunch(x0=20, y0=40, w=13, h=13, num_x=24, num_y=1, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(grid1,page,spec.num_x))
page.state_restore()
page.text(20, 70, "DeviceRGB")
page.state_save()
page.color_space("fs", jagpdf.CS_DEVICE_RGB)
spec = Bunch(x0=20, y0=85, w=7, h=7, num_x=14, num_y=14, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 0))
spec.x0 = 2.1*72
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 1))
spec.x0 = 3.9*72
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 2))
page.state_restore()
page.text(20, 230, "DeviceCMYK")
page.state_save()
page.color_space("fs", jagpdf.CS_DEVICE_CMYK)
spec = Bunch(x0=20, y0=250, w=7, h=7, num_x=14, num_y=14, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(grid4, page, spec.num_x, 0))
spec.x0 = 2.1*72
draw_rect_grid(page, spec, Bind(grid4, page, spec.num_x, 1))
spec.x0 = 3.9*72
draw_rect_grid(page, spec, Bind(grid4, page, spec.num_x, 2))
page.state_restore()
page.text_font(g_font(14))
page.text(20, 410, "Device Color Spaces")
writer.page_end()
## ===============================================
## LAB SPACES
## ===============================================
def def_calrgb(writer, gamma=None, mtx=""):
calrgb = "calrgb;white=0.9505, 1.0890"
if gamma:
calrgb += ";gamma=%.3f, %.3f, %.3f" % (gamma, gamma, gamma)
if mtx:
calrgb += ";matrix=0.4497, 0.2446, 0.0252, 0.3163, 0.6720, 0.1412, 0.1845, 0.0833, 0.9227"
return writer.color_space_load(calrgb), gamma and gamma or "default", mtx
def lab_grid3(page, rng, L, x, y):
x0 = ((float(x)/rng)-0.5)*200
y0 = ((float(y)/rng)-0.5)*200
page.color("fs", L, x0, y0)
def do_cie_spaces(writer):
writer.page_start(5.9*72, 11.3*72)
page = writer.page().canvas()
page.text_font(g_font(12))
curr_y = 20
def def_calgray(gamma=None):
spec = "calgray; white=0.9505, 1.0890"
if gamma:
spec += ';gamma=%.3f' % gamma
return writer.color_space_load(spec), gamma and gamma or "default"
for cs_id, gamma in [def_calgray(), def_calgray(2.2)]:
page.text(20, curr_y, "CalGray - gamma " + str(gamma))
curr_y += 15
page.state_save()
page.color_space("fs", cs_id)
spec = Bunch(x0=20, y0=curr_y, w=13, h=13, num_x=24, num_y=1, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(grid1,page,spec.num_x))
page.state_restore()
curr_y += 35
page.text(20, curr_y, "CIE Lab")
page.text(110, curr_y, "L=25")
page.text(240, curr_y, "L=75")
page.text(370, curr_y, "L=100")
curr_y += 15
cielab = "cielab;white=0.9505, 1.0890"
cielabid = writer.color_space_load(cielab)
page.state_save()
page.color_space("fs", cielabid)
spec = Bunch(x0=20, y0=curr_y, w=7, h=7, num_x=14, num_y=14, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(lab_grid3, page, spec.num_x, 25))
spec.x0 = 2.1*72
draw_rect_grid(page, spec, Bind(lab_grid3, page, spec.num_x, 75))
spec.x0 = 3.9*72
draw_rect_grid(page, spec, Bind(lab_grid3, page, spec.num_x, 100))
page.state_restore()
curr_y += 145
for cs_id, gamma, mtx, in [def_calrgb(writer), def_calrgb(writer,2.2), def_calrgb(writer,1.8, ' - transformed')]:
page.text(20, curr_y, "CalRGB - gamma " + str(gamma) + str(mtx))
curr_y += 15
page.state_save()
page.color_space("fs", cs_id)
spec = Bunch(x0=20, y0=curr_y, w=7, h=7, num_x=14, num_y=14, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 0))
spec.x0 = 2.1*72
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 1))
spec.x0 = 3.9*72
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 2))
page.state_restore()
curr_y += 140
curr_y += 20
page.text_font(g_font(14))
page.text(20, curr_y, "CIE Color Spaces - white point [0.9505, 0.0, 1.0890]")
writer.page_end()
## ===============================================
## ICC Based spaces
## ===============================================
g_res_dir = os.path.expandvars('${JAG_TEST_RESOURCES_DIR}/icc')
g_icc_files = ['AdobeRGB1998.icc',\
"sRGB Color Space Profile.icm",\
'WideGamutRGB.icc',\
'AppleRGB.icc',\
'CIERGB.icc',\
'ColorMatchRGB.icc']
g_iccs = [os.path.join(g_res_dir, icc) for icc in g_icc_files]
def def_iccbased(writer, icc_file):
spec = "icc; components=3; profile=" + icc_file
# alternate is ignored
return writer.color_space_load(spec), os.path.basename(icc_file)
def do_iccbased_spaces(writer):
writer.page_start(5.9*72, 14*72)
page = writer.page().canvas()
page.text_font(g_font(12))
curr_y = 20
for cs_id, desc in [def_iccbased(writer,d) for d in g_iccs]:
page.text(20, curr_y, desc)
curr_y += 15
page.state_save()
page.color_space("fs", cs_id)
spec = Bunch(x0=20, y0=curr_y, w=7, h=7, num_x=14, num_y=14, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 0))
spec.x0 = 2.1*72
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 1))
spec.x0 = 3.9*72
draw_rect_grid(page, spec, Bind(grid3, page, spec.num_x, 2))
page.state_restore()
curr_y += 140
curr_y += 20
page.text_font(g_font(14))
page.text(20, curr_y, "ICC Based color spaces")
writer.page_end()
## ===============================================
## Indexed spaces
## ===============================================
def palette332():
result = []
for i in range(256):
result += int(255 * ((i&7)/7.0)),\
int(255 * (((i>>3)&7)/7.0)),\
int(255 * (((i>>6)&3)/3.0))
return result
def palette431():
result = []
for i in range(256):
result += int(255 * ((i&15)/15.0)),\
int(255 * (((i>>4)&7)/7.0)),\
int(255 * (((i>>7)&1)/1.0))
return result
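# The 256-entry palettes encode RGB in the index bits: palette332 uses bits
# 0-2 for red, 3-5 for green and 6-7 for blue; palette431 uses a 4/3/1 split.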
def indexed_fn(page, x, y):
page.color("fs", y*16+x)
def do_indexed_spaces(writer):
writer.page_start(5.9*72, 7.5*72)
page = writer.page().canvas()
page.text_font(g_font(12))
curr_y = 20
curr_x = 20
def def_indexed(csid, pal):
palette = "by-id; id=%d; palette=%s" % (csid,
','.join([str(c) for c in pal]))
return writer.color_space_load(palette)
calrgb_id = def_calrgb(writer,2.2)[0]
icc_id = def_iccbased(writer,g_iccs[1])[0] #sRGB
for cs_id, desc in [(def_indexed(jagpdf.CS_DEVICE_RGB, palette332()), "332 DeviceRGB"),\
(def_indexed(jagpdf.CS_DEVICE_RGB, palette431()), "431 DeviceRGB"),\
                        (def_indexed(calrgb_id, palette332()), "332 CalRGB - g2.2"),\
                        (def_indexed(calrgb_id, palette431()), "431 CalRGB - g2.2"),\
(def_indexed(icc_id, palette332()), "332 ICC - sRGB"),\
(def_indexed(icc_id, palette431()), "431 ICC - sRGB"),\
]:
page.text(curr_x, curr_y, "Indexed - " + desc)
page.state_save()
page.color_space("fs", cs_id)
spec = Bunch(x0=curr_x, y0=curr_y+15, w=6, h=6, num_x=16, num_y=16, step_x=2, step_y=2)
draw_rect_grid(page, spec, Bind(indexed_fn, page))
page.state_restore()
curr_x += 200
if curr_x > 350:
curr_x = 20
curr_y += 155
curr_y += 20
page.text_font(g_font(14))
page.text(20, curr_y, "Indexed color spaces")
writer.page_end()
def test_main(argv=None):
writer = testlib.create_test_doc(argv, 'colorspaces.pdf')
g_font.set_writer(writer)
do_iccbased_spaces(writer)
do_cie_spaces(writer)
do_device_spaces(writer)
do_indexed_spaces(writer)
writer.finalize()
if __name__ == '__main__':
test_main()
```
#### File: apitest/py/colorstate.py
```python
import jagpdf
import os
import jag.testlib as testlib
import sys
def do_page(writer):
writer.page_start(3*72, 80)
page = writer.page().canvas()
font = testlib.EasyFont(writer)
page.text_font(font())
page.color_space("f", jagpdf.CS_DEVICE_RGB)
page.color("f", .7, 0, 0)
page.text(20, 20, "This text should be red!")
page.state_save()
page.color_space("f", jagpdf.CS_DEVICE_CMYK)
page.color("f", 1, 1, 0, .45)
page.text(20, 40, "This text should be dark blue!")
page.state_restore()
page.text(20, 60, "This text should be red again!")
writer.page_end()
def test_main(argv=None):
doc_writer = testlib.create_test_doc(argv, 'colorstate.pdf')
do_page(doc_writer)
doc_writer.finalize()
if __name__ == '__main__':
test_main()
```
#### File: apitest/py/cubespng.py
```python
import jagpdf
import jag.testlib as testlib
import os
g_png_dir = os.path.expandvars('${JAG_TEST_RESOURCES_DIR}/images/')
media = 400, 400
def get_pattern(doc):
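    # A 20x20 pattern cell with two black 10x10 squares in opposite corners;
    # tiled with step=20,20 it produces the checkerboard background.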
pcell = doc.canvas_create()
pcell.color_space('f', jagpdf.CS_DEVICE_GRAY)
pcell.color('f', 0)
pcell.rectangle(0, 0, 10, 10)
pcell.rectangle(10, 10, 10, 10)
pcell.path_paint('f')
return doc.tiling_pattern_load('step=20, 20', pcell)
def image_pos(img):
img_width = img.width() / img.dpi_x() * 72
img_height = img.height() / img.dpi_y() * 72
x = (media[0]-img_width) / 2
y = (media[1]-img_height) / 2
return x, y
def test_main(argv=None):
cfg = jagpdf.create_profile()
cfg.set("doc.compressed", "1")
doc = testlib.create_test_doc(argv, 'transparent_cubes.pdf', cfg)
img = doc.image_load_file(os.path.join(g_png_dir, 'cubes_transparent.png'))
doc.page_start(*media)
canvas = doc.page().canvas()
canvas.color_space_pattern('f')
canvas.pattern('f', get_pattern(doc))
canvas.rectangle(0, 0, *media)
canvas.path_paint('f')
canvas.image(img, *image_pos(img))
doc.page_end()
doc.finalize()
if __name__ == '__main__':
test_main()
```
#### File: apitest/py/customimage.py
```python
import os
import jag.imagemanip as imagemanip
import jag.testlib as testlib
import jagpdf
import tempfile
import sys
import md5
# mask interpolation - it seems that it does not have any effect
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def add(self, **kwds):
self.__dict__.update(kwds)
return self
g_font = testlib.EasyFont()
g_img_dim = 72, 72
g_alt_img_dim = 67, 59
g_temp_files = testlib.TemporaryFiles()
def prepare_page(doc, dim):
doc.page_start(*dim)
page = doc.page().canvas()
testlib.do_background(page, (0.6, 0.3, 0.2), dim, 5)
page.color_space("fs", jagpdf.CS_DEVICE_RGB)
page.color("fs", 1.0, 1.0, 1.0)
page.text_font(g_font(6))
grid = imagemanip.grid_coords(3, 80)
placer = imagemanip.ImagePlacer(doc, page, 20, 20, 95, 10)
return page, grid, placer
def default_cfg():
return Bunch(img_dim=g_img_dim, spec_fn=None, bits_a = [16, 8, 4, 2, 1], dpi=(72,72), from_file=False)
g_img_cache = imagemanip.img_cache()
def get_img_id(doc, image, bpc, nr_channels, cs, p = default_cfg()):
spec = doc.image_definition()
spec.dimensions(*p.img_dim)
spec.bits_per_component(bpc)
spec.dpi(*p.dpi)
spec.color_space(cs)
spec.format(jagpdf.IMAGE_FORMAT_NATIVE)
img_data = g_img_cache.img_data(image, bpc, nr_channels, *p.img_dim)
if p.from_file:
handle, tmp_file = tempfile.mkstemp()
img_data.tofile(open(tmp_file,'wb'))
os.close(handle)
g_temp_files.add(tmp_file)
if p.from_file:
spec.file_name(tmp_file)
else:
spec.data(img_data)
desc = "%d levels per channel (%d %s)" % (2**bpc, bpc, bpc > 1 and "bits" or "bit")
if p.spec_fn:
desc2 = p.spec_fn(spec)
if desc2:
desc = desc2
id_ = doc.image_load(spec)
testlib.must_throw(doc.image_load, spec) # cannot load the same spec twice
return id_, desc
def do_generic(doc, image, cs, nr_channels, title, p = default_cfg()):
page_dim = 4.8*72, 3.8*72
page, grid, placer = prepare_page(doc, page_dim)
for bpc in p.bits_a:
img, desc = get_img_id(doc, image, bpc, nr_channels, cs, p)
placer(img, desc, *grid.next())
page.text_font(g_font(14))
page.text(20, page_dim[1] - 30, title)
doc.page_end()
###########################################################################
LineX_d = imagemanip.image(imagemanip.LineX, *g_img_dim)
LineY_d = imagemanip.image(imagemanip.LineY, *g_img_dim)
InvertedEllipseC_d = imagemanip.image(imagemanip.InvertedEllipseC, *g_img_dim)
Rhomboid_d = imagemanip.image(imagemanip.Rhomboid, *g_img_dim)
Cross_d = imagemanip.image(imagemanip.Cross, *g_img_dim)
Checkboard_d = [(x%4 in [0,1] and y%4 in [0,1]) and 1 or 0 for x,y in imagemanip.grid_coords(*g_img_dim)]
LineX_d_alt = imagemanip.image(imagemanip.LineX, *g_alt_img_dim)
LineY_d_alt = imagemanip.image(imagemanip.LineY, *g_alt_img_dim)
InvertedEllipseC_d_alt = imagemanip.image(imagemanip.InvertedEllipseC, *g_alt_img_dim)
Rhomboid_d_alt = imagemanip.image(imagemanip.Rhomboid, *g_alt_img_dim)
Cross_d_alt = imagemanip.image(imagemanip.Cross, *g_alt_img_dim)
# odd-numbered masks go through a file, even-numbered ones through memory
class HardMask:
def set_doc(self, doc):
self.dim = 64, 64
self.doc = doc
self.registry = {}
generic = imagemanip.image(imagemanip.InvertedEllipseC, *self.dim)
self.mask_data = imagemanip.pack_bits(generic, 1, 1, *self.dim)
self.req_nr = 0
self.test_errors()
def test_errors(self):
spec = self.doc.define_image_mask()
testlib.must_throw(self.doc.register_image_mask, spec)
spec.dimensions(*self.dim)
testlib.must_throw(self.doc.register_image_mask, spec)
def id(self, interpolate, reverse):
key = (interpolate, reverse)
if key in self.registry:
return self.registry[key]
else:
self.req_nr += 1
spec = self.doc.define_image_mask()
spec.dimensions(*self.dim)
spec.interpolate(interpolate)
spec.bit_depth(1)
if self.req_nr%2:
handle, tmp_file = tempfile.mkstemp()
self.mask_data.tofile(open(tmp_file,'wb'))
os.close(handle)
g_temp_files.add(tmp_file)
spec.file_name(tmp_file)
else:
spec.data(self.mask_data)
if reverse:
pass
#spec.reverse()
id = self.doc.register_image_mask(spec)
self.registry[key] = id
return id
g_hardmask = HardMask()
###########################################################################
class hard_mask_fn:
def __init__(self):
self.val = [(0,1), (1,1), (0,0), (1,0)]
def __call__(self, spec):
val = self.val.pop()
spec.image_mask(g_hardmask.id(*val))
return "interpolate .. %s, reverse %s" % (val[0] and "yes" or "no", val[1] and "yes" or "no")
class gamma_fn:
def __init__(self):
self.val = [1.0, 1.4, 1.8, 2.2, 2.6, 3.0]
def __call__(self, spec):
val = self.val.pop()
spec.gamma(val)
return 'gamma ' + str(val)
class decode_fn:
def __init__(self, channels):
self.val = [(0, 1), (1, 0), (0, 0.5), (0.5, 1), (0.25, 0.75), (0.4, 0.6)]
self.channels = channels
def __call__(self, spec):
val = self.val.pop()
spec.decode(self.channels*val)
return 'decode ' + ("[%2f, %2f]" % val)
class alternate_fn:
def __init__(self, doc, img_id):
self.val = [img_id, None]
self.supported = doc.version() >= 3
def __call__(self, spec):
val = self.val.pop()
if val:
if self.supported:
spec.alternate_for_printing(val)
return 'alternated'
else:
return 'not alternated'
class rendering_intent_fn:
def __init__(self):
self.val = [None,\
"RI_ABSOLUTE_COLORIMETRIC",\
"RI_RELATIVE_COLORIMETRIC",\
"RI_SATURATION",\
"RI_PERCEPTUAL"]
def __call__(self, spec):
val = self.val.pop()
if val:
spec.rendering_intent(getattr(jagpdf, val))
return val[3:]
else:
return 'default'
class interpolate_fn:
def __init__(self):
self.val = [0, 1]
def __call__(self, spec):
val = self.val.pop()
spec.interpolate(val)
return val and "interpolated" or "not interpolated"
class color_key_mask8_fn:
def __init__(self, channels):
self.val = [None, (0,127), (127,255), (64,192), (96,160), (32,224)]
self.ch = channels
def __call__(self, spec):
val = self.val.pop()
if val:
spec.color_key_mask(self.ch*val)
return "<%.2f, %.2f>" % (val[0]/255.0, val[1]/255.0)
else:
return "not-masked"
# return Bunch(img_dim=g_img_dim, spec_fn=None, bits_a = [16, 8, 4, 2, 1])
def do_grayscale(doc):
for idata in ["LineX", "Cross", "InvertedEllipseC", "Rhomboid"]:
do_generic(doc, globals()[idata+'_d'], jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - " + idata)
cfg = default_cfg().add(from_file=True)
do_generic(doc, LineX_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - LineX (from file)", cfg)
cfg = default_cfg().add(img_dim = g_alt_img_dim)
do_generic(doc, LineY_d_alt, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - %dx%d" % g_alt_img_dim, cfg)
cfg = default_cfg().add(bits_a = 6*[16], spec_fn = gamma_fn())
do_generic(doc, LineY_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - gamma", cfg)
cfg = default_cfg().add(dpi=(144,144))
do_generic(doc, LineY_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - 144 dpi", cfg)
cfg = default_cfg().add(bits_a = 6*[16], spec_fn = decode_fn(1))
do_generic(doc, Cross_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - decode", cfg)
alt_img_id, desc = get_img_id(doc, Cross_d, 16, 1, jagpdf.CS_DEVICE_GRAY)
cfg = default_cfg().add(bits_a = 2*[16], spec_fn = alternate_fn(doc, alt_img_id))
do_generic(doc, LineX_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - alternate for print", cfg)
Cross_d_low = imagemanip.image(imagemanip.Cross, 18, 18)
cfg = default_cfg().add(bits_a = 2*[16], dpi=(18,18), spec_fn = interpolate_fn(), img_dim = (18,18))
do_generic(doc, Cross_d_low, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - interpolate", cfg)
cfg = default_cfg().add(bits_a = 6*[8], spec_fn = color_key_mask8_fn(1))
do_generic(doc, LineX_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - color key mask", cfg)
cfg = default_cfg().add(bits_a = 4*[8], spec_fn = hard_mask_fn())
do_generic(doc, Cross_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - hard mask", cfg)
def do_rgb(doc):
channels = [LineX_d, LineY_d, Rhomboid_d]
generic_image = imagemanip.interleave_channels(*channels)
do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB")
cfg = default_cfg().add(bits_a = 5*[16], spec_fn = rendering_intent_fn())
channels = [LineX_d, LineY_d, Rhomboid_d]
generic_image = imagemanip.interleave_channels(*channels)
do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB - rendering intent", cfg)
cfg = default_cfg().add(img_dim = g_alt_img_dim)
channels = [LineX_d_alt, LineY_d_alt, Rhomboid_d_alt]
generic_image2 = imagemanip.interleave_channels(*channels)
do_generic(doc, generic_image2, jagpdf.CS_DEVICE_RGB, 3, "RGB - %dx%d" % g_alt_img_dim, cfg)
cfg = default_cfg().add(bits_a = 6*[8], spec_fn = color_key_mask8_fn(3))
do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB - color key mask", cfg)
cfg = default_cfg().add(bits_a = 4*[8], spec_fn = hard_mask_fn())
do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB - hard mask", cfg)
def do_cmyk(doc):
channels = [LineX_d, LineY_d, Rhomboid_d, InvertedEllipseC_d]
generic_image = imagemanip.interleave_channels(*channels)
do_generic(doc, generic_image, jagpdf.CS_DEVICE_CMYK, 4, "CMYK")
cfg = default_cfg().add(img_dim = g_alt_img_dim)
channels = [LineX_d_alt, LineY_d_alt, Rhomboid_d_alt, InvertedEllipseC_d_alt]
generic_image2 = imagemanip.interleave_channels(*channels)
do_generic(doc, generic_image2, jagpdf.CS_DEVICE_CMYK, 4, "CMYK - %dx%d" % g_alt_img_dim, cfg)
cfg = default_cfg().add(bits_a = 6*[8], spec_fn = color_key_mask8_fn(4))
do_generic(doc, generic_image, jagpdf.CS_DEVICE_CMYK, 4, "CMYK - color key mask", cfg)
cfg = default_cfg().add(bits_a = 4*[8], spec_fn = hard_mask_fn())
do_generic(doc, generic_image, jagpdf.CS_DEVICE_CMYK, 4, "CMYK - hard mask", cfg)
def do_cielab(doc):
channels = [g_img_dim[0]*g_img_dim[1]*[0.5], LineX_d, LineY_d]
image = imagemanip.interleave_channels(*channels)
do_generic(doc, image, doc.color_space_load('cielab; white=0.9505, 1.089'), 3, "CIE Lab")
def do_indexed(doc):
palette = [str(v) for v in range(256)]
cfg = default_cfg().add(bits_a = 1*[8])
do_generic(doc, LineX_d, doc.color_space_load('gray; palette=' + ','.join(palette)), 1, "Palette", cfg)
def check_errors(doc):
spec = doc.image_definition()
testlib.must_throw(doc.image_load, spec)
spec.data([10,20,30,40])
testlib.must_throw(doc.image_load, spec)
spec.dimensions(2, 2)
testlib.must_throw(doc.image_load, spec)
spec.bits_per_component(8)
testlib.must_throw(doc.image_load, spec)
spec.color_space(jagpdf.CS_DEVICE_GRAY)
spec.format(jagpdf.IMAGE_FORMAT_NATIVE)
doc.image_load(spec)
spec1 = doc.image_definition()
spec1.format(jagpdf.IMAGE_FORMAT_PNG)
spec1.file_name("this_file_does_not_exist")
testlib.must_throw(doc.image_load, spec1)
def do_main(argv=None):
out_files = ["customimage15.pdf",\
"customimage14.pdf",\
"customimage13.pdf",\
"customimage12.pdf"]
# (0,2,1) - removed
for index, version, strict in [(3,2,0), (2,3,1), (1,4,1), (0,5,1)]:
if strict and version == 2:
# it seems that this test branch is flawed as the
            # exceptions are raised in different places than
            # originally intended
checker = testlib.must_throw
cfg = testlib.test_config()
cfg.set("doc.version", str(version))
cfg.set("doc.strict_mode", str(strict))
doc = jagpdf.create_as_stream(testlib.NoopStreamOut(), cfg)
else:
checker = lambda fn, *args: fn(*args)
doc, cfg = testlib.get_legacy_doc(argv,
out_files[index],
{'doc.version':version,
'doc.strict_mode':strict})
g_font.set_writer(doc)
checker(g_hardmask.set_doc, doc)
checker(do_grayscale, doc)
checker(do_rgb, doc)
checker(do_cmyk, doc)
checker(do_cielab, doc)
checker(do_indexed, doc)
check_errors(doc)
doc.finalize()
def test_main(argv=None):
try:
do_main(argv)
#print g_img_cache.stats()
except:
g_temp_files.release()
raise
if __name__ == '__main__':
test_main()
# testlib.profile_test(test_main)
```
#### File: apitest/py/defaultfont2.py
```python
import jagpdf
import jag.testlib as testlib
def test_main(argv=None):
doc = testlib.create_test_doc(argv, 'defaultfont2.pdf')
doc.page_start(200, 36)
doc.page().canvas().text(10, 10, 'written in the default font')
doc.page_end()
doc.page_start(200, 48)
canvas = doc.page().canvas()
canvas.state_save()
courier = doc.font_load('standard;name=Courier;size=10')
canvas.text_font(courier)
canvas.text(10, 10, 'written in Courier')
canvas.state_restore()
doc.page().canvas().text(10, 30, 'written in the default font')
doc.page_end()
doc.page_start(200, 36)
doc.page().canvas().text(10, 10, 'written in the default font')
doc.page_end()
doc.finalize()
if __name__ == "__main__":
test_main()
```
#### File: apitest/py/docoutline.py
```python
import jagpdf
import sys
import os
import jag.testlib as testlib
#raw_input('attach')
g_font = testlib.EasyFontTTF()
g_page_height = 7*72
ITALIC = 1
BOLD = 2
def do_page(doc, title, x=None, y=None, style=None, rgb=None, text=None):
global g_page_height
outline = doc.outline()
if None==text:
text=title
doc.page_start(5.9*72, g_page_height)
if None!=style:
if doc.version() >= 4:
outline.style(style)
if None!=rgb:
if doc.version() >= 4:
outline.color(*rgb)
if None==x:
outline.item(title)
else:
outline.item(title, 'mode=XYZ; left=%f; top=%f' % (x, y))
page = doc.page().canvas()
page.text_font(g_font())
page.text(20, g_page_height/2, text)
doc.page_end()
g_page_height -= 36
def standard_usage(doc):
outline = doc.outline()
do_page(doc, 'P\xc5\x99\xc3\xadli\xc5\xa1 \xc5\xbelu\xc5\xa5ou\xc4\x8dk\xc3\xbd k\xc5\xaf\xc5\x88', text="utf16-be bookmark")
outline.state_save()
do_page(doc, '2nd page - gray', rgb=(0.5,0.5,0.5))
outline.state_restore()
do_page(doc, '3rd page - default style')
outline.level_down()
outline.state_save()
do_page(doc, '3a page - bold red', style=BOLD, rgb=(1.0,0.0,0.0))
outline.state_save()
do_page(doc, '3b page - 1/2h bold italic red', 0, g_page_height/2, style=BOLD|ITALIC)
outline.state_restore()
outline.level_down()
do_page(doc, '3b1 page - bold red')
outline.level_up()
outline.state_restore()
do_page(doc, '3c page - default style')
# the last level_up() is not called intentionally as the
# implementation is supposed to take care about that
def fault_injection():
doc = jagpdf.create_stream(testlib.NoopStreamOut())
outline = doc.outline()
testlib.must_throw(outline.item, "Invalid bookmark")
outline.state_save()
doc.page_start(5.9*72, g_page_height)
testlib.must_throw(outline.level_down)
testlib.must_throw(outline.level_up)
doc.page_end()
outline.state_restore()
testlib.must_throw(outline.state_restore)
doc.finalize()
def do_document(argv, cfg, name):
doc = testlib.create_test_doc(argv, name, cfg)
g_font.set_writer(doc)
standard_usage(doc)
doc.finalize()
def do_invalid_destinations(argv,cfg,name):
invalid_dests = [\
"zoom=1.2",
"mode=nonsense",
"mode=XYZ;zoom=onan",
"mode=FitR;left=1;top=1;bottom=1"
]
for d in invalid_dests:
doc = testlib.create_test_doc(argv, name, cfg)
doc.page_start(10.0*72, 10.0*72)
doc.outline().item("~", d)
doc.page_end()
testlib.must_throw(doc.finalize)
doc = None
syntax_err_dests = ["oom=1.2"]
doc = testlib.create_test_doc(argv, name, cfg)
doc.page_start(10.0*72, 10.0*72)
for d in syntax_err_dests:
testlib.must_throw(doc.outline().item, "~", d)
doc.page_end()
doc.finalize()
def do_generic_bookmarks(argv, cfg, name):
rl = 72
rt = 9*72
rr = 72+144
doc = testlib.create_test_doc(argv, name, cfg)
outline = doc.outline()
doc.page_start(10.0*72, 10.0*72)
page = doc.page().canvas()
page.rectangle(rl, 7*72, 144, 144)
page.path_paint('s')
page.rectangle(72+36, 7*72+36, 72, 72)
page.path_paint('s')
outline.item("Zoom 100%", "mode=XYZ;zoom=1.0")
outline.item("Zoom 250%", "mode=XYZ;zoom=2.5")
outline.item("Zoom 25%", "mode=XYZ;zoom=.25")
outline.item("Rect top-left, retain zoom", "mode=XYZ;left=%lf;top=%lf" % (rl,rt) )
outline.item("Fit width, position rectangle top", "mode=FitH;top=%lf" % rt)
outline.item("Fit width, retain y", "mode=FitH")
outline.item("Fit height, position rectangle right", "mode=FitV;left=%lf" % rr)
outline.item("Fit height, retain x", "mode=FitV")
outline.item("Fit inner rectangle",
"mode=FitR;left=%lf;top=%lf;bottom=%lf;right=%lf" %\
(72+36, 7*72+36+72, 7*72+36, 72+36+72 ))
outline.item("Fit page", "mode=Fit")
outline.item("Fit page bbox", "mode=FitB")
outline.item("Fit bbox width, retain y", "mode=FitBH")
outline.item("Fit bbox width, top 1/2 rect", "mode=FitBH;top=%lf" % (rt-72))
outline.item("Fit bbox height, retain x", "mode=FitBV")
outline.item("Fit bbox height, left 1/2 rect", "mode=FitBV;left=%lf" % (rl+72))
outline.item("", "mode=XYZ;zoom=1.5")
outline.item("^^ an empty bookmark that zooms to 150%", "mode=XYZ;zoom=1.0")
doc.page_end()
doc.finalize()
def test_main(argv=None):
cfg = testlib.test_config()
# cfg.set("doc.trace_level", "5")
# cfg.set("doc.trace_show_loc", "0")
do_invalid_destinations(argv, cfg, 'docoutline_invalid_dest.pdf')
do_generic_bookmarks(argv, cfg, 'docoutline_generic.pdf')
do_document(argv, cfg, 'docoutline.pdf')
cfg.set("doc.version", "3")
do_document(argv, cfg, 'docoutline13.pdf')
cfg.set("doc.version", "4")
cfg.set("doc.encryption", "standard")
cfg.set("info.static_producer", "1")
cfg.set("doc.static_file_id", "1")
cfg.set("info.creation_date", "0")
do_document(argv, cfg, 'docoutline_enc.pdf')
## fault injection
fault_injection()
if __name__ == "__main__":
test_main()
```
#### File: apitest/py/grstatemachine.py
```python
import jagpdf
import jag.testlib as testlib
dim = 200, 200
# tests graphics state machine
def test_main(argv=None):
doc = jagpdf.create_stream(testlib.NoopStreamOut())
doc.page_start(*dim)
canvas = doc.page().canvas()
testlib.must_throw(canvas.line_to, 20, 20)
testlib.must_throw(canvas.path_paint, 'f')
canvas.text_start(0, 0)
testlib.must_throw(canvas.text, 10, 10, "Not allowed here!")
canvas.text("This is OK")
canvas.text_end()
doc.page_end()
doc.finalize()
if __name__ == '__main__':
test_main()
```
#### File: apitest/py/img-gamma.py
```python
import jagpdf
import os
import jag.testlib as testlib
g_font = testlib.EasyFont()
def do_basic_page(writer):
img_file = os.path.expandvars('${JAG_TEST_RESOURCES_DIR}/images/lena_uncalibrated.jpg')
for g in [1.0, 1.4, 1.8, 2.2]:
writer.page_start(5.6*72 + 4, 5.6*72 + 34)
page = writer.page().canvas()
img_spec = writer.image_definition()
img_spec.format(jagpdf.IMAGE_FORMAT_JPEG)
img_spec.file_name(img_file)
img_spec.gamma(g)
img = writer.image_load(img_spec);
page.image(img, 2, 32)
page.text_font(g_font(12))
page.text(10, 10, "gamma: " + str(g))
writer.page_end()
def do_gamma_preserve(writer):
img_file = os.path.expandvars('${JAG_TEST_RESOURCES_DIR}/images/lena_uncalibrated.jpg')
writer.page_start(5.6*72 + 4, 6.6*72 + 34)
page = writer.page().canvas()
page.color_space("fs", jagpdf.CS_DEVICE_RGB)
page.color("fs", .7, .2, .5)
page.rectangle(2, 10, 5.6*72, 72)
page.path_paint("fs")
img_spec = writer.image_definition()
img_spec.format(jagpdf.IMAGE_FORMAT_JPEG)
img_spec.file_name(img_file)
img_spec.gamma(2.2)
img = writer.image_load(img_spec);
page.image(img, 2, 32+74)
# page.color("fs", .7, .8, .5)
page.rectangle(46, 28, 4.2*72, 36)
page.path_paint("fs")
page.color("fs", .9, .9, .9)
page.text_font(g_font(8))
page.text(10, 12, "[preserve gamma:] two rectangles in the same color, one contained in another")
writer.page_end()
def test_main(argv=None):
doc_writer = testlib.create_test_doc(argv, 'img-gamma.pdf')
g_font.set_writer(doc_writer)
do_gamma_preserve(doc_writer)
do_basic_page(doc_writer)
doc_writer.finalize()
if __name__ == '__main__':
test_main()
```
#### File: apitest/py/kerning.py
```python
import jagpdf
import jag.testlib as testlib
# AY and YA seems to have the largest kerning
def test_main(argv=None):
doc = testlib.create_test_doc(argv, 'kerning.pdf')
font = doc.font_load('standard;name=Helvetica;size=12')
pheight = 58
doc.page_start(7*72, pheight)
canvas = doc.page().canvas()
canvas.text_font(font)
txt = "AYAYAYAYAYAYAYA YAYAYAYAYAYAYAYAYA YAYAYAYAYAYAYAY AYAYAYAYAY"
# fetch widths
finfo = font
widths = [finfo.advance(c) for c in txt]
txt_width = sum(widths)
# draw bounding lines
canvas.state_save()
canvas.color('s', 0.8)
canvas.move_to(12, 0)
canvas.line_to(12, pheight)
canvas.move_to(12+txt_width, 0)
canvas.line_to(12+txt_width, pheight)
canvas.path_paint('s')
canvas.state_restore()
# write whole string - single text object
canvas.text_start(12, 12)
canvas.text(txt)
canvas.text_end()
# write single characters - single text object
canvas.text_start(12, 24)
for c in txt:
canvas.text(c)
canvas.text_end()
# write single characters - text object for each
x = 12
for i,c in enumerate(txt):
canvas.text_start(x, 36)
canvas.text(c)
canvas.text_end()
x += widths[i]
doc.page_end()
doc.finalize()
if __name__ == '__main__':
test_main()
```
#### File: apitest/py/long_test.py
```python
import jagpdf
import jag.testlib as testlib
import sys
import os
import threading
s_profile ="""
info.title = title
info.author = unknown
info.subject = unknown
info.keywords = unknown
info.creator = unknown
info.creation_date = unknown
"""
s_image_dir = os.path.expandvars('${JAG_TEST_RESOURCES_DIR}')
s_jpeg_file = os.path.join(s_image_dir, "images-jpeg",
"PalmTree-CMYK-icc-FOGRA27.jpg")
def test_main(argv=None):
do_it()
num_threads = 10
docs_per_thread = 3;
while 1:
threads = [WorkerThread(docs_per_thread) for i in xrange(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
class WorkerThread(threading.Thread):
def __init__(self, num_docs):
threading.Thread.__init__(self)
self.num_docs = num_docs
def run(self):
while self.num_docs:
do_it()
self.num_docs -= 1
def do_it():
prof = jagpdf.create_profile_from_string(s_profile)
stream = testlib.NoopStreamOut()
doc = jagpdf.create_stream(stream)
doc.page_start(5.9*72, 3.5*72)
canvas = doc.page().canvas()
# meat
img = doc.image_load_file(s_jpeg_file)
canvas.image(img, 50, 50)
canvas.text(10, 10, 200 * 'a')
canvas.move_to(10, 10)
canvas.line_to(20, 20)
canvas.path_paint("fs")
font_ttf = testlib.EasyFontTTF(doc)(10)
font_core = testlib.EasyFont(doc)(10)
canvas.text_font(font_ttf)
canvas.text(10, 10, 50 * 'x')
font_ttf.advance('abcdefghijklmnopqrstuvwxyz')
canvas.text_font(font_core)
canvas.text(10, 10, 50 * 'y')
font_core.advance('abcdefghijklmnopqrstuvwxyz')
# finalize
doc.page_end()
doc.finalize()
# touch the result
s = 0
for b in stream.content():
s = s + ord(b)
if __name__ == "__main__":
test_main()
```
#### File: apitest/py/piechart.py
```python
import jagpdf
import jag.testlib as testlib
import math
paper = 597.6, 848.68
# -- scheme generator
# http://www.wellstyled.com/tools/colorscheme2/index-en.html
colors_str="""#FF3300
#B32400
#FFCCBF
#FF9980
#00B366
#007D48
#BFFFE4
#80FFC9
#0033CC
#00248F
#BFCFFF
#809FFF
#FF9900
#B36B00
#FFE6BF
#FFCC80
"""
chart_data = [
('Africa', 767),
('Asia', 3634),
('Europe', 729),
('South America and the Caribbean', 511),
('Northern America', 307),
('Oceania', 30)
]
def color_to_rgb(str):
r = int(str[1:3], 16) / 255.0
g = int(str[3:5], 16) / 255.0
b = int(str[5:7], 16) / 255.0
return r, g, b
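# e.g. color_to_rgb('#FF3300') -> (1.0, 0.2, 0.0)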
colors = [color_to_rgb(c) for c in colors_str.split()]
def get_color():
for i in [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14]:
yield colors[i]
def draw_piechart(canvas, cx, cy, rx, ry, font, items, title, font_title):
"""items is a sequence of [name, quantity]"""
total_quant = reduce(lambda s, i: s + i[1], items, 0)
items.sort(lambda l, r: cmp(r[1], l[1]))
color = get_color()
color_map = dict([(name, color.next()) for name, q in items])
items.reverse()
canvas.color_space('fs', jagpdf.CS_DEVICE_RGB)
# shadow
# canvas.color('f', 0.2, 0.2, 0.2)
# canvas.arc(cx+4, cy-4, rx, ry, 0, 2 * math.pi)
# canvas.path_paint('f')
# chart
angle = math.pi / 2.0
color = get_color()
max_str_len = 0.0
canvas.line_join(jagpdf.LINE_JOIN_BEVEL)
for name, quant in items:
canvas.color('fs', *color_map[name])
sweep = quant * 2 * math.pi / total_quant
canvas.arc(cx, cy, rx, ry, angle, sweep)
canvas.line_to(cx, cy)
canvas.path_close()
canvas.path_paint('fs')
angle += sweep
max_str_len = max(max_str_len, font.advance(name))
# legend boxes
items.reverse()
legend_x, legend_y = cx - rx, cy + ry + (1 + len(items))*font.height()
y = 0
box_h = font.bbox_ymax() - font.bbox_ymin()
box_w = 20
for name, quant in items:
canvas.color('f', *color_map[name])
canvas.rectangle(legend_x, legend_y - y + font.bbox_ymin(), box_w, box_h)
canvas.path_paint('f')
y += font.height()
# legend text
canvas.text_font(font)
canvas.text_start(legend_x + box_w + 8, legend_y)
perc_offset = max_str_len + 10
canvas.color('f', 0, 0, 0)
for name, quant in items:
canvas.text("%s" % name)
canvas.text_translate_line(perc_offset, 0)
canvas.text("%.2f%%" % (100.0 * quant / total_quant))
canvas.text_translate_line(-perc_offset, -font.height())
canvas.text_end()
# edge
# canvas.color('s', 0.5, 0.5, 0.5)
# canvas.arc(cx, cy, rx, ry, 0, 2 * math.pi)
# canvas.path_paint('s')
# title
canvas.text_font(font_title)
canvas.color('f', 0, 0, 0)
title_w = font_title.advance(title)
canvas.text(legend_x + ((2 * rx - title_w) / 2.0), \
legend_y + 1.4 * font_title.height(), title)
def test_main(argv=None):
doc = testlib.create_test_doc(argv, 'piechart.pdf')
font = doc.font_load('standard;name=Helvetica;size=12')
font_title = doc.font_load('standard;name=Helvetica-Bold;size=28')
doc.page_start(*paper)
canvas = doc.page().canvas()
draw_piechart(canvas, 300, 400, 250, 200, font,
chart_data, 'World Population', font_title)
doc.page_end()
doc.finalize()
if __name__ == "__main__":
test_main()
```
#### File: apitest/py/t0133.py
```python
import jagpdf
import jag.testlib as testlib
import os
def do(writer):
writer.page_start(3*72, 1.5*72)
page = writer.page().canvas()
img = os.path.expandvars('${JAG_TEST_RESOURCES_DIR}/images/klenot.png')
page.image(writer.image_load_file(img, jagpdf.IMAGE_FORMAT_PNG), 10, 30)
writer.page_end()
def test_main(argv=None):
profile = testlib.test_config()
profile.set("doc.encryption", "standard")
profile.set("doc.static_file_id", "1")
profile.set("info.creation_date", "0")
profile.set("info.static_producer", "1")
doc = testlib.create_test_doc(argv, 'encrypted_indexed_cs.pdf', profile)
do(doc)
doc.finalize()
if __name__ == '__main__':
test_main()
```
#### File: code/tools/afm_parser.py
```python
import glyphlist
import glob
import re
from string import Template
import sys
import md5
from collections import defaultdict
import math
import random
from StringIO import StringIO
import os
AFM_DIR = '../../external/data/Core14_AFMs/'
TYPEMAN_DIR = '../src/resources/typeman/'
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
class Face:
def __init__( self, ctx ):
self.ctx = ctx
self.chars = []
self.FontName = ""
self.FullName = ""
self.FamilyName = ""
self.FontBBox = None #[llx lly urx ury]
self.EncodingScheme = ""
self.CharacterSet = ""
self.CapHeight = 0
self.XHeight = 0
self.Ascender = 0
self.Descender = 0
self.UnderlinePosition = 0
self.BuiltinEncoding = 0
self.UnderlineThickness = 0
self.ItalicAngle = 0
self.IsFixedPitch = True
self.Weight = 400 # 100-900
self.StdHW = 0
self.StdVW = 0
self.KernGetter = "NULL"
self.md5 = md5.new()
def finalize(self):
if self.EncodingScheme == 'FontSpecific':
            # sort by code
self.chars.sort( lambda l,r: cmp(l.code,r.code) )
else:
            # sort by unicode
self.chars.sort( lambda l,r: cmp(l.unicode,r.unicode) )
def on_afm_line(self,line):
"""called for each input line"""
self.md5.update( line )
class HandlerBase:
def __init__( self, face ):
self.face = face
def on_line( self, s ):
self.face.on_afm_line( s )
self.process_line_( s )
class FontMetricsHandler(HandlerBase):
def __init__( self, face, arg ):
HandlerBase.__init__( self, face )
def process_line_( self, s ):
kwd, val = get_kwd_and_val(s)
if kwd in set( ['FontName','FullName','FamilyName', 'EncodingScheme', 'CharacterSet'] ):
setattr( self.face, kwd, val ) #verbatim
elif kwd in set( [
'CapHeight',
'XHeight',
'Ascender',
'Descender',
'UnderlinePosition',
'UnderlineThickness',
'StdHW', 'StdVW' ] ):
setattr( self.face, kwd, int(val) )
elif kwd in set( ['ItalicAngle'] ):
setattr( self.face, kwd, float(val) )
elif kwd == "FontBBox":
self.face.FontBBox = [int(s) for s in val.split(" ") ]
assert( len(self.face.FontBBox) == 4 )
elif kwd == "IsFixedPitch":
self.face.IsFixedPitch = val=='true' and True or False
elif kwd == "Weight":
#see: http://www.w3.org/TR/CSS21/fonts.html#font-boldness
self.face.Weight = { 'Medium' : 400,
'Normal' : 400,
'Roman' : 400,
'Bold' : 700,
'Light' : 300 }[val]
elif kwd in set( ['Version', 'Notice', 'Comment', 'Characters'] ):
pass #ignore
elif kwd in set( ['MappingScheme', 'EscChar', 'IsBaseFont', 'VVector', 'IsFixedV', 'CharWidth'] ):
assert not "unsupported keyword"
else:
print "kwd: ", kwd
assert not "unknown keyword"
class CharMetricsHandler(HandlerBase):
def __init__( self, face, arg ):
HandlerBase.__init__( self, face )
def process_line_( self, s ):
l = [ item.strip().split( ' ', 1) for item in s.split(';')[:-1] ]
rd = dict( l )
bbox = [int(s) for s in rd['B'].split(" ") ]
assert( len(bbox) == 4 )
try:
u = glyphlist.glyph_to_unicode_map[rd['N']]
except:
assert( self.face.EncodingScheme == 'FontSpecific' )
u = 0
self.face.chars.append( Bunch(unicode=u,
code=int(rd['C']),
widthx=int(rd['WX']),
bbox=bbox) )
class KernDataHandler(HandlerBase):
def __init__( self, face, arg ):
HandlerBase.__init__( self, face )
def process_line_( self, s ):
assert not "should not get here"
class KernPairsHandler(HandlerBase):
def __init__( self, face, arg ):
HandlerBase.__init__( self, face )
self.getter_fun = None
def process_line_( self, s ):
kwd, left, right, value = s.split(' ')
assert( kwd == 'KPX' )
left = glyphlist.glyph_to_unicode_map[left]
right = glyphlist.glyph_to_unicode_map[right]
# store the kerning info to ctx.kern_dict,
# which is (left, right) -> {get_fun: value}
if not self.getter_fun:
self.getter_fun = 'kern_' + font_name_to_id(self.face.FontName)
self.face.KernGetter = self.getter_fun
self.face.ctx.kern_dict[(left,right)][self.getter_fun] = value
def get_kwd_and_val( line ):
sp = line.split( " ", 1 )
assert( len(sp) == 1 or len(sp) == 2 )
if len(sp) == 1:
return sp[0], None
else:
return sp
def get_handler_type( handler ):
return globals()[handler+'Handler']
def font_name_to_id( fontname ):
return re.sub( '[^a-zA-Z_]', '_', fontname )
def font_name_to_enum( fontname ):
return "T1_" + font_name_to_id( fontname ).upper()
def process_afm(instream, ctx):
"""processes single afm file"""
handlers = []
face = Face(ctx)
for line in instream:
line = line.strip()
key, val = get_kwd_and_val( line )
if key.startswith( 'Start' ):
handlers.append( get_handler_type( key[5:] )(face, val) )
elif key.startswith( 'End' ):
last=handlers.pop()
assert( last.__class__==get_handler_type(key[3:]) )
else:
handlers[-1].on_line( line )
face.finalize()
    return face
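# Dispatch sketch (illustrative): an AFM line such as "StartCharMetrics 315"
# pushes a CharMetricsHandler (looked up via get_handler_type("CharMetrics")),
# the matching "EndCharMetrics" line pops it again, and every line in between
# is fed to the handler currently on top of the stack.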
def process_afm_dir(dirname, ctx):
"""non-recursively processes diretory of afm files"""
faces = []
for fname in glob.glob( dirname + '/*.afm' ):
faces.append(process_afm(open(fname), ctx))
return faces
###########################################################################
# header generator
cpp_header_muster="""
"""
def do_cpp_header( faces, outs ):
ENUMS = ",\n ".join( [ font_name_to_enum(f.FontName) for f in faces ] )
header_templ = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.h.template')
header_templ = open(header_templ).read()
outs.write( Template(header_templ).substitute(locals()))
###########################################################################
# cpp generator
cpp_impl_muster="""
"""
kern_getter_templ="""
Int $getter_fun(kern_offsets_t const& krec) {
return krec.$value_holder;
}
"""
def make_kern_pair_key(left, right):
return left + (right << 16)
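# Key layout sketch (illustrative): the left glyph's unicode occupies the low
# 16 bits, the right one the high bits, e.g.
#   make_kern_pair_key(0x41, 0x56) == 0x00560041   # pair ('A', 'V')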
def output_kern_table(templ, ctx, getter_to_index, value_to_index):
# insertion into the hash table depends on randomizer, so make it
# deterministic here
random.seed(0)
# these 3 primes in combination with table size give ~93% load factor
hash1_p = 226783
hash2_p = 1354601
hash3_p = 1622471
hash_table_size = 3491
num_hash_functions = 3
num_cells = 1
h = HFunctionsDivision(hash1_p, hash2_p, hash3_p)
# these 2 primes in combination with table size give ~62% load factor
hash1_p = 16069
hash2_p = 43787
hash_table_size = 5261
num_hash_functions = 2
h = HFunctionsDivision(hash1_p, hash2_p)
# 2 primes, 2 cells -> 91.7%
hash1_p = 1984061
hash2_p = 885931
num_cells = 2
h = HFunctionsDivision(hash1_p, hash2_p)
hash_table_size = 1777
#
ch = CuckooNest(hash_table_size, h, num_cells)
result = []
min_unicode, max_unicode = sys.maxint, 0
values = {} # offset tuple -> its index
values[(0, 0, 0, 0, 0, 0)] = 0
for k, v in ctx.kern_dict.iteritems():
key = make_kern_pair_key(*k)
min_unicode = min(min_unicode, k[0], k[1])
max_unicode = max(max_unicode, k[0], k[1])
value = 8 * [0]
for getter, val in v.iteritems():
value[getter_to_index[getter]] = value_to_index[val]
value = tuple(value)
try:
value_i = values[value]
except KeyError:
value_i = len(values)
values[value] = value_i
ch.insert(key, str(value_i))
result += ch.c_output("{0xffffffff, 0}")
kerning_table = ",\n ".join(result)
num_kerning_offsets = len(values)
offset_list = [(v, k) for k, v in values.iteritems()]
offset_list.sort()
off_tuples = (os for i, os in offset_list)
off_strings = (", ".join(str(o) for o in off) for off in off_tuples)
offset_c_values = ("{%s}" % s for s in off_strings)
kerning_offsets = ",\n ".join(offset_c_values)
return Template(templ).safe_substitute(locals())
def output_kern_data(templ, ctx):
"""outputs data needed for pair kerning"""
getters, values = set(), set()
for pair, d in ctx.kern_dict.iteritems():
for g, val in d.iteritems():
getters.add(g)
values.add(val)
getter_to_index = dict([(g, i) for i, g in enumerate(getters)])
vlist = [(v, i + 1) for i, v in enumerate(values)]
vlist.append((0, 0))
vlist.sort(lambda l, r : cmp(l[1], r[1]))
value_to_index = dict(vlist)
kern_values = ",\n ".join((str(v) for v, i in vlist))
templ = output_kern_table(templ, ctx, getter_to_index, value_to_index)
# output getter functions (they access offset value for given font)
kerning_getters = []
for getter_fun, value_holder_i in getter_to_index.iteritems():
value_holder = "offset_%d" % value_holder_i
kerning_getters.append(Template(kern_getter_templ).substitute(locals()))
kerning_getters = "\n".join(kerning_getters)
return Template(templ).safe_substitute(locals())
def do_cpp_impl(faces, outs, ctx):
FACE_PTRS = ",\n ".join( [ "&"+font_name_to_id(f.FontName) for f in faces ] )
FACE_DEFS = []
for face in faces:
FACE_DEFS.append( do_cpp_impl_face(face) )
FACE_DEFS = "\n".join( FACE_DEFS )
impl_templ = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.cpp.template')
impl_templ = open(impl_templ).read()
impl_templ = output_kern_data(impl_templ, ctx)
outs.write(Template(impl_templ).substitute(locals()))
cpp_impl_face_muster="""
const int ${FACEID}_num_glyphs = $NUM_GLYPHS;
const t1s_glyph ${FACEID}_glyphs[${FACEID}_num_glyphs] = {
${GLYPHS_DEF}
};
const t1s_face $FACEID = {
{
/* units */ 1000,
/* bbox_xmin */ $FontBBox_xmin,
/* bbox_ymin */ $FontBBox_ymin,
/* bbox_xmax */ $FontBBox_xmax,
/* bbox_ymax */ $FontBBox_ymax,
/* baseline_distance */ $BaselineDistance,
/* ascender */ $Ascender,
/* descender */ $Descender,
/* avg_width */ $AvgWidth,
/* max_width */ $MaxWidth,
/* missing_width */ $MissingWidth,
/* cap height */ $CapHeight,
/* xheight */ $XHeight
}, /* font metrics */
/* font name */ \"$FontName\",
/* full name */ \"$FullName\",
/* family name */ \"$FamilyName\",
/* encoding scheme */ \"$EncodingScheme\",
/* built-in encoding */ $BuiltinEncoding,
/* char set */ \"$CharacterSet\",
/* underline position */ $UnderlinePosition,
/* underline thickness */ $UnderlineThickness,
/* italic angle */ $ItalicAngle,
/* is fixed pitch */ $IsFixedPitch,
/* weight */ $Weight,
/* horizontal stem w */ $StdHW,
/* vertical stem w */ $StdVW,
/* num glyphs */ $NUM_GLYPHS,
/* glyph metrics */ ${FACEID}_glyphs,
/* kerning getter */ ${KernGetter},
/* hash */ { $HASH }
};
"""
def calc_face_width_attrs( face ):
AvgWidth, MaxWidth, MissingWidth = 0, -1, -1
for c in face.chars:
AvgWidth += c.widthx
if c.widthx > MaxWidth:
MaxWidth = c.widthx
if c.unicode == 32:
MissingWidth = c.widthx
AvgWidth = AvgWidth / len( face.chars )
return locals()
def do_cpp_impl_face(face):
FACEID = font_name_to_id( face.FontName )
NUM_GLYPHS = len(face.chars)
GLYPHS_DEF = []
for i in range( 0, NUM_GLYPHS, 5 ):
GLYPHS_DEF.append( ", ".join( ["{%d,%d,%d}" % (c.unicode, c.code, c.widthx)
for c in face.chars[i:i+5]] ) )
GLYPHS_DEF = ",\n ".join(GLYPHS_DEF)
locals().update( face.__dict__ )
locals()['IsFixedPitch'] = locals()['IsFixedPitch'] and "true" or "false"
locals()['BuiltinEncoding'] = locals()['EncodingScheme'] == 'FontSpecific' and "true" or "false"
HASH = ", ".join( [ "0x%02x"%ord(b) for b in face.md5.digest() ] )
locals().update( calc_face_width_attrs(face) )
FontBBox_xmin = face.FontBBox[0]
FontBBox_ymin = face.FontBBox[1]
FontBBox_xmax = face.FontBBox[2]
FontBBox_ymax = face.FontBBox[3]
# taken from FreeType, t1objs.c
BaselineDistance = 1000*12/10
if BaselineDistance < locals()['Ascender']-locals()['Descender']:
BaselineDistance = locals()['Ascender']-locals()['Descender']
return Template(cpp_impl_face_muster).substitute( locals() )
def gen_cpp_jagbase():
ctx = Bunch(kern_dict=defaultdict(lambda : {}))
faces = process_afm_dir(AFM_DIR, ctx)
if faces:
header_file = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.h')
do_cpp_header(faces, open(header_file, 'wb' ))
impl_file = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.cpp')
do_cpp_impl(faces, open(impl_file, 'wb'), ctx)
#C 33 ; WX 600 ; N exclam ; B 202 -15 398 572 ;
def encoding_status():
content = open(AFM_DIR + 'Courier-Bold.afm').read()
names = re.compile('; N ([a-zA-Z]+) ;')
core_names = set(names.findall(content))
encodings = ['windows-1250', 'windows-1251', 'windows-1252', 'windows-1253']
for enc in encodings:
for i in xrange(128,256):
try:
c = unicode(chr(i), enc)
assert len(c) == 1
codepoint = ord(c[0])
name = glyphlist.unicode_to_glyph(codepoint)
if name not in core_names:
print enc, name, "0x%x" % codepoint
except UnicodeDecodeError, err:
print enc, err
# ---------------------------------------------------------------------------
# kerning stats
#
def kern_generator():
from glyphlist import glyph_to_unicode_map as gmap
for fontfile in glob.glob('../../external/data/Core14_AFMs/*.afm'):
for line in open(fontfile):
if line.startswith('KPX'):
kpx, left, right, offset = line.split()
yield fontfile, gmap[left], gmap[right], offset
def kern_stats():
# unique lefts per font
# avg number of rights per single left
# % of kern pairs in all pair in lorem ipsum
kd = defaultdict(lambda : {})
pairs_total = 0
pairs_unique = set()
values_unique = set()
pairs_per_font = defaultdict(lambda : 0)
pairs_freq_font = defaultdict(lambda : 0)
max_unicode = 0
max_left = 0
max_right = 0
min_left = sys.maxint
min_right = sys.maxint
max_diff = 0
glyphs = set()
max_val, min_val = 0, sys.maxint
for font, left, right, val in kern_generator():
kd[font][(left, right)] = val
pairs_total += 1
pairs_unique.add((left, right))
values_unique.add(val)
max_val = max(max_val, int(val))
min_val = min(min_val, int(val))
pairs_per_font[font] += 1
pairs_freq_font[(left, right)] += 1
max_unicode = max(max_unicode, left, right)
max_left = max(max_left, left)
max_right = max(max_right, right)
min_left = min(min_left, left)
min_right = min(min_right, right)
max_diff = max(max_diff, abs(left - right))
glyphs.add(left)
glyphs.add(right)
# post-proc
pairs_dist = defaultdict(lambda : 0)
for v in pairs_freq_font.itervalues():
pairs_dist[v] += 1
# out
log2_glyphs = defaultdict(lambda : 0)
for g in glyphs:
log2_glyphs[math.ceil(math.log(g, 2))] += 1
print 'total:', pairs_total
print 'unique pairs:', len(pairs_unique), ', tree depth:', math.log(len(pairs_unique), 2)
print 'unique glyphs:', len(glyphs)
print 'unique values:', len(values_unique)
print 'min val:', min_val, ', max_val:', max_val, ", diff:", (max_val - min_val)
print 'pairs per font:', ', '.join([str(v) for v in pairs_per_font.itervalues()])
print 'pairs freq in fonts:', ', '.join(['%d: %d' % (k, v) for k, v in pairs_dist.iteritems()])
print 'bits per glyph:', ', '.join(("%d: %d" % (k, v) for k, v in log2_glyphs.iteritems()))
print 'max unicode:', max_unicode, ', max left:', max_left, ', max right:', max_right
print 'min left:', min_left, ', min right:', min_right, ', max diff:', max_diff
class CuckooNest:
def __init__(self, nr_buckets, hash_funs, nr_cells=1):
self.nr_buckets = nr_buckets
self.hash_funs = hash_funs
self.nr_cells = nr_cells
self.table = nr_cells * nr_buckets * [None]
self.nr_items = 0
def cells(self, n, key):
"""Calculate hash using n-th hash function and return a list of cell
indices."""
pos = self.hash_funs(n, key) % self.nr_buckets
        return [self.nr_cells * pos + cell for cell in range(self.nr_cells)]
def insert(self, key, value):
cells = self.cells(0, key)
item = (key, value)
for n in xrange(self.nr_items + 1):
for cell in cells:
if None == self.table[cell]:
self.table[cell] = item
self.nr_items += 1
return
p0 = random.choice(cells)
item, self.table[p0] = self.table[p0], item
all_cells = [self.cells(i, item[0]) for i in range(len(self.hash_funs))]
all_cells.remove(cells)
cells = random.choice(all_cells)
raise TableFull('cannot insert %d' % item[0])
def load_factor(self):
return float(self.nr_items) / len(self.table)
def lookup(self, key):
for i in range(len(self.hash_funs)):
pos = self.cells(i, key)
for p in pos:
if self.table[p] and self.table[p][0] == key:
return self.table[p][1]
return None
def stats(self):
print '#items:', self.nr_items
print 'load factor:', float(self.nr_items) / len(self.table)
def c_output(self, empty_slot):
result = []
for i in range(len(self.table)):
item = self.table[i]
if item != None:
result.append("{0x%08x, %s}" % item)
else:
result.append(empty_slot)
return result
class TableFull(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class HFunctionsDivision:
def __init__(self, *primes):
self.primes = primes
def __call__(self, i, key):
return key % self.primes[i]
def __len__(self):
return len(self.primes)
def __str__(self):
return 'Division: ' + ', '.join((str(p) for p in self.primes))
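# Minimal usage sketch for the cuckoo table above (illustrative only; the
# primes and table size here are arbitrary, not the tuned values used in
# output_kern_table):
#
#   h = HFunctionsDivision(10007, 10009)
#   table = CuckooNest(97, h, nr_cells=2)
#   key = make_kern_pair_key(65, 86)   # kern pair ('A', 'V')
#   table.insert(key, "0")             # value is an offset-table index
#   assert table.lookup(key) == "0"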
def HDivisionIter():
# h = HFunctionsDivision(1984061, 885931)
# yield CuckooNest(1777, h, 2), h
while 1:
from primes import primes
h = HFunctionsDivision(random.choice(primes),
random.choice(primes),
random.choice(primes))
yield CuckooNest(3491, h), h
import itertools
def eratosthenes():
'''Yields the sequence of prime numbers via the Sieve of Eratosthenes.'''
D = { } # map each composite integer to its first-found prime factor
for q in itertools.count(2): # q gets 2, 3, 4, 5, ... ad infinitum
p = D.pop(q, None)
if p is None:
# q not a key in D, so q is prime, therefore, yield it
yield q
# mark q squared as not-prime (with q as first-found prime factor)
D[q*q] = q
else:
# let x <- smallest (N*p)+q which wasn't yet known to be composite
# we just learned x is composite, with p first-found prime factor,
# since p is the first-found prime factor of q -- find and mark it
x = p + q
while x in D:
x += p
D[x] = p
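# e.g. (illustrative) the first values yielded by eratosthenes() are
# 2, 3, 5, 7, 11, 13, 17, 19, ...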
def gen_primes(n):
print "primes = [ \\"
for p in eratosthenes():
if p > n:
break
print "%d," % p
print "]"
def get_pairs_dict():
pairs_dict = {}
min_key, max_key = sys.maxint, 0
for font, left, right, val in kern_generator():
pairs_dict.setdefault((left, right), {})[font] = val
return pairs_dict
def output_keys():
for k, v in get_pairs_dict().iteritems():
print make_kern_pair_key(*k)
def hfuns_generator(n):
from primes import primes
while 1:
yield HFunctionsDivision(*[random.choice(primes) for i in range(n)])
def test_load_factor():
from primes import primes
N = 50
p1 = primes.index(5003) # 3271
p2 = primes.index(6007)
sizes = primes[p1:p2]
pairs_dict = get_pairs_dict()
items = [(make_kern_pair_key(*k), v) for k, v in pairs_dict.iteritems()]
cells = 1
maximize_load_factor(N, items, sizes, hfuns_generator(2), cells)
def maximize_load_factor(N, input_data, nr_buckets_lst, hfun_gen, nr_cells):
found, reset = 1, 2
low, high = 0, len(nr_buckets_lst)
status = reset
while high > low:
if status & reset:
max_factor = 0.0
nr_tries = N
mid = low + (high - low) / 2
else:
nr_tries *= 2
status = reset
for i in xrange(nr_tries):
hfuns = hfun_gen.next()
htable = CuckooNest(nr_buckets_lst[mid], hfuns, nr_cells)
try:
for key, val in input_data:
htable.insert(key, val)
print 'OK:', nr_buckets_lst[mid], htable.load_factor(), hfuns
high = mid - 1
status = found | reset
break
except TableFull:
if htable.load_factor() > max_factor:
max_factor = htable.load_factor()
status = 0
if status == reset:
print 'not found:', nr_buckets_lst[mid], ', load factor:', max_factor, \
'target was:', len(input_data) / float(nr_buckets_lst[mid])
low = mid + 1
def construct_hash_table():
pairs_dict = get_pairs_dict()
found = False
best_lf = 0.0
hiter = HDivisionIter()
for h, funs in hiter:
try:
for k, v in pairs_dict.iteritems():
h.insert(make_kern_pair_key(*k), v)
h.stats()
found = True
break
except TableFull, exc:
if h.load_factor() > best_lf:
print 'Load factor: %.3f' % h.load_factor(), 'for', funs
best_lf = h.load_factor()
# verify
if found:
for k, v in pairs_dict.iteritems():
assert v == h.lookup(make_kern_pair_key(*k))
assert h.lookup(make_kern_pair_key(5000, 5000)) == None
print 'OK for ' + str(funs)
return h
else:
print 'FAILED'
def kern_frequency(fname):
h = construct_hash_table()
data = " ".join(open(fname).read().split())
freq = 0
for i in range(0, len(data)-1):
k = make_kern_pair_key(ord(data[i]), ord(data[i+1]))
if h.lookup(k):
freq += 1
return len(data), freq, float(freq) / (len(data)-1)
if __name__ == "__main__":
#gen_cpp_jagbase()
#encoding_status()
#kern_stats()
#construct_hash_table()
test_load_factor()
#gen_primes(0x20002c) # redirect to primes.py
#print kern_frequency('/home/jarda/tmp/kant-critique-142.txt')
#test_is_prime()
#output_keys()
```
#### File: code/tools/doc_generator.py
```python
import sys
import os
import fnmatch
import shutil
def cmd_stdout( cmd, indata=None ):
in_, out = os.popen4( cmd )
if indata:
in_.write( indata )
in_.close()
outdata = out.read()
ret = out.close()
retcode = ret!=None and ret or 0
return retcode, outdata
def get_root_dir():
root = os.getenv( 'JAGBASE_ROOT' )
if not root:
raise RuntimeError( 'JAGBASE_ROOT not defined.' )
return os.path.abspath(root)
def glob_iter( dirname, patterns ):
for dirpath, dirnames, filenames in os.walk(dirname):
for pat in patterns:
files = [ os.path.join( dirpath, f ) for f in filenames ]
for fname in fnmatch.filter(files, pat ):
yield os.path.abspath( fname )
# copying of the resulting html files is done here
def process_html( lang ):
bjam_args = "-a doc html doc-lang=%s" % lang
retcode, outdata = run_bjam_cmd( bjam_args )
print outdata
if retcode:
raise RuntimeError( 75*'-' + '\nbjam failed with %d' % retcode )
copy_htmldoc_files(lang)
def copy_htmldoc_files(lang):
html_src_dir = os.path.abspath( os.path.join( get_root_dir(), 'doc/quickbook/html/' ) )
dist_dir = os.path.abspath( os.path.join( get_root_dir(), 'distribution/doc/%s/html' % lang ) )
prefix_len = len(html_src_dir) + 1
for fname in glob_iter(html_src_dir, ['*.png', '*.html', '*.css'] ):
dest = os.path.join( dist_dir, fname[prefix_len:] )
target_dir = os.path.dirname( dest )
if not os.path.isdir( target_dir ):
os.makedirs( target_dir )
shutil.copyfile( fname, dest )
# copying of certain formats is carried out by the dist-doc target
def process_self_installing( lang, format ):
bjam_args = "-a dist-doc %s doc-lang=%s" % ( format, lang )
retcode, outdata = run_bjam_cmd( bjam_args )
print outdata
if retcode:
raise RuntimeError( 75*'-' + '\nbjam failed with %d' % retcode )
# bjam is run from the quickbook directory
def run_bjam_cmd( args ):
run_dir = os.path.join( get_root_dir(), 'doc/quickbook' )
curr_dir = os.getcwd()
os.chdir( run_dir )
try:
cmd = 'bjam ' + args
retcode, output = cmd_stdout( cmd )
finally:
os.chdir( curr_dir )
return retcode, output
def main():
lang, format = parse_cmd_line()
if format in ['ps', 'pdf']:
process_self_installing( lang, format )
else:
globals()['process_'+format]( lang )
helpik="""doc_generator.py <c,cpp,python> <html,pdf,ps>"""
def usage():
print helpik
sys.exit(1)
def parse_cmd_line():
"""returns (language, format)"""
def check_arg( arg, allowed ,feat ):
if arg not in allowed:
raise RuntimeError( "Unknown %s: %s" % ( feat, arg ) )
if len(sys.argv) != 3:
usage()
check_arg( sys.argv[1], ['c', 'cpp', 'python' ], 'language' )
check_arg( sys.argv[2], ['html', 'pdf', 'ps' ], 'format' )
return sys.argv[1:]
if __name__ == "__main__":
try:
main()
except RuntimeError, exc:
print exc
sys.exit(1)
```
#### File: pygccxml/declarations/typedef.py
```python
import declaration
import dependencies
class typedef_t( declaration.declaration_t ):
"""describes C++ typedef declaration"""
def __init__( self, name='', type=None ):
"""creates class that describes C++ typedef"""
declaration.declaration_t.__init__( self, name )
self._type = type
def _get__cmp__items( self ):
"""implementation details"""
return [self.type]
def __eq__(self, other):
if not declaration.declaration_t.__eq__( self, other ):
return False
return self.type == other.type
def _get_type(self):
return self._type
def _set_type(self, type):
self._type = type
type = property( _get_type, _set_type
, doc="reference to the original L{type<type_t>}" )
def i_depend_on_them( self, recursive=True ):
return [ dependencies.dependency_info_t( self, self.type ) ]
@property
def byte_size (self):
"Size of this type in bytes @type: int"
return self._type.byte_size
@property
def byte_align (self):
"alignment of this type in bytes @type: int"
return self._type.byte_align
```
#### File: pygccxml/declarations/type_visitor.py
```python
class type_visitor_t(object):
"""
types visitor interface
All functions within this class should be redefined in derived classes.
"""
def __init__(self):
object.__init__(self)
def visit_void( self ):
raise NotImplementedError()
def visit_char( self ):
raise NotImplementedError()
def visit_unsigned_char( self ):
raise NotImplementedError()
def visit_signed_char( self ):
raise NotImplementedError()
def visit_wchar( self ):
raise NotImplementedError()
def visit_short_int( self ):
raise NotImplementedError()
def visit_short_unsigned_int( self ):
raise NotImplementedError()
def visit_bool( self ):
raise NotImplementedError()
def visit_int( self ):
raise NotImplementedError()
def visit_unsigned_int( self ):
raise NotImplementedError()
def visit_long_int( self ):
raise NotImplementedError()
def visit_long_unsigned_int( self ):
raise NotImplementedError()
def visit_long_long_int( self ):
raise NotImplementedError()
def visit_long_long_unsigned_int( self ):
raise NotImplementedError()
def visit_float( self ):
raise NotImplementedError()
def visit_double( self ):
raise NotImplementedError()
def visit_long_double( self ):
raise NotImplementedError()
def visit_complex_long_double(self):
raise NotImplementedError()
def visit_complex_double(self):
raise NotImplementedError()
def visit_complex_float(self):
raise NotImplementedError()
def visit_jbyte(self):
raise NotImplementedError()
def visit_jshort(self):
raise NotImplementedError()
def visit_jint(self):
raise NotImplementedError()
def visit_jlong(self):
raise NotImplementedError()
def visit_jfloat(self):
raise NotImplementedError()
def visit_jdouble(self):
raise NotImplementedError()
def visit_jchar(self):
raise NotImplementedError()
def visit_jboolean(self):
raise NotImplementedError()
def visit_volatile( self ):
raise NotImplementedError()
def visit_const( self ):
raise NotImplementedError()
def visit_pointer( self ):
raise NotImplementedError()
def visit_reference( self ):
raise NotImplementedError()
def visit_array( self ):
raise NotImplementedError()
def visit_free_function_type( self ):
raise NotImplementedError()
def visit_member_function_type( self ):
raise NotImplementedError()
def visit_member_variable_type( self ):
raise NotImplementedError()
def visit_declarated( self ):
raise NotImplementedError()
def visit_restrict( self ):
raise NotImplementedError()
```
#### File: code/tools/msggen.py
```python
import StringIO
import time
import os
import string
import re
import sys
##
## Globals
##
g_curr_time = time.strftime( '%c' )
##
## Templates
##
prologue = """// Copyright (c) 2005-2009 <NAME>
//
// Distributed under the MIT license (See accompanying file
// LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
//
// this file was generated by msggen.py
"""
epilogue = """
} // namespace jag
/** EOF @file */
"""
msg_header_file="""
#include <core/errlib/except.h>
#include <boost/format/format_fwd.hpp>
#include <boost/shared_ptr.hpp>
namespace jag
{
${header_cont}
"""
msg_impl_file="""
#include <${msg_include}>
#include <boost/format.hpp>
using namespace boost;
namespace jag
{
static
shared_ptr<format> my_fmt( const char* fmtstr )
{
shared_ptr<format> fmter( new format(fmtstr) );
fmter->exceptions(io::no_error_bits);
return fmter;
}
${impl_cont}
"""
msg_decl="""
struct msg_${msg_name}
{
boost::shared_ptr<boost::format> m_fmt;
public:
msg_${msg_name}( ${ctor_args} );
operator msg_info_t() const;
static unsigned msg_id();
};
"""
msg_def="""
msg_${msg_name}::msg_${msg_name}( ${ctor_args} )
{
m_fmt = my_fmt( \"${msg_txt}\" );
*m_fmt ${fmt_action};
}
msg_${msg_name}::operator msg_info_t() const
{
return msg_info_t( msg_id(), m_fmt->str() );
}
unsigned msg_${msg_name}::msg_id()
{
return ${msg_id};
}
"""
try:
import hashlib
md5_creator = hashlib.md5
except ImportError:
import md5
md5_creator = md5.new
def get_guards( fname ):
if os.path.splitext(fname)[1]!='.h':
return '',''
m = md5_creator()
m.update(os.path.basename(fname))
guard = 'JAG_' + m.hexdigest()
return string.Template( '#ifndef ${guard}\n#define ${guard}\n' ).substitute( locals()),\
string.Template( '#endif // ${guard}\n' ).substitute( locals())
re_fmt_plh = re.compile( "%([0-9])(.)%" )
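# Placeholder sketch (added note): message texts use markers like %1s% or %2i%,
# where the digit is the one-based argument position and the letter its type
# ('s' -> char const*, 'i' -> int). handler_msg() below turns e.g.
# "cannot open %1s%" into a constructor taking "char const* p1" and the
# boost::format string "cannot open %1%".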
def handler_msg( opts, line ):
# print 'msg:', line
msg_id, msg_name, msg_txt = line.split( None, 2 )
# form message id
msg_id = int(msg_id)
assert msg_id < 65536 # msg id is 16bit
msg_id += opts['module_id']
msg_id = "0x%x" % msg_id
if msg_id in opts.setdefault( 'msgids', set() ):
assert not "duplicate ids"
opts['msgids'].add( msg_id )
# form formatting args
fmtargs_s = set()
for item in re_fmt_plh.findall( msg_txt ):
fmtargs_s.add( item )
fmtargs_l = [ (int(n), f) for n,f in fmtargs_s ]
fmtargs_l.sort()
ctor_args, fmt_action = [], ''
arg2type = { 'i' : 'int',\
's' : 'char const*' }
for i, (n,f) in enumerate( fmtargs_l ):
assert i+1 == n # check missing fmt arguments (one-based)
ctor_args.append( "%s p%d" % (arg2type[f], n ) )
fmt_action = fmt_action + " %% p%d" % n
ctor_args = ", ".join( ctor_args )
# remove type specifier from the fmt str
msg_txt = re_fmt_plh.sub( lambda m: '%%%d%%' % int(m.group(0)[1]), msg_txt )
# write message declaration and definition
opts['msg_h'].write( string.Template( msg_decl ).substitute( locals() ) )
opts['msg_c'].write( string.Template( msg_def ).substitute( locals() ) )
def finalize( fname, buff ):
fout = open( fname, 'wb' )
guard_start, guard_end = get_guards( fname )
if fname.endswith( 'cpp' ):
fout.write( '#include "precompiled.h"\n' )
fout.write( prologue )
fout.write( guard_start )
fout.write( buff )
fout.write( epilogue )
fout.write( guard_end )
fout.close()
remsg_head = re.compile( '\[messages=([0-9]+)\]' )
def main( infile, impl_fname, header_fname ):
opts = dict( msg_h = StringIO.StringIO(),\
msg_c = StringIO.StringIO() )
handler = None
for line in open( infile ):
line = line.strip()
if not line or line[0] == '#':
continue
else:
m = remsg_head.match( line )
if m:
opts['module_id']=int(m.group(1))
assert opts['module_id'] < 256 # module id is 8 bit
opts['module_id'] = opts['module_id'] << 16
handler=handler_msg
continue
assert handler
handler( opts, line )
msg_include = os.path.basename( header_fname )
header_cont = opts['msg_h'].getvalue()
finalize( header_fname, string.Template( msg_header_file ).substitute( locals() ) )
impl_cont = opts['msg_c'].getvalue()
finalize( impl_fname, string.Template( msg_impl_file ).substitute( locals() ) )
if __name__ == "__main__":
# main( "msgdef.jmsg", "msg.cxx", "msg.h" )
# sys.exit(0)
if len(sys.argv) != 4:
print "usage msggen <msgdef_file> <impl_file> <header_file>"
sys.exit(1)
main( sys.argv[1], sys.argv[2], sys.argv[3] )
```
#### File: tools/source_parsing/doxyxml.py
```python
import os
import new
import sys
try:
import xml.dom.minidom
from xml.dom.ext.Dom2Sax import Dom2SaxParser
import xml.xpath
except ImportError:
# these are needed only for the documentation build
pass
def normalize_cpp_id( id_ ):
if id_.startswith('::'):
return id_[2:]
return id_
def nodeattr( node, key ):
return node.attributes[(None,key)].nodeValue
###########################################################################
# saxhandler wraps
class saxhandler_with_filtering:
"""filters out specified elements"""
def __init__( self, h, elements ):
self.h = h
self.elements = elements
self.blocked = []
def startElement( self, name, attrs ):
if not self.blocked:
if name in self.elements:
self.blocked.append( name )
else:
self.h.startElement( name, attrs )
def endElement( self, name ):
if name in self.elements:
assert self.blocked[-1] == name
self.blocked.pop()
elif not self.blocked:
self.h.endElement( name )
def characters( self, ch ):
if not self.blocked:
self.h.characters( ch )
class saxhandler_first_node:
"""lets through only the first node"""
def __init__( self, h ):
self.h = h
self.s = []
self.done = False
def startElement( self, name, attrs ):
if not self.done:
self.s.append( name )
self.h.startElement( name, attrs )
def endElement( self, name ):
if not self.done:
self.h.endElement( name )
self.s.pop()
if not self.s:
self.done = True
def characters( self, ch ):
if not self.done:
self.h.characters( ch )
###########################################################################
# object hierarchy
class doxybase(object):
def __init__( self, node, doxy ):
self.node_ = node
self.doxy = doxy
def is_internal(self):
for xpath in ['detaileddescription/internal',
'briefdescription/internal']:
if xml.xpath.Evaluate(xpath, self.node_):
return True
return False
def is_publicly_documented(self):
for xpath in ['detaileddescription//jag_undocumented',
'briefdescription//jag_undocumented']:
if xml.xpath.Evaluate(xpath, self.node_):
return False
return True
def _process_node( self, xpath, handler, allow_empty=False, first_child=True, take_parent=False ):
nodes = xml.xpath.Evaluate(xpath, self.node_)
if not nodes and allow_empty:
return
assert len(nodes)==1
p = Dom2SaxParser()
p.setContentHandler(handler)
node = first_child and nodes[0].firstChild or nodes[0]
if take_parent:
node = node.parentNode
p.parse(node)
def detailed_desc(self, handler):
hwrap = saxhandler_with_filtering(handler, set( ['simplesect', 'parameterlist'] ) )
self._process_node( 'detaileddescription', hwrap )
def brief_desc(self, handler):
self._process_node( 'briefdescription', handler )
def parameters_desc(self, handler):
hwrap = saxhandler_first_node(handler)
# @kind="exception" -> exceptions
self._process_node( 'detaileddescription/para/parameterlist[@kind="param"]', hwrap, True, False )
def simplesect_desc(self, handler, kind ):
hwrap = saxhandler_first_node(handler)
self._process_node( 'detaileddescription/para/simplesect[@kind="%s"]'%kind, hwrap, True, False )
# To add a custom alias go to file doxyfile-template and adjust the
# ALIAS option. The alias should look like this:
#
# ALIAS = jag_alias="\par My Alias.\n\xmlonly<jag_alias/>\endxmlonly"
#
# Details:
# If we use "@jag_alias my alias text" somewhere in the documentation
# then Doxygen inserts <jag_alias/> as a sibling of "my alias
# text". So we need to find <jag_alias/> element first and then
# process its parent.
#
def custom_alias_desc(self, handler, alias):
hwrap = saxhandler_with_filtering(handler, set(['title', alias]))
self._process_node( 'detaileddescription/para/simplesect[@kind="par"]/para/%s' %
alias, hwrap, True, False, take_parent=True )
def id(self):
return nodeattr( self.node_, 'id' )
class class_d(doxybase):
def __init__( self, node, doxy ):
doxybase.__init__( self, node, doxy )
def method( self, name ):
xpath = 'sectiondef/memberdef[@kind="function"]/name/text()'
nodes = [ node for node in xml.xpath.Evaluate(xpath, self.node_) if node.nodeValue == name]
assert len(nodes) == 1
return memfun_d( nodes[0].parentNode.parentNode, self.doxy )
class memfun_d(doxybase):
def __init__( self, node, doxy ):
doxybase.__init__( self, node, doxy )
assert self.node_.parentNode.nodeName == 'sectiondef'
def section( self ):
""" ret values: 'user-defined', 'public-func' """
return nodeattr( self.node_.parentNode, 'kind' )
def section_header( self ):
assert self.section() == 'user-defined'
return xml.xpath.Evaluate('header/text()', self.node_.parentNode)[0].nodeValue
def section_desc( self, handler ):
"""returns node"""
assert self.section() == 'user-defined'
nodes = xml.xpath.Evaluate('description', self.node_.parentNode)
if nodes:
assert len(nodes) == 1
node = nodes[0]
p = Dom2SaxParser()
p.setContentHandler(handler)
p.parse(node.firstChild)
class enumeration_d(doxybase):
def __init__( self, node, doxy ):
doxybase.__init__( self, node, doxy )
def value( self, value_str ):
xpath = 'enumvalue/name/text()'
nodes = [ node for node in xml.xpath.Evaluate(xpath, self.node_) if node.nodeValue == value_str]
assert len(nodes) == 1
return enum_value_d( nodes[0].parentNode.parentNode, self.doxy )
class enum_value_d(doxybase):
def __init__( self, node, doxy ):
doxybase.__init__( self, node, doxy )
class function_d(doxybase):
def __init__( self, node, doxy ):
doxybase.__init__( self, node, doxy )
###########################################################################
class Doxy:
def __init__( self, dir_ ):
self.dir_ = os.path.abspath( dir_ )
self.cache = {}
self.index = self._get_doc( 'index.xml' )
def _get_doc( self, name ):
if name not in self.cache:
doc = xml.dom.minidom.parse(os.path.join(self.dir_, name)).documentElement
self.cache[name] = doc
return self.cache[name]
def _generic_find( self, xpath, name ):
name = normalize_cpp_id( name )
nodes = [ node for node in xml.xpath.Evaluate(xpath, self.index) if node.nodeValue == name]
if len(nodes) != 1:
raise RuntimeError( "Doxy: number of nodes for '%s' is %d" % (name,len(nodes)) )
return nodes[0].parentNode.parentNode
def find_class( self, name ):
xpath = 'compound[@kind="class"]/name/text()'
cls_node = self._generic_find( xpath, name )
xmlfile = nodeattr( cls_node, 'refid' ) + '.xml'
doc = self._get_doc( xmlfile )
return class_d( doc.childNodes[1], self )
def _find_in_ns( self, type_, typedef, name ):
xpath = 'compound[@kind="namespace"]/member[@kind="%s"]/name/text()' % type_
ns, entity = normalize_cpp_id( name ).rsplit( '::', 1 )
entity_node = self._generic_find( xpath, entity )
ns_node = entity_node.parentNode
assert xml.xpath.Evaluate("name/text()", ns_node)[0].nodeValue == ns
ns_xmlfile = nodeattr( ns_node, 'refid' ) + '.xml'
ns_doc = self._get_doc( ns_xmlfile )
entity_id = nodeattr( entity_node, 'refid' )
xpath = 'compounddef/sectiondef[@kind="%s"]/memberdef[@kind="%s"][@id="%s"]' % ( typedef, type_, entity_id )
return xml.xpath.Evaluate(xpath, ns_doc)[0]
def find_enumeration( self, name ):
return enumeration_d( self._find_in_ns( 'enum', 'enum', name), self )
def find_function( self, name ):
return function_d( self._find_in_ns( 'function', 'func', name), self )
def get_comments_w32( doxycfg, xmldir ):
for drive in ['c', 'd']:
doxyexe = drive + ':/Progra~1/doxygen/bin/doxygen.exe'
if os.path.isfile(doxyexe):
break
in_, out = os.popen4(doxyexe + ' -')
in_.write( doxycfg )
in_.close()
doxyout = out.read()
ret = out.close()
if ret is None:
return Doxy( xmldir ), doxyout
else:
raise RuntimeError( doxyout + 'Doxygen failed with code %d' % ret )
pass
def get_comments_linux( doxycfg, xmldir ):
in_, out = os.popen4( 'doxygen -' )
in_.write( doxycfg )
in_.close()
doxyout = out.read()
ret = out.close()
if ret is None:
return Doxy( xmldir ), doxyout
else:
raise RuntimeError( doxyout + 'Doxygen failed with code %d' % ret )
pass
def get_comments( doxycfg, xmldir ):
if sys.platform.startswith( 'linux' ):
return get_comments_linux( doxycfg, xmldir )
else:
return get_comments_w32( doxycfg, xmldir )
###########################################################################
# test
def _dump_method( m ):
print 'internal:', m.is_internal()
print 'section:', m.section()
if m.section()=='user-defined':
print 'section header:', m.section_header()
print 'section description:', m.section_desc()
if __name__ == "__main__":
d = Doxy( 'c:/Code/cpp/sandbox/jagbase/code/tools/source_parsing/xml-jagbase' )
# d = Doxy('c:/Code/python/sig/xml/')
# c = d.find_class( "::Strs::DocPlatform::IStoreManager" )
# for m_str in [ 'init_internal', 'typemapped' ]:
# m = c.method( m_str )
# print '>>', m_str
# _dump_method( m )
# e = d.find_enumeration( "::Strs::DocPlatform::RequestPriority" )
# for val in ['UNDEFINED_internal', 'ASAP', 'NO_RUSH']:
# ev = e.value( val )
# print val, '-', ev.is_internal() and 'Internal' or 'Public'
# f = d.find_function( "::Strs::DocPlatform::factory" )
```
|
{
"source": "jgrewe/odml-ui",
"score": 2
}
|
#### File: odml-ui/odmlui/EditorTab.py
```python
from gi import pygtkcompat
pygtkcompat.enable()
pygtkcompat.enable_gtk(version='3.0')
import gtk
import os.path
import odml
import odml.validation
from odml.tools.odmlparser import ODMLReader, ODMLWriter, allowed_parsers
from .CommandManager import CommandManager
from .Helpers import uri_to_path, get_parser_for_uri, get_extension, \
create_pseudo_values, get_parser_for_file_type
from .MessageDialog import ErrorDialog
from .treemodel import event
from .ValidationWindow import ValidationWindow
class EditorTab(object):
"""
Represents a Document Object in the Editor
"""
file_uri = None
edited = 0
def __init__(self, window, cmdm=None):
if cmdm is None:
cmdm = CommandManager()
cmdm.enable_undo = self.enable_undo
cmdm.enable_redo = self.enable_redo
cmdm.error_func = window.command_error
self.command_manager = cmdm
self.document = None
self.window = window
self._clones = [self]
def new(self, doc=None):
"""
initialize a new document
"""
if doc is None:
doc = odml.Document()
sec = odml.Section(name="Default Section")
doc.append(sec)
self.window.registry.add(doc)
self.document = doc
self.file_uri = None
def parse_properties(self, odml_sections):
for i in odml_sections:
create_pseudo_values(i.properties)
self.parse_properties(i.sections)
def load(self, uri):
self.file_uri = uri
file_path = uri_to_path(uri)
parser = get_parser_for_uri(file_path)
odml_reader = ODMLReader(parser=parser)
try:
self.document = odml_reader.from_file(file_path)
except Exception as e:
ErrorDialog(None, "Error while parsing '%s'" % file_path, str(e))
return False
self.document.finalize()
self.parse_properties(self.document.sections)
self.window.registry.add(self.document)
self.window._info_bar.show_info("Loading of %s done!" % (os.path.basename(file_path)))
return True
def reset(self):
self.edited = 0 # initialize the edit stack position
self.command_manager.reset()
self.enable_undo(enable=False)
self.enable_redo(enable=False)
@property
def is_modified(self):
return self.edited != len(self.command_manager)
def save_if_changed(self):
"""
if the document was modified, ask the user if he or she wants to save the document
returns false if the user cancelled the action
"""
if not self.is_modified:
return True
dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL,
gtk.MESSAGE_INFO, gtk.BUTTONS_YES_NO,
"%s has been modified. Do you want to save your changes?" % self.file_uri)
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.set_title("Save changes?")
dialog.set_default_response(gtk.RESPONSE_CANCEL)
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_CANCEL:
return False
if response == gtk.RESPONSE_NO:
return True
return self.window.save(None)
def save(self, uri, file_type=None):
# Mandatory document validation before save to avoid
# not being able to open an invalid document.
self.remove_validation()
validation = odml.validation.Validation(self.document)
self.document.validation_result = validation
for e in self.document.validation_result.errors:
if e.is_error:
self.window._info_bar.show_info("Invalid document. Please fix errors (red) before saving.")
self.validate()
return
self.document.clean()
parser = None
if file_type:
parser = get_parser_for_file_type(file_type)
if not parser:
parser = get_parser_for_uri(uri)
odml_writer = ODMLWriter(parser=parser)
file_path = uri_to_path(uri)
ext = get_extension(file_path)
if ext != parser:
file_path += ".%s" % parser.lower()
try:
odml_writer.write_file(self.document, file_path)
except Exception as e:
self.window._info_bar.show_info("Save failed: %s" % e)
return
self.document.finalize() # undo the clean
self.window._info_bar.show_info("%s was saved" % (os.path.basename(file_path)))
self.edited = len(self.command_manager)
return True # TODO return false on any error and notify the user
def enable_undo(self, enable=True):
for tab in self._clones:
tab._enable_undo(enable)
def _enable_undo(self, enable):
if self.window.current_tab is self:
self.window.enable_undo(enable)
def enable_redo(self, enable=True):
for tab in self._clones:
tab._enable_redo(enable)
def _enable_redo(self, enable=True):
if self.window.current_tab is self:
self.window.enable_redo(enable)
def clone(self, klass=None):
if klass is None:
klass = self.__class__
ntab = klass(self.window, self.command_manager)
self._clones.append(ntab)
ntab._clones = self._clones
ntab.file_uri = self.file_uri
ntab.document = self.document
return ntab
def validate(self):
"""check the document for errors"""
self.remove_validation()
validation = odml.validation.Validation(self.document)
self.document.validation_result = validation
if len(validation.errors) > 0:
self.update_validation_error_objects(validation.errors)
ValidationWindow(self).show()
else:
self.window._info_bar.show_info("The document is valid. No errors found.")
self.remove_validation()
def update_validation_error_objects(self, errors):
"""
send out a change event for all error-affected objects
so that the gui can refresh these
"""
for err in errors:
c = event.ChangeContext(('_error', True))
c.post_change = True
c.action = "set"
c.pass_on(err.obj)
def remove_validation(self):
"""remove any dangling validation references"""
if not hasattr(self.document, "validation_result"):
return
errors = self.document.validation_result.errors
del self.document.validation_result
self.update_validation_error_objects(errors)
def get_name(self):
"""return the filename of this tab's document"""
return os.path.basename(str(self.file_uri))
def update_label(self):
"""update the tab label with the current filename"""
self.label.set_text(self.get_name())
def close(self):
"""
any cleanup?
"""
self._clones.remove(self)
```
#### File: odml-ui/odmlui/PropertyView.py
```python
from gi import pygtkcompat
pygtkcompat.enable()
pygtkcompat.enable_gtk(version='3.0')
import gtk
import odml
import odml.dtypes as dtypes
import odml.terminology as terminology
from odml import DType
from . import commands
from . import TextEditor
from .DragProvider import DragProvider
from .Helpers import create_pseudo_values
from .TreeView import TerminologyPopupTreeView
from .treemodel import PropertyModel, ValueModel
from .dnd.odmldrop import OdmlDrag, OdmlDrop
from .dnd.targets import ValueDrop, PropertyDrop, SectionDrop
from .dnd.text import TextDrag, TextDrop, TextGenericDropForPropertyTV
COL_KEY = 0
COL_VALUE = 1
class PropertyView(TerminologyPopupTreeView):
"""
The Main treeview for editing properties and their value-attributes
"""
_section = None
def __init__(self, registry):
super(PropertyView, self).__init__()
tv = self._treeview
for name, (id, propname) in PropertyModel.ColMapper.sort_iteritems():
if name == "Type":
combo_col = self.create_odml_types_col(id, name, propname)
tv.append_column(combo_col)
else:
renderer, column = self.add_column(
name=name,
edit_func=self.on_edited,
id=id, data=propname)
if name == "Value":
tv.set_expander_column(column)
tv.set_headers_visible(True)
tv.set_rules_hint(True)
tv.show()
# set up our drag provider
dp = DragProvider(self._treeview)
_exec = lambda cmd: self.execute(cmd)
vd = ValueDrop(exec_func=_exec)
pd = PropertyDrop(exec_func=_exec)
sd = SectionDrop(exec_func=_exec)
for target in [
OdmlDrag(mime="odml/property-ref", inst=odml.property.Property),
TextDrag(mime="odml/property", inst=odml.property.Property),
OdmlDrag(mime="odml/value-ref", inst=ValueModel.Value),
TextDrag(mime="odml/value", inst=ValueModel.Value),
TextDrag(mime="TEXT"),
OdmlDrop(mime="odml/value-ref", target=vd, registry=registry, exec_func=_exec),
OdmlDrop(mime="odml/property-ref", target=pd, registry=registry, exec_func=_exec),
OdmlDrop(mime="odml/section-ref", target=sd, registry=registry, exec_func=_exec),
TextDrop(mime="odml/value", target=vd),
TextDrop(mime="odml/property", target=pd),
TextDrop(mime="odml/section", target=sd),
TextGenericDropForPropertyTV(exec_func=_exec),
]:
dp.append(target)
dp.execute = _exec
dp.connect()
def dtype_renderer_function(self, tv_column, cell_combobox, tree_model, tree_iter, data):
"""
Defines a custom cell renderer function, which is executed for
every cell of the column, and sets the DType value from the underlying model.
Argument 'Data': Here, it defines the column number in the Tree View.
"""
cell_data = tree_model.get(tree_iter, data)[0]
cell_combobox.set_property('markup', cell_data)
@property
def section(self):
return self._section
@section.setter
def section(self, section):
if self._section is section and self.model:
return
self._section = section
if self.model:
self.model.destroy()
self.model = PropertyModel.PropertyModel(section)
@property
def model(self):
return self._treeview.get_model()
@model.setter
def model(self, new_value):
self._treeview.set_model(new_value)
def on_selection_change(self, tree_selection):
(model, tree_iter) = tree_selection.get_selected()
if not tree_iter:
return
obj = model.get_object(tree_iter)
self.on_property_select(obj)
# Always expand multi value properties when selected
is_multi_value = isinstance(obj, odml.property.Property) and len(obj.value) > 1
if is_multi_value:
tree_selection.get_tree_view().expand_row(model.get_path(tree_iter), False)
def on_property_select(self, prop):
"""called when a different property is selected"""
pass
def on_get_tooltip(self, model, path, iter, tooltip):
"""
set the tooltip text, if the gui queries for it
"""
obj = model.get_object(iter)
doc = obj.document
if doc and hasattr(doc, "validation_result"):
errors = doc.validation_result[obj]
if len(errors) > 0:
tooltip.set_text("\n".join([e.msg for e in errors]))
return True
def on_object_edit(self, tree_iter, column_name, new_text):
"""
called upon an edit event of the list view
updates the underlying model property that corresponds to the edited cell
"""
prop = tree_iter._obj
# are we editing the first_row of a <multi> value?
first_row = not tree_iter.parent
first_row_of_multi = first_row and tree_iter.has_child
if not first_row and column_name != "pseudo_values":
return
# Do not replace multiple values with pseudo_value placeholder text.
if first_row_of_multi and column_name == "pseudo_values" and new_text == "<multi>":
return
cmd = None
# if we edit another attribute (e.g. unit), set this for all values of this property
if first_row_of_multi and column_name == "pseudo_values":
# editing multiple values of a property at once
cmds = []
for value in prop.pseudo_values:
cmds.append(commands.ChangeValue(
object=value,
attr=[column_name, "value"],
new_value=new_text))
cmd = commands.Multiple(cmds=cmds)
else:
# first row edit event for the property, so switch to appropriate object
# - Only if the 'value' column is edited, edit the pseudo-value object.
# - Else, edit the property object
            if column_name == "pseudo_values" and first_row:
                prop = prop.pseudo_values[0]
                column_name = [column_name, "value"]  # backup the value attribute too
cmd = commands.ChangeValue(
object=prop,
attr=column_name,
new_value=new_text)
if cmd:
self.execute(cmd)
def get_popup_menu_items(self):
model, path, obj = self.popup_data
menu_items = self.create_popup_menu_items("Add Property", "Empty Property", model.section,
self.add_property, lambda sec: sec.properties,
lambda prop: prop.name, stock="odml-add-Property")
if obj is not None: # can also add value
prop = obj
if hasattr(obj, "_property"): # we care about the properties only
prop = obj._property
value_filter = lambda prop: [val for val in prop.values if val.value is not None and val.value != ""]
for item in self.create_popup_menu_items("Add Value", "Empty Value", prop, self.add_value,
value_filter, lambda val: val.value, stock="odml-add-Value"):
menu_items.append(item)
for item in self.create_popup_menu_items("Set Value", "Empty Value", prop, self.set_value,
value_filter, lambda val: val.value):
if item.get_submenu() is None:
continue # don't want a sole Set Value item
menu_items.append(item)
val = obj
if prop is obj:
val = prop.pseudo_values[0] if len(prop.pseudo_values) == 1 else None
if val is not None and val.dtype == "text":
menu_items.append(self.create_menu_item("Edit text in larger window", self.edit_text, val))
# cannot delete properties that are linked (they'd be override on next load), instead allow to reset them
merged = prop.get_merged_equivalent()
if prop is obj and merged is not None:
if merged != obj:
menu_items.append(self.create_menu_item("Reset to merged default", self.reset_property, obj))
else:
menu_items.append(self.create_popup_menu_del_item(obj))
return menu_items
def edit_text(self, widget, val):
"""
popup menu action: edit text in larger window
"""
t = TextEditor.TextEditor(val, "value")
t.execute = self.execute
def reset_property(self, widget, prop):
"""
popup menu action: reset property
"""
dst = prop.get_merged_equivalent().clone()
cmd = commands.ReplaceObject(obj=prop, repl=dst)
self.execute(cmd)
def set_value(self, widget, prop_value_pair):
"""
popup menu action: set value
"""
(prop, val) = prop_value_pair
model, path, obj = self.popup_data
if val is None:
val = ValueModel.Value(prop)
else:
val = val.clone()
if obj is prop:
obj = prop.values[0]
prop = obj._property
# first append, then remove to keep the constraint that a property
# will always hold at least one value
cmd = commands.Multiple(cmds=[
commands.AppendValue(obj=prop, val=val),
commands.DeleteObject(obj=obj)
])
self.execute(cmd)
def add_value(self, widget, obj_value_pair):
"""
popup menu action: add value
add a value to the selected property
"""
(obj, val) = obj_value_pair
if val is None:
val = ValueModel.Value(obj)
else:
val = val.clone()
cmd = commands.AppendValue(obj=obj.pseudo_values, val=val)
self.execute(cmd)
# Reset model if the Value changes from "normal" to MultiValue.
if self.model and len(obj.value) > 1:
self.model.destroy()
self.model = PropertyModel.PropertyModel(obj.parent)
# Reselect updated object to update view.
self.select_object(obj)
def add_property(self, widget, obj_prop_pair):
"""
popup menu action: add property
add a property to the active section
"""
(obj, prop) = obj_prop_pair
if prop is None:
name = self.get_new_obj_name(obj.properties, prefix='Unnamed Property')
prop = odml.Property(name=name, dtype='string')
# The default value part should be put in odML core library
prop._value = [dtypes.default_values('string')]
create_pseudo_values([prop])
else:
prefix = prop.name
name = self.get_new_obj_name(obj.properties, prefix=prefix)
prop = prop.clone()
prop.name = name
cmd = commands.AppendValue(obj=obj, val=prop)
self.execute(cmd)
# Maybe define a generic Combo Box column creator ?
def create_odml_types_col(self, id, name, propname):
# Get all the members of odml.DType, which are not callable and are not `private`.
dtypes_list = [x for x in dir(DType) if not callable(getattr(DType, x)) and not x.startswith('__')]
dtypes_combo_list = gtk.ListStore(str)
for i in dtypes_list:
dtypes_combo_list.append([i])
combo_renderer = gtk.CellRendererCombo.new()
combo_renderer.set_property("has-entry", False)
combo_renderer.set_property("text-column", 0)
combo_renderer.set_property("model", dtypes_combo_list)
combo_renderer.set_property("editable", True)
combo_renderer.connect("edited", self.on_edited, propname)
combo_col = gtk.TreeViewColumn(name, combo_renderer)
combo_col.set_min_width(40)
combo_col.set_resizable(True)
combo_col.set_cell_data_func(combo_renderer, self.dtype_renderer_function, id)
return combo_col
```
#### File: odmlui/treemodel/TreeIters.py
```python
import sys
from . import GenericIter
from . import nodes
from .ValueModel import Value
class PropIter(GenericIter.GenericIter):
"""
An iterator for a Property
returns ValueIter objects if queried for children.
    Since Values don't have a parent relationship, the PropIter will
    pass a corresponding parent attribute (the Property object) to the ValueIter.
As odML supports multi-values, each property may or may not have multiple children.
"""
def get_value(self, attr):
if attr == "pseudo_values":
if self.has_child:
                return self.get_multivalue(attr)
else:
return self.get_singlevalue(attr)
else:
return self.escape(getattr(self._obj, attr))
    def get_multivalue(self, name):
# Most of the stuff is empty and handled by the ValueIter
if name == "pseudo_values":
return self.escape("<multi>")
return ""
def get_singlevalue(self, name):
#here we proxy the value object
if len(self._obj.pseudo_values) == 0:
return ""
return ValueIter(self._obj.pseudo_values[0]).get_value(name)
@property
def has_child(self):
return self.n_children > 1
@property
def n_children(self):
return len(self._obj.pseudo_values)
@property
def parent(self):
return None
class ValueIter(GenericIter.GenericIter):
"""
An iterator for a Value object
"""
def get_value(self, attr):
if attr == "pseudo_values":
value = self._obj.get_display()
# Some issues with the rendering of `unicode` in Python 2 directly
# to Tree Column cell renderer. Hence, first encode it here.
if ValueIter.is_python2:
value = value.encode('utf-8')
# If the value is an empty string, render a placeholder text.
if value == '':
value = '<i>n/a</i>'
return value
        # Return an empty string for anything else
return ""
class SectionIter(GenericIter.GenericIter):
@property
def parent(self):
if not self._obj.parent:
return None
if not self._obj.parent.parent: # the parent is the document root
return None
return super(SectionIter, self).parent
class SectionPropertyIter(GenericIter.GenericIter):
@property
def n_children(self):
return len(self._obj.properties)
# associate the odml-classes to the corresponding iter-classes
nodes.Section.IterClass = SectionIter
nodes.Property.IterClass = PropIter
Value.IterClass = ValueIter
```
#### File: odmlui/treemodel/ValueModel.py
```python
import odml.base as base
import odml.format as format
import odml.dtypes as dtypes
from . import nodes, event
class ValueNode(nodes.ParentedNode):
def path_from(self, path):
raise TypeError("Value objects have no children")
def path_to(self, child):
raise TypeError("Value objects have no children")
class ValueFormat(format.Format):
_name = "value"
_args = {}
_map = {}
class Value(base.baseobject, base._baseobj, ValueNode, event.ModificationNotifier):
"""
    Since the odML value node has been merged into odml.Property and is
    only available as a plain python list, we cannot render it in the Editor
    UI directly. So we make use of this wrapper class, which acts as a mapper
    between the model values and the values rendered in the Editor UI.
A list of objects from this class is added as an additional attribute to
    the original `odml.Property` node, as `pseudo_values`. The Editor interacts
    only with these pseudo_values and, internally, they update the original
    property._value list.
"""
_Changed = event.Event("value")
_Changed.finish = event.pass_on_change
_format = ValueFormat
def __init__(self, parent, index=None):
self._property = parent
if index is None: # Instantiate a new odML value
index = len(self._property.value)
dtype = self.parent.dtype
default_value = dtypes.default_values(dtype)
self.parent.value.append(default_value)
assert(isinstance(index, int))
self._index = index
def __repr__(self):
return "PseudoValue <%s>" % str(self.pseudo_values)
@property
def parent(self):
"""the property containing this value"""
return self._property
@property
def dtype(self):
"""
        Returns the parent DType
"""
return self.parent.dtype
@property
def pseudo_values(self):
"""
Return a single element from the parent property's value list.
"""
return self.parent._value[self._index]
@pseudo_values.setter
def pseudo_values(self, new_string):
"""
First, try to check if the new value fits in the parent property's
dtype. If it does, then update the value.
"""
prop_dtype = self.parent.dtype
new_value = dtypes.get(new_string, prop_dtype)
self.parent._value[self._index] = new_value
@property
def value(self):
return self.pseudo_values
def can_display(self, text=None, max_length=-1):
"""
return whether the content of this can be safely displayed in the gui
"""
if text is None:
text = self.pseudo_values
if text is None:
return True
if max_length != -1 and len(text) > max_length:
return False
if "\n" in text or "\t" in text:
return False
return True
def get_display(self, max_length=-1):
"""
return a textual representation that can be used for display
typically takes the first line (max *max_length* chars) and adds '…'
"""
text = str(self.pseudo_values)
# Always escape "&" and "<" since they break the view otherwise.
        text = text.replace("&", "&amp;").replace("<", "&lt;")
if self.can_display(text, max_length):
return text
text = text.split("\n")[0]
if max_length != -1:
text = text[:max_length]
if self.can_display(text, max_length):
return (text + u'…').encode('utf-8')
return "(%d bytes)" % len(self._value)
def reorder(self, new_index):
return self._reorder(self.parent.value, new_index)
def clone(self):
obj = base.baseobject.clone(self)
obj._property = None
return obj
```
|
{
"source": "jgrewe/relacsed_nix",
"score": 2
}
|
#### File: rlxnix/base/repro.py
```python
import nixio
import logging
from tqdm import tqdm
from .trace_container import TraceContainer, TimeReference
from .stimulus import Stimulus
from ..utils.util import nix_metadata_to_dict, metadata_to_json
from ..utils.data_trace import DataType
from ..utils.data_loader import DataLink, SegmentType
class ReProRun(TraceContainer):
"""This class represents the data of a RePro run. It offers access to the data and metadata.
"""
def __init__(self, repro_run: nixio.Tag, traces, relacs_nix_version=1.1):
"""Create a RePro instance that represent one run of a relacs RePro.
Parameters
----------
repro_run: nixio.Tag
            the nix tag that represents the repro run
traces: dict of rlxnix.DataTrace
Dict of trace infos.
relacs_nix_version: float
The mapping version number. Defaults to 1.1.
"""
super().__init__(repro_run, traces, relacs_nix_version=relacs_nix_version)
self._stimuli = []
self._metadata = None
def _get_signal_trace_map(self):
logging.critical("Repro._get_trace_map must be overwritten!")
@property
def metadata(self):
"""Returns the metadata for this ReProRun. The settings herein are the base settings of the RePro. They may vary for each stimulus. For a complete view use the ReProRun.stimulus_metadata property.
Returns:
--------
dictionary
The metadata dictionary
"""
if self._metadata is None:
self._metadata = nix_metadata_to_dict(self._tag.metadata)
return self._metadata
def add_stimulus(self, stimulus:Stimulus):
"""INTERNAL USE ONLY! Adds a stimulus to the list of stimuli run in the context of this RePro run.
Parameters
----------
stimulus : rlxnix.Stimulus
The stimulus that was run.
"""
self._stimuli.append(stimulus)
@property
def stimuli(self):
"""List of stimuli that were presented within the context of this RePro Run.
Returns:
--------
        list of rlxnix.Stimulus
            The Stimulus instances that provide access to the data during the stimulus output.
"""
return self._stimuli
@property
def stimulus_count(self):
return len(self._stimuli)
def trace_data(self, name, reference=TimeReference.Zero):
"""Get the data that was recorded while this repro was run.
        Parameters
        ----------
name: str
name of the referenced data trace e.g. "V-1" for the recorded voltage.
reference: TimeReference
Controls the time reference of the time axis and event times. If TimeReference.ReproStart is given all times will start after the Repro/Stimulus start. Defaults to TimeReference.Zero, i.e. all times will start at zero, the RePro/stimulus start time will be subtracted from event times and time axis.
Returns
-------
data: np.ndarray
            The recorded continuous or event data
time: np.ndarray
The respective time vector for continuous traces, None for event traces
"""
return self._trace_data(name, reference=reference)
@property
def stimulus_data_links(self) -> list:
"""Collection of rlxnix.DataLink objects for each stimulus presented in this ReproRun.
Returns
-------
list of rlxnix.DataLink
List of DataLink objects
"""
data_links = []
for s in tqdm(self.stimuli, disable=not(logging.root.level == logging.INFO)):
dl = s.data_link()
if dl is not None:
data_links.append(dl)
return data_links
@property
def data_link(self) -> DataLink:
""" Returns a DataLink object to the data recorded in this ReproRun
Returns
-------
rlxnix.DataLink
The DataLink object
"""
dataset = self.repro_tag._parent.name + ".nix"
block_id = self.repro_tag._parent.id
tag_id = self.repro_tag.id
type = SegmentType.ReproRun
mdata = metadata_to_json(self.metadata)
dl = DataLink(dataset, block_id, tag_id, type, self.start_time,
self.stop_time, metadata=mdata,
mapping_version=self._mapping_version)
return dl
def _check_stimulus(self, stimulus_index):
if stimulus_index >= len(self.stimuli) or stimulus_index < 0:
raise IndexError(f"Stimulus index {stimulus_index} is out of bounds for number of stimuli {len(self.stimuli)}")
def _check_trace(self, trace_name, data_type=DataType.Continuous):
"""Checks if the provided trace name is among the traces in the file and if the expected data type matches the expectation.
Parameters
----------
trace_name : str
The name of the trace.
data_type : DataType, optional
The expected trace type. If you expect a Continuous data type and the provided name points to event data False will be returned, by default DataType.Continuous
Returns
-------
bool
True if the trace was found and the data type matches the expectation, False otherwise.
"""
if trace_name is None:
logging.warning("Repro.check_trace: Trace name is not specified!")
return False
if trace_name not in self._tag.references:
logging.warning(f"Trace {trace_name} not found!")
return False
trace = self._trace_map[trace_name]
if trace.trace_type != data_type:
logging.warning(f"Data type of trace {trace.name} does not match expected data type (expected: {data_type}, found: {trace.trace_type}).")
return False
return True
def __str__(self) -> str:
info = "Repro: {n:s} \t type: {t:s}\n\tstart time: {st:.2f}s\tduration: {et:.2f}s"
return info.format(n=self.name, t=self.type, st=self.start_time, et=self.duration)
def __repr__(self) -> str:
repr = "ReproRun object for repro run {name} from {start:.4f} to {stop:.4f}s, Tag {id} at {pos}."
return repr.format(name=self.name, start=self.start_time, stop=self.stop_time, id=self.repro_tag.id, pos=hex(id(self)))
def __getitem__(self, key) -> Stimulus:
if isinstance(key, int):
return self._stimuli[key]
else:
raise KeyError(f"Key is invalid! {key} is not instance of int.")
def __len__(self) -> int:
return len(self.stimuli)
```
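A brief usage sketch for `ReProRun`: it assumes `rlxnix` is installed and that a relacs-flavoured nix file exists at the placeholder path (the same guard-and-skip pattern as the tests further below), and the trace name "V-1" follows the example given in the `trace_data` docstring.
```python
import os
import rlxnix as rlx

filename = os.path.join("data", "2021-11-11-aa.nix")  # placeholder path
if os.path.exists(filename):
    dataset = rlx.Dataset(filename)
    for repro in dataset.repro_runs():
        print(repro)                            # ReProRun.__str__ shown above
        print(len(repro), "stimuli presented")  # ReProRun.__len__
        settings = repro.metadata               # base RePro settings as a dict
        # recorded voltage during the whole repro run ("V-1" is an example name)
        data, time = repro.trace_data("V-1")
```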
#### File: plugins/efish/baseline.py
```python
import logging
import numpy as np
from .efish_ephys_repro import EfishEphys
class Baseline(EfishEphys):
"""Represents the run of the Baseline repro of the efish plugin-set.
"""
_repro_name = "BaselineActivity"
def __init__(self, repro_run, traces, relacs_nix_version=1.1) -> None:
super().__init__(repro_run, traces, relacs_nix_version)
@property
def baseline_rate(self):
"""Baseline spike rate.
Returns
-------
float
The average spike rate.
"""
return len(self.spikes()) / self.duration
@property
def baseline_cv(self):
"""Coefficient of variation of the interspike intervals of the baseline spike response. Depends on the spike times to be stored in the file.
The CV is defines as the standard deviation of the interspike intervals normalized to the average interspike interval and describes the regularity of the spontaneous spiking.
A CV of 0 indicates perfectly regular spiking while a value of 1 is typical for random poisson spiking.
Returns
-------
        float
            The coefficient of variation of the interspike intervals, 0.0 if no spikes are available.
"""
spikes = self.spikes()
if spikes is None or len(spikes) == 0:
logging.warn("There are no baseline spikes")
return 0.0
isis = np.diff(spikes)
return np.std(isis) / np.mean(isis)
@property
def eod_frequency(self):
""" Returns the EOD frequency (in Hz) of the fish. Depends on the eod times event signal to be present.
Returns
-------
float or None
the eod frequency in Hz, None if the eod times are not stored in the file.
"""
if "eod times" not in self._signal_trace_map:
logging.warning("EOD times are not stored in the file. You need to detect the eod times manually... ")
return None
return len(self.eod_times()) / self._duration
def serial_correlation(self, max_lags=50):
"""Returns the serial correlation of the baseline interspike intervals.
Parameters
----------
max_lags : int, optional
The number of lags to be calculated, by default 50
Returns
-------
np.ndarray
The serial correlations from lag 0 to max_lags -1
"""
if self.spikes() is None or len(self.spikes()) < max_lags:
return None
isis = np.diff(self.spikes())
unbiased = isis - np.mean(isis, 0)
norm = sum(unbiased ** 2)
a_corr = np.correlate(unbiased, unbiased, "same") / norm
a_corr = a_corr[int(len(a_corr) / 2):]
return a_corr[:max_lags]
```
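The docstrings above state that a CV of 1 is typical for Poisson spiking and show how the serial correlation of the interspike intervals is computed. The standalone snippet below (numpy only, synthetic data) reproduces both computations as a quick sanity check; it is an illustration, not part of the package.
```python
import numpy as np

# For a Poisson process the interspike intervals are exponentially distributed,
# so the CV (std/mean of the ISIs) should come out close to 1.
rng = np.random.default_rng(42)
isis = rng.exponential(scale=0.01, size=10000)
spikes = np.cumsum(isis)

cv = np.std(np.diff(spikes)) / np.mean(np.diff(spikes))
print(f"CV of a Poisson spike train: {cv:.3f}")  # ~1.0

# Serial correlation computed the same way as Baseline.serial_correlation above:
unbiased = isis - np.mean(isis)
norm = np.sum(unbiased ** 2)
a_corr = np.correlate(unbiased, unbiased, "same") / norm
a_corr = a_corr[len(a_corr) // 2:]
print(a_corr[:5])  # lag 0 is 1.0, higher lags are ~0 for independent ISIs
```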
#### File: plugins/efish/chirps.py
```python
import nixio
import numpy as np
import matplotlib.pyplot as plt
from .efish_ephys_repro import EfishEphys
class Chirps(EfishEphys):
_repro_name = "Chirps"
def __init__(self, repro_run: nixio.Tag, traces, relacs_nix_version=1.1):
super().__init__(repro_run, traces, relacs_nix_version=relacs_nix_version)
@property
def chirp_times(self):
""" The times of the artificial chirps of a given stimulus presentation.
Returns
-------
list
The chirp times relative to stimulus onset
str
The unit
"""
cts = []
unit = ""
for s in self.stimuli:
metadata = s.metadata
cts.append(metadata[s.name]["ChirpTimes"][0])
unit = s.metadata[s.name]["ChirpTimes"][1]
return cts, unit
@property
def beat_specification(self):
"""Returns the way the beat is specified. Will return either *absolute frequency* or "Relative EODf".
In the first case the beat frequency is given by the *delta_f* property, in the latter by the *relative_eodf* property.
Returns
-------
str
the beat selection setting of the Chirps RePro.
"""
spec = self.metadata["RePro-Info"]["settings"]["beatsel"][0][0]
return spec
@property
def relative_eodf(self):
"""The beat frequency specified relative to the EOD frequency of the fish.
Returns
-------
float
the releodf setting of the repro run.
"""
rel = self.metadata["RePro-Info"]["settings"]["releodf"][0][0]
return rel
@property
def delta_f(self):
"""The difference frequency to the recorded fish's EOD frequency for all stimulus presentations
Returns
-------
float
The dfs used.
str
the unit
"""
df = self.metadata["RePro-Info"]["settings"]["deltaf"][0][0]
unit = self.metadata["RePro-Info"]["settings"]["deltaf"][1]
return df, unit
@property
def chirp_duration(self):
"""The chirp durations of the stimulus presentations.
Returns
-------
float
The chirp duration.
str
the unit
"""
cd = self.metadata["RePro-Info"]["settings"]["chirpwidth"][0][0]
unit = self.metadata["RePro-Info"]["settings"]["chirpwidth"][1]
return cd, unit
@property
def chirp_size(self):
"""The size of the frequency excursion of the chirp.
Returns
-------
        float
            The chirp size, i.e. the frequency excursion of the chirp.
str
the unit
"""
cs = self.metadata["RePro-Info"]["settings"]["chirpsize"][0][0]
unit = self.metadata["RePro-Info"]["settings"]["chirpsize"][1]
return cs, unit
def _plot_axis(self, axis, x_data, y_data, spikes, chirp_times, ylabel):
axis.plot(x_data, y_data, lw=0.5, color="tab:blue", label="voltage")
axis.scatter(spikes, np.ones_like(spikes) * np.max(y_data), s=10, marker="*", c="tab:green", label="spikes")
axis.scatter(chirp_times, np.ones_like(chirp_times) * np.min(y_data), s=20, marker="o", c="tab:red", label="chirps")
axis.set_ylabel(ylabel)
axis.spines["top"].set_visible(False)
axis.spines["right"].set_visible(False)
axis.set_xlim([x_data[0], x_data[-1]])
def plot_overview(self, stimulus_index=0, filename=None):
"""[summary]
Parameters
----------
stimulus_index : int, optional
The stimulus index, by default 0
filename: str, optional
The filename for the figure. If not given, the plot will be shown. By default None
"""
spikes = self.spikes(stimulus_index=stimulus_index)
voltage, time = self.membrane_voltage(stimulus_index=stimulus_index)
eod, eod_time = self.local_eod(stimulus_index=stimulus_index)
stim, stim_time = self.stimulus_output(stimulus_index=stimulus_index)
chirp_times, _ = self.chirp_times
c_times = chirp_times[stimulus_index]
fig, axes = plt.subplots(ncols=1, nrows=3, sharex="all")
self._plot_axis(axes[0], time, voltage, spikes, c_times, "voltage [mV]")
axes[0].legend(fontsize=7, ncol=3, loc=(0.5, 1.05))
self._plot_axis(axes[1], eod_time, eod, spikes, c_times, "voltage [mV]")
self._plot_axis(axes[2], stim_time, stim, spikes, c_times, "voltage [mV]")
axes[-1].set_xlabel("time [s]")
if filename is not None:
fig.savefig(filename)
plt.close()
else:
plt.show()
```
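A hedged usage sketch for the `Chirps` accessors above; the file path is a placeholder, and the check on the class name assumes that the plugin machinery (not shown in this excerpt) returns `Chirps` instances for runs of the Chirps RePro.
```python
import os
import rlxnix as rlx

filename = os.path.join("data", "2021-11-11-aa.nix")  # placeholder path
if os.path.exists(filename):
    dataset = rlx.Dataset(filename)
    for repro in dataset.repro_runs():
        if type(repro).__name__ != "Chirps":
            continue
        df, df_unit = repro.delta_f
        cd, cd_unit = repro.chirp_duration
        cs, cs_unit = repro.chirp_size
        print(f"delta f: {df} {df_unit}, chirp duration: {cd} {cd_unit}, "
              f"chirp size: {cs} {cs_unit}")
        # write the overview figure for the first stimulus presentation
        repro.plot_overview(stimulus_index=0, filename="chirps_overview.png")
```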
#### File: rlxnix/test/test_data_loader.py
```python
import os
import nixio
import logging
import rlxnix as rlx
from rlxnix.utils.data_loader import SegmentType
def test_to_pandas():
filename = os.path.join("..", "..", "data", "2021-11-11-aa.nix")
if not os.path.exists(filename):
logging.warning(f"file {filename} not found! Skipping test 'test_data_loader.test_to_pandas'")
return
dataset = rlx.Dataset(filename)
repro_count = 0
stimulus_count = 0
for r in dataset.repro_runs():
repro_count += 1
stimulus_count += len(r.stimuli)
df = dataset.to_pandas()
assert len(df) == repro_count + stimulus_count
    assert len(df[df.segment_type == str(SegmentType.ReproRun)]) == repro_count
    assert len(df[df.segment_type == str(SegmentType.StimulusSegment)]) == stimulus_count
def test_from_pandas():
filename = os.path.join("..", "..", "data", "2021-11-11-aa.nix")
    if not os.path.exists(filename):
        logging.warning(f"file {filename} not found! Skipping test 'test_data_loader.test_from_pandas'")
        return
    dataset = rlx.Dataset(filename)
df = dataset.to_pandas()
assert len(df) > 0
dl = rlx.from_pandas(df, 100)
assert dl is None
dl = rlx.from_pandas(df, 10)
assert dl is not None
assert dl.tag_id == df["tag_id"].values[10]
dls = rlx.from_pandas(df, segment_type=SegmentType.ReproRun)
assert isinstance(dls, list)
for dl in dls:
assert dl.segment_type == str(SegmentType.ReproRun)
dls = rlx.from_pandas(df, segment_type=SegmentType.StimulusSegment)
assert isinstance(dls, list)
for dl in dls:
assert dl.segment_type == str(SegmentType.StimulusSegment)
```
#### File: rlxnix/utils/data_trace.py
```python
import logging
from .mappings import DataType, type_map
class DataTrace(object):
"""The DataTrace class represents a recorded data trace. The trace_type property holds whether the trace is an event or a continuously sampled trace. It further keeps the maximum number of samples and the maximum time information. It further provides access to the underlying nixio.DataArray.
"""
def __init__(self, data_array, mapping_version=1.1) -> None:
super().__init__()
event_type = type_map[mapping_version][DataType.Event]
continuous_type = type_map[mapping_version][DataType.Continuous]
t = data_array.type
if (event_type not in t) and (continuous_type not in t):
raise ValueError(f"DataTrace not valid to dataArrray of type {data_array.type}!")
self._data_array = data_array
self._name = data_array.name
self._id = data_array.id
self._type = data_array.type
self._trace_type = DataType.Continuous if continuous_type in data_array.type else DataType.Event
self._shape = data_array.shape
self._sampling_interval = None
if self._trace_type == DataType.Event:
if sum(data_array.shape) > 0:
self._max_time = data_array[-1]
else:
self._max_time = 0.0
else:
self._max_time = self._shape[0] * data_array.dimensions[0].sampling_interval
self._sampling_interval = data_array.dimensions[0].sampling_interval
@property
def trace_type(self):
"""The DataType stored in this trace. Either DataType.Continuous for continuously sampled data or DataType.Event for event type data.
Returns
-------
DataType
The DataType of this trace.
"""
return self._trace_type
@property
def maximum_time(self):
"""The maximum time represetend in this Trace
Returns
-------
float
The maximum time
"""
return self._max_time
@property
def shape(self):
"""The ashape of the stored data
Returns
-------
tuple
The DataArray shape.
"""
return self._shape
@property
def name(self):
"""The name of this trace.
Returns
-------
str
The name
"""
return self._name
@property
def data_array(self):
"""Returns the underlying nixio.DataArray entity.
Returns
-------
nixio.DataArray
The nix entity that holds the trace data.
"""
return self._data_array
@property
def sampling_interval(self):
"""The sampling interval of this trace.
Returns
-------
float
The sampling interval in seconds.
"""
if self.trace_type == DataType.Event:
logging.warning("DataTrace: sampling interval makes no sense for event traces!")
return self._sampling_interval
def __str__(self) -> str:
str = f"Name: {self._name}\tid: {self._id}\ntype: {self._type}\t data type: {self._trace_type}\t shape {self._shape}\n maximum time: {self._max_time}"
return str
def __repr__(self) -> str:
return "DataTrace (Name: %r, DataArray: %r, DataType: %r)" % (self.name, self.data_array.id, self.trace_type)
class TraceList(list):
def __init_subclass__(cls) -> None:
return super().__init_subclass__()
def __contains__(self, __o: object) -> bool:
if isinstance(__o, str):
return any([dt.name == __o for dt in self])
if isinstance(__o, DataTrace):
return any([dt.name == __o.name for dt in self])
return super().__contains__(__o)
def __getitem__(self, index) -> DataTrace:
if isinstance(index, str):
for dt in self:
if dt.name == index:
return dt
raise KeyError(f"Provided key {index} is not valid in this list.")
else:
return super().__getitem__(index)
def append(self, trace):
if not isinstance(trace, DataTrace):
raise ValueError("TraceList can only accommodate DataTrace objects!")
super().append(trace)
```
#### File: rlxnix/utils/mappings.py
```python
from enum import Enum
import nixio
class DataType(Enum):
Continuous = 0
Event = 1
StimulusSegment = 2
type_map = {1.0: {DataType.Event: "nix.events.position",
DataType.Continuous: "nix.data.sampled",
DataType.StimulusSegment: "nix.event.stimulus"},
1.1: {DataType.Event: "relacs.data.event",
DataType.Continuous: "relacs.data.sampled",
DataType.StimulusSegment: "relacs.stimulus"}
}
def tag_start_and_extent(tag, index, mapping_version):
start_time = None
duration = None
if isinstance(tag, nixio.MultiTag):
if mapping_version == 1.0:
start_time = tag.positions[index][0]
duration = tag.extents[index][0] if tag.extents else 0.0
else:
start_time = tag.positions[index, 0][0]
duration = tag.extents[index, 0][0] if tag.extents else 0.0
elif isinstance(tag, nixio.Tag):
start_time = tag.position[0]
duration = tag.extent[0] if tag.extent else 0.0
return start_time, duration
```
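A small illustration of how `type_map` is consumed by `DataTrace` above: the mapping version selects the relacs type strings, and simple substring membership in the DataArray's type decides between continuous and event traces. The import path and the example type string are assumptions.
```python
from rlxnix.utils.mappings import DataType, type_map

array_type = "relacs.data.sampled.V-1"  # made-up nix type string for illustration
mapping_version = 1.1
is_continuous = type_map[mapping_version][DataType.Continuous] in array_type
is_event = type_map[mapping_version][DataType.Event] in array_type
print(is_continuous, is_event)  # True False
```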
|
{
"source": "JGridleyMLDL/TrustforSecurity",
"score": 3
}
|
#### File: TrustforSecurity/Multilayer_Error_Check/generate data paper edition.py
```python
import json
import sys
import numpy
import copy
#extractors = {0: {"precision": 0.5, "recall": 0.5}}
#sources = {0: {"KBT": 0.5, "triples": [[0,0,0], [0,1,None]]}}
#Correct triples have a value of 0, incorrect triples have a value of 1 through 25
#format = {0: {0: [], 1: [], 2: []} }
def generateTriples(quantity):
triples = []
for i in range(1, quantity + 1):
triples.append([i, i, i])
return triples
#Randomly shuffles triples
def generateSource(allTriples, accuracy = 0.7):
triples = copy.deepcopy(allTriples)
numpy.random.default_rng().shuffle(triples)
for triple in triples:
if numpy.random.default_rng().integers(0, 100)/100 > accuracy:
tmp = numpy.random.default_rng().integers(0, 2)
if tmp == 0:
triple[0] = 0
elif tmp == 1:
triple[1] = 0
else:
triple[2] = 0
return {"KBT": 0.7, "triples": triples}
#Generates an extractor with a fixed precision and recall of 0.5
def generateExtractor():
return {"precision": 0.5, "recall": 0.5}
def extract(extractor, source):
extractedTriples = []
for triple in source["triples"]:
if numpy.random.default_rng().integers(0, 100)/100 > extractor["recall"]:
extractedTriples.append(copy.deepcopy(triple))
for i in range(len(extractedTriples[-1])):
if numpy.random.default_rng().integers(0, 100)/100 > numpy.cbrt(extractor["precision"]):
extractedTriples[-1][i] = -1
numpy.random.default_rng().shuffle(extractedTriples)
return extractedTriples
def main():
if len(sys.argv) != 4:
print("Usage:", sys.argv[0], "[number of triples] [number of sources] [number of extractors]")
exit()
triples = []
sources = {}
extractors = {}
multilayerinput = {}
print("Generating triples...")
triples = generateTriples(int(sys.argv[1]))
print("Completed!\n")
print("Generating sources...")
for i in range(int(sys.argv[2])):
sources[i] = generateSource(triples)
print("Completed!\n")
print("Generating extractors...")
for i in range(int(sys.argv[3])):
extractors[i] = generateExtractor()
print("Completed!\n")
print("Extracting triples from sources...")
for extractorID in range(int(sys.argv[3])):
tmp = {}
for sourceID in range(int(sys.argv[2])):
if numpy.random.default_rng().integers(0, 100)/100 > 0.5:
tmp[sourceID] = extract(extractors[extractorID], sources[sourceID])
multilayerinput[extractorID] = tmp
print("Completed!\n")
print("Writing to files...")
with open("triples.json", "w") as triplesFile, open("sources.json", "w") as sourcesFile, open("extractors.json", "w") as extractorsFile, open("multilayerinput.json", "w") as multilayerInputFile:
json.dump(triples, triplesFile, indent = 2)
json.dump(sources, sourcesFile, indent = 2)
json.dump(extractors, extractorsFile, indent = 2)
json.dump(multilayerinput, multilayerInputFile, indent = 2)
print("Completed!")
if __name__ == "__main__":
main()
```
#### File: TrustforSecurity/OldCode/generate_data.py
```python
import json
import sys
import numpy
#extractors = {0: {"precision": 0.5, "recall": 0.5}}
#sources = {0: {"KBT": 0.5, "triples": [[0,0,0], [0,1,None]]}}
#Correct triples have a value of 0, incorrect triples have a value of 1 through 25
#format = {0: {0: [], 1: [], 2: []} }
def generateTriples(quantity):
relationQuantity = round(numpy.sqrt(quantity))
triples = []
i = 0
while i * relationQuantity < quantity:
for j in range(int(relationQuantity)):
if i * relationQuantity + j >= quantity:
break
triples.append([i, j, 0])
i += 1
return triples
#Generates a source with [minimumNumberOfTriples, maximumNumberOfTriples) triples and a KBT of [0.0, 1.0] uniformly
#Randomly shuffles allTriples
def generateSource(allTriples, minimumNumberOfTriples, maximumNumberOfTriples):
triples = numpy.random.default_rng().choice(allTriples, numpy.random.default_rng().integers(minimumNumberOfTriples, maximumNumberOfTriples), replace = False, shuffle = False).tolist()
incorrectTripleCount = numpy.random.default_rng().integers(0, len(triples) + 1)
for i in range(incorrectTripleCount):
triples[i][2] = int(numpy.random.default_rng().integers(1, 26))
numpy.random.default_rng().shuffle(triples)
return {"KBT": (len(triples) - incorrectTripleCount)/len(triples), "triples": triples}
#Generates an extractor with a precision and recall of [0.1, 1.0) uniformly
def generateExtractor():
return {"precision": numpy.random.default_rng().uniform(0.1, 1.0), "recall": numpy.random.default_rng().uniform(0.1, 1.0)}
def extract(extractor, source, allTriples):
relevantCount = round(extractor["recall"] * len(source["triples"]))
irrelevantCount = min(round((1/extractor["precision"] - 1) * relevantCount), len(source["triples"]) - relevantCount) #Messes up precision to an unknown degree
extractedTriples = numpy.random.default_rng().choice(source["triples"], relevantCount + irrelevantCount, replace = False, shuffle = False).tolist()
for tripleIndex in range(irrelevantCount):
extractedTriples[tripleIndex][2] = int(numpy.random.default_rng().integers(1, 26))
numpy.random.default_rng().shuffle(extractedTriples)
return extractedTriples
def main():
if len(sys.argv) != 4:
print("Usage:", sys.argv[0], "[number of triples] [number of sources] [number of extractors]")
exit()
triples = []
sources = {}
extractors = {}
multilayerinput = {}
print("Generating triples...")
triples = generateTriples(int(sys.argv[1]))
print("Completed!\n")
print("Generating sources...")
for i in range(int(sys.argv[2])):
sources[i] = generateSource(triples, 1, 101) #Controls how many triples can be in a source
print("Completed!\n")
print("Generating extractors...")
for i in range(int(sys.argv[3])):
extractors[i] = generateExtractor()
print("Completed!\n")
print("Extracting triples from sources...")
for extractorID in range(int(sys.argv[3])):
tmp = {}
for sourceID in range(int(sys.argv[2])):
tmp[sourceID] = extract(extractors[extractorID], sources[sourceID], triples)
multilayerinput[extractorID] = tmp
print("Completed!\n")
print("Writing to files...")
with open("triples.json", "w") as triplesFile, open("sources.json", "w") as sourcesFile, open("extractors.json", "w") as extractorsFile, open("multilayerinput.json", "w") as multilayerInputFile:
json.dump(triples, triplesFile, indent = 2)
json.dump(sources, sourcesFile, indent = 2)
json.dump(extractors, extractorsFile, indent = 2)
json.dump(multilayerinput, multilayerInputFile, indent = 2)
print("Completed!")
if __name__ == "__main__":
main()
```
#### File: TrustforSecurity/OldCode/multilayer.py
```python
import sys
import json
import numpy
from dataclasses import dataclass
from collections import defaultdict
### Default Values ###
gamma = 0.44
initialPrecision = 0.8
initialRecall = 0.8
initialAccuracy = 0.8
### Data Classes ###
@dataclass
class Extractor:
precision: float
recall: float
presenceVote: float
absenceVote: float
def __init__(self, precision=initialPrecision, recall=initialRecall):
self.precision = precision
self.recall = recall
self.updatePresenceAndAbsenceVote()
# Equation 7
def getSpecificity(self) -> float:
return (gamma / (1.0 - gamma)) * ((1.0 - self.precision) / self.precision) * self.recall
# Equation 12
def updatePresenceAndAbsenceVote(self) -> None:
specificity = self.getSpecificity()
self.presenceVote = numpy.log(self.recall) - numpy.log(specificity)
self.absenceVote = numpy.log(1.0 - self.recall) - numpy.log(1.0 - specificity)
@dataclass
class Source:
accuracy: float
def __init__(self, accuracy=initialAccuracy):
self.accuracy = accuracy
### Estimating C ###
def sigmoid(x):
return (1.0 / (1.0 + numpy.exp(-x)))
# Equation 14 (Change to 31 later)
def getVoteCount(sourceDataItemSlice, value, extractors):
voteCount = 0.0
for extractorIndex in range(sourceDataItemSlice.size):
if sourceDataItemSlice[extractorIndex] == value:
voteCount += extractors[extractorIndex].presenceVote
else:
voteCount += extractors[extractorIndex].absenceVote
return voteCount
# Equation 15
def getProbabilityCwdvGivenXwdv(sourceDataItemSlice, value, extractors, alpha):
return sigmoid(getVoteCount(sourceDataItemSlice, value, extractors) + numpy.log(alpha / (1.0 - alpha)))
### Estimating V ###
# Equation 23, 24, and 25
def getProbabilityVdEqualsVGivenX(dataItemDomainSlice, relevantValue, sources, probabilityCwdvGivenXwdvSlice):
numerator = 0.0
denominator = 0.0
n = len(dataItemDomainSlice) - 1.0
if n == 0: # If there is only one value extracted for a given data item, then it MUST be the correct value for that data item (not necessarily true)
return 1.0
for value in dataItemDomainSlice:
for sourceIndex in range(len(sources)):
if value in probabilityCwdvGivenXwdvSlice[
sourceIndex]: # Assume that if a triple was never extracted for a particular source then the probability is zero
expVCV = numpy.exp(probabilityCwdvGivenXwdvSlice[sourceIndex][value] * numpy.log(
(n * sources[sourceIndex].accuracy) / (1.0 - sources[sourceIndex].accuracy)))
if value == relevantValue:
numerator += expVCV
denominator += expVCV
return numerator / denominator
### Estimating Accuracy ###
# Equation 28
def getAccuracyForSource(sourceSliceOfCwdv, probabilityVdEqualsVGivenX):
numerator = 0.0
denominator = 0.0
for dataItemIndex in range(len(sourceSliceOfCwdv)):
if sourceSliceOfCwdv[dataItemIndex]:
numerator += sourceSliceOfCwdv[dataItemIndex][1] * probabilityVdEqualsVGivenX[dataItemIndex][
sourceSliceOfCwdv[dataItemIndex][0]]
denominator += sourceSliceOfCwdv[dataItemIndex][1]
return numerator / denominator
### Estimating Precision and Recall ###
# Equation 29 and 30 (Change to 32 and 33 later)
def getPrecisionAndRecallForExtractor(extractorSliceOfValueCube, probabilityCwdvGivenXwdv, denominatorRecall):
numerator = 0.0
denominatorPrecision = 0.0
for sourceIndex in range(extractorSliceOfValueCube.shape[0]):
for dataItemIndex in range(extractorSliceOfValueCube.shape[1]):
if (extractorSliceOfValueCube[sourceIndex][dataItemIndex]):
if extractorSliceOfValueCube[sourceIndex][dataItemIndex] in probabilityCwdvGivenXwdv[sourceIndex][
dataItemIndex]: # This seems wrong, double check
numerator += probabilityCwdvGivenXwdv[sourceIndex][dataItemIndex][
extractorSliceOfValueCube[sourceIndex][dataItemIndex]]
denominatorPrecision += 1.0
return numerator / denominatorPrecision, numerator / denominatorRecall
### Calculating Alpha ###
def getAlpha(probabilityVdEqualsVGivenX, accuracy):
return probabilityVdEqualsVGivenX * accuracy + (1 - probabilityVdEqualsVGivenX) * (1 - accuracy)
### Multilayer Algorithm ###
# Values Cube is a 3D Matrix with Axis (Extactor, Source, Data Item) with Value as the entries
# sourceDataItemDomain stores a set of all extracted values by source and data item in 2D matrix
# dataItemDomain stores a set of all extracted values by data item in a 2D list
def multilayer(valuesCube, sourceDataItemDomain, dataItemDomain, maxIterations):
# Initialize all necessary data structures
extractors = numpy.array([Extractor() for _ in range(valuesCube.shape[0])],
dtype="object") # Stores all extractors in list by extractor index
sources = numpy.array([Source() for _ in range(valuesCube.shape[1])],
dtype="object") # Stores all sources in list by source index
probabilityCwdvGivenXwdv = numpy.array(
[[dict() for _ in range(valuesCube.shape[2])] for _ in range(valuesCube.shape[1])],
dtype="object") # 2D list by source and data item index holding a map from value to P(Cwdv|Xwdv)
argmaxProbabilityCwdvGivenXwdv = numpy.array([[None] * valuesCube.shape[2] for _ in range(valuesCube.shape[1])],
dtype="object") # 2D list by source and data item index holding (data item index, value, P(Cwdv|Xwdv)) tuple that is argmax P(Cwdv|Xwdv)
probabilityVdEqualsVGivenX = numpy.array([dict() for _ in range(valuesCube.shape[2])],
dtype="object") # 1D List by data item index holding a map from value to P(Vd = v|Xd)
alphas = numpy.array(
[[defaultdict(lambda: 0.5) for _ in range(valuesCube.shape[2])] for _ in range(valuesCube.shape[1])],
dtype="object") # 2D list by source and data item index holding map from value to alpha
# Begin Iterative Algorithm
for _ in range(maxIterations):
print("ITERATION")
# Estimate C
for sourceIndex in range(len(sourceDataItemDomain)):
for dataItemIndex in range(len(sourceDataItemDomain[sourceIndex])):
for value in sourceDataItemDomain[sourceIndex][dataItemIndex]:
tmp = getProbabilityCwdvGivenXwdv(valuesCube[:, sourceIndex, dataItemIndex], value, extractors,
alphas[sourceIndex][dataItemIndex][value])
probabilityCwdvGivenXwdv[sourceIndex][dataItemIndex][value] = tmp
if not argmaxProbabilityCwdvGivenXwdv[sourceIndex][dataItemIndex] or \
argmaxProbabilityCwdvGivenXwdv[sourceIndex][dataItemIndex][1] < tmp:
argmaxProbabilityCwdvGivenXwdv[sourceIndex][dataItemIndex] = (value, tmp)
# Estimate V
for dataItemIndex in range(len(dataItemDomain)):
for value in dataItemDomain[dataItemIndex]:
probabilityVdEqualsVGivenX[dataItemIndex][value] = getProbabilityVdEqualsVGivenX(
dataItemDomain[dataItemIndex], value, sources, probabilityCwdvGivenXwdv[:, dataItemIndex])
# Estimate Accuracy
for sourceIndex in range(len(sources)):
sources[sourceIndex].accuracy = getAccuracyForSource(argmaxProbabilityCwdvGivenXwdv[sourceIndex],
probabilityVdEqualsVGivenX)
# Estimate Precision and Recall
denominatorRecall = sum(
value for values in [values.values() for values in probabilityCwdvGivenXwdv.flatten()] for value in values)
for extractorIndex in range(len(extractors)):
extractors[extractorIndex].precision, extractors[extractorIndex].recall = getPrecisionAndRecallForExtractor(
valuesCube[extractorIndex], probabilityCwdvGivenXwdv, denominatorRecall)
extractors[extractorIndex].updatePresenceAndAbsenceVote()
# Check for Convergence
# Calculate Alpha
for sourceIndex in range(len(sourceDataItemDomain)):
for dataItemIndex in range(len(sourceDataItemDomain[sourceIndex])):
for value in sourceDataItemDomain[sourceIndex][dataItemIndex]:
alphas[sourceIndex][dataItemIndex][value] = getAlpha(
probabilityVdEqualsVGivenX[dataItemIndex][value], sources[sourceIndex].accuracy)
error_Checking(valuesCube, sources)
return sources, extractors
def main():
'''
if len(sys.argv) != 3:
print(f"Usage: python {sys.argv[0]} [filename]")
exit()
'''
with open("multilayerinput_small.json", 'r') as inputFile:
data = json.load(inputFile)
extractorToIndex = {}
sourceToIndex = {}
dataItemToIndex = {}
for extractor in data:
if extractor not in extractorToIndex:
extractorToIndex[extractor] = len(extractorToIndex)
for source in data[extractor]:
if source not in sourceToIndex:
sourceToIndex[source] = len(sourceToIndex)
for triple in data[extractor][source]:
dataItem = (triple[0], triple[1])
value = triple[2]
if dataItem not in dataItemToIndex:
dataItemToIndex[dataItem] = len(dataItemToIndex)
valueCube = numpy.empty((len(extractorToIndex), len(sourceToIndex), len(dataItemToIndex)), dtype="object")
sourceDataItemDomain = numpy.array(
[[set() for _ in range(len(dataItemToIndex))] for _ in range(len(sourceToIndex))], dtype="object")
dataItemDomain = numpy.array([set() for _ in range(len(dataItemToIndex))], dtype="object")
for extractor in data:
for source in data[extractor]:
for triple in data[extractor][source]:
dataItem = (triple[0], triple[1])
value = triple[2]
valueCube[extractorToIndex[extractor], sourceToIndex[source], dataItemToIndex[dataItem]] = value
sourceDataItemDomain[sourceToIndex[source], dataItemToIndex[dataItem]].add(value)
dataItemDomain[dataItemToIndex[dataItem]].add(value)
print(multilayer(valueCube, sourceDataItemDomain, dataItemDomain, 100))
##################
# Error Checking #
##################
def calc_SqV(pVdEqualsVGivenX, IVdequalsV, dataItemDomain):
# Calculates the difference between True (I) values and our (p) values
total = 0
for dataItemIndex in range(len(dataItemDomain)):
for value in dataItemDomain[dataItemIndex]:
diff = pVdEqualsVGivenX[dataItemIndex][value] - IVdequalsV[dataItemIndex][value]
total += pow(diff, 2)
return total
def calc_SqC(sourceDataItemDomain, pCwdvGivenXwdv, ICwdv):
total = 0
for sourceIndex in range(len(sourceDataItemDomain)):
for dataItemIndex in range(len(sourceDataItemDomain[sourceIndex])):
for value in sourceDataItemDomain[sourceIndex][dataItemIndex]:
diff = pCwdvGivenXwdv[sourceIndex][dataItemIndex][value] - ICwdv[sourceIndex][dataItemIndex][value]
total += pow(diff, 2)
return total
def calc_SqA(A_calc, A_actual):
total = 0
for sourceIndex in range(len(A_calc)):
diff = A_calc[sourceIndex].accuracy - A_actual[sourceIndex]
total += pow(diff, 2)
return total
# pVdEqualsVGivenX, dataItemDomain, sourceDataItemDomain, pCwdvGivenXwdv,
def error_Checking(valuesCube, A_calc):
# Optional argument number 2 is the validation dataset
# Accuracy Comparison
with open("sources.json", 'r') as inputFile:
data = json.load(inputFile)
return 0
A_real = numpy.array([0 for _ in range(valuesCube.shape[1])])
for s in data:
A_real[int(s)] = int(data[s]["KBT"])
SqA = calc_SqA(A_calc, A_real)
print("Squared Error of Source Accuracies: " + str(SqA))
# Extractor Comparison
with open("")
if __name__ == "__main__":
main()
```
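To make the vote-count machinery above more concrete, here is a toy walk-through of Equations 7, 12, 14, and 15 exactly as they are implemented in `Extractor` and `getProbabilityCwdvGivenXwdv`; the precision, recall, and prior values are illustrative only.
```python
import numpy

# One extractor with precision 0.8 and recall 0.8, gamma 0.44, prior alpha 0.5.
gamma = 0.44
precision, recall, alpha = 0.8, 0.8, 0.5

specificity = (gamma / (1 - gamma)) * ((1 - precision) / precision) * recall  # Eq. 7
presence_vote = numpy.log(recall) - numpy.log(specificity)                    # Eq. 12
absence_vote = numpy.log(1 - recall) - numpy.log(1 - specificity)

# Suppose two such extractors saw the value and one did not (Eq. 14),
# then squash the vote count with the prior (Eq. 15):
vote_count = 2 * presence_vote + 1 * absence_vote
p_cwdv = 1.0 / (1.0 + numpy.exp(-(vote_count + numpy.log(alpha / (1 - alpha)))))
print(specificity, vote_count, p_cwdv)
```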
#### File: OldCode/Object-oriented Version/DataClass.py
```python
import json
import numpy

class DataMatrix:
    # Imagine having a 3 Dimensional Matrix for all of the data
    # (extractors x sources x data items); the counts are placeholders here.
    def __init__(self, n_extractors, n_sources, n_data_items):
        self.Matrix3D = numpy.zeros((n_extractors, n_sources, n_data_items), dtype="object")
    # Needs to be accessible for sources and extractors --> Can iterate by i, j, or k
    # Need modifying functions for while reading in JSON data

# Function or class that keeps track of extractor accuracies.
class Extractors:
    # Idea is a 3d matrix with accuracies, precision, recall, Q
    # (extractor ids as rows, accuracies as columns)
    def New_Accuracy_Prediction(self, extractor_id, accuracy):
        # creates a new column for this iteration's accuracies
        with open("data.json", "r") as f:
            data = json.load(f)
        n_extractors = len(data)
        n_sources = max(len(data[e]) for e in data)
        n_items = max(len(data[e][s]) for e in data for s in data[e])
        Matrix3D = numpy.zeros((n_extractors, n_sources, n_items), dtype="object")
        for ei, e in enumerate(data):
            for si, s in enumerate(data[e]):
                for i in range(len(data[e][s])):
                    # data_pair = (data[e][s][i][0], (data[e][s][i][1], data[e][s][i][2]))
                    Matrix3D[ei][si][i] = (0, 0)
```
#### File: OldCode/Object-oriented Version/multilayer (1).py
```python
import json
import numpy
from dataclasses import dataclass
from collections import defaultdict
### Default Values ###
gamma = 0.5
initialPrecision = 0.8
initialRecall = 0.8
initialAccuracy = 0.8
### Data Classes ###
@dataclass
class Extractor:
precision: float
recall: float
presenceVote: float
absenceVote: float
def __init__(self, precision = initialPrecision, recall = initialRecall):
self.precision = precision
self.recall = recall
self.updatePresenceAndAbsenceVote()
#Equation 7
def getSpecificity(self) -> float:
return (gamma/(1.0 - gamma)) * ((1.0 - self.precision)/self.precision) * self.recall
#Equation 12
def updatePresenceAndAbsenceVote(self) -> None:
specificity = self.getSpecificity()
self.presenceVote = numpy.log(self.recall) - numpy.log(specificity)
self.absenceVote = numpy.log(1.0 - self.recall) - numpy.log(1.0 - specificity)
@dataclass
class Source:
accuracy: float
def __init__(self, accuracy = initialAccuracy):
self.accuracy = accuracy
### Estimating C ###
def sigmoid(x):
return (1.0/(1.0 + numpy.exp(-x)))
#Equation 14 (Replace with 31 for Extractors with Confidence Values)
def getVoteCount(sourceDataItemSlice, value, extractors):
voteCount = 0
for extractorIndex in range(sourceDataItemSlice.size):
if sourceDataItemSlice[extractorIndex] == value:
voteCount += extractors[extractorIndex].presenceVote
else:
voteCount += extractors[extractorIndex].absenceVote
return voteCount
#Equation 15
def getProbabilityCwdvGivenXwdv(sourceDataItemSlice, value, extractors, alpha):
return sigmoid(getVoteCount(sourceDataItemSlice, value, extractors) + numpy.log(alpha/(1.0 - alpha)))
### Multilayer Algorithm ###
#Values Cube is a 3D Matrix with Axis (Extactor, Source, Data Item) with Value as the entries
def multilayer(valuesCube, maxIterations):
extractors = defaultdict(Extractor)
sources = defaultdict(Source)
pass
def getMatrixFromJSON(jsonData):
pass
if __name__ == "__main__":
multilayer(numpy.array([[["0", "0", "0", "0"],
["0", "1", "0", "1"]],
[["1", "1", "1", "1"],
["0", "0", "2", "2"]],
[["2", "2", "2", "2"],
["2", "0", "1", "2"]]]), 100)
#Read input data and call the model
#with open("data.json", "r") as f:
# data = f.read()
# Parsing the file
#json_data = json.loads(data)
```
#### File: OldCode/Object-oriented Version/multilayer.py
```python
import numpy as np
import math as m
import json
class DataPoint:
def __init__(self, E, W, D, V):
self.extractor = E
self.value = V
self.source = W
self.data_item = D
def calc_ewdv(self, e, w, d, v):
        # Checks if the triple was extracted - THIS NEEDS ADJUSTING -
if e in self.extractor and ((w, d, v) in e.triplets):
return 1
else:
return 0
class Extractor:
def __init__(self, data, R_e=0.8, Q_e=0.2):
# data being a set() of triplet tuples containing all extracted information by the extractor over the dataset
self.triplets = data
self.R_e = R_e
self.Q_e = Q_e
class InputData:
def __init__(self, data):
self.extractors = data['extractors']
self.data_pairs = data['data_items'] # List of (d, v)
self.sources = data['sources']
self.data_items = []
self.data_values = []
for d in self.data_pairs:
self.data_items.append(d[0])
self.data_values.append(d[1])
'''
Class which will act as the container for an instance of this truth-discovery algorithm. This will maintain all hyperparameters along with default starting values
'''
class Model:
def __init__(self, max_iter=100, A_w=0.5):
self.A_w = A_w
self.max_iter = max_iter
self.theta = np.array(2)
'''
Analog for Algorithm 1 MultiLayer(X, t_max) from Knowledge Based Trust paper
'''
def multilayer(self):
self.A = np.zeros(len(self.X))
self.A += self.A_w
# Test for if Z and theta converge, then return the value Loop - Convergence condition goes here
itr = 0
        iteration_results = []  # a list, so that .append() below works
while itr <= self.max_iter: # Change this to also check for convergence
# Intermediary steps
            this_itr = []  # (theta1, theta2) tuples collected for this iteration
for extractor in range(len(Extractors)):
# Estimate C
C = self.calc_Cwdv(DP) # Percolate through other functions
self.A_w = self.iterate_ahat(data)
vote_count = self.calc_VCC_prime(data, DP)
# Estimate V
V = self.calc_Vd(data)
# Estimate thetas
theta1 = self.calc_A_w_hat(DP, data)
                theta2 = np.array([self.calc_P_e_hat(data),
                                   self.calc_R_e_hat(data)])
# Save the results
self.A[extractor] = theta1
this_itr.append((theta1, theta2)) # Make this more organized?
iteration_results.append(this_itr)
'''
    This block of functions is designated as those required to estimate C
'''
def calc_Pre_e(self, e):
# Presence vote for a triple that it extracts (Eq. 12)
return m.log(e.R_e) - m.log(e.Q_e)
def calc_Abs_e(self, e):
# Absence vote for a triple that it does not extract (Eq. 13)
return m.log(1 - e.R_e) - m.log(1 - e.Q_e)
def calc_VCC(self, DP):
# VCC calculates the for vote count (Eq. 14)
# For each extractor, count the sum of presence and absence votes
        Pre_e, Abs_e = 0, 0
for e in DP.extractor:
if DP.calc_ewdv(e, DP.source, DP.data_item, DP.value) == 1:
Pre_e += self.calc_Pre_e(e)
            else:
Abs_e += self.calc_Abs_e(e)
return Pre_e + Abs_e
# Modeled after equation 15 in the paper -- Returns P[C_{wdv} = 1 | X_{wdv}]
def calc_Cwdv(self, DP):
# Probability that w provides (d, v) given whether it was extracted
        result = self.calc_VCC(DP) + m.log((self.A_w) / (1 - self.A_w))  # Check if it's supposed to be alpha
result = 1 / (1 + (m.e ** -(result)))
return result
    # INCORRECT - NEEDS ARGMAX ON PG 942
# Modeled after equation 26 in the paper -- Returns \hat{a}^{t+1}
def iterate_ahat(self, X):
# Recalculates our assumption that a = p(Cwdv = 1)
return self.calc_Vd(X) * self.A_w + (1 - self.calc_Vd(X))*(1 - self.A_w)
# Modeled after equation 31 in the paper -- Returns VCC'(w, d, v)
def calc_VCC_prime(self, X, DP):
# Accounts for extraction confidence values
result = 0
for e in X.extractors:
if DP.calc_ewdv(e, DP.source, DP.data_item, DP.value):
result += self.calc_Pre_e(e)
            else:
result += self.calc_Abs_e(e)
return result
'''
This block of functions is designated as those required to estimate V
'''
# Modeled after equation 23 in the paper -- Returns VCV'(w, d, v)
def calc_VCV_prime(self, DP):
# Calculates the vote count for an extractor using weights now
n = len(self.A)
result = self.calc_Cwdv(DP) * m.log((n * self.A_w) / (1 - self.A_w))
return result
# Modeled after equation 24 in the paper -- Returns VCV'(d, v)
def calc_VCV_prime_general(self, X, DP):
# Sum of all vote counts for each source from the extractor with weights
result = 0
for w in X.sources:
DP.source = w
result += self.calc_VCV_prime(DP)
return result
# Modeled after equation 25 in the paper -- Returns P[V_d = v | X_d]
def calc_Vd(self, X):
# Probability that the extracted value is the true value.
denom = 0
for v in X.true_v:
DP = DataPoint(None, None, X.data_item, v)
denom += m.e ** (self.calc_VCV_prime_general(X, DP))
return (m.e ** (self.calc_VCV_prime_general(X, DataPoint(None, None, X.data_item, X.value))) / denom)
'''
This block of functions is designed as those required to estimate Theta
'''
''' Theta 1 '''
# Modeled after equation 28 in the paper -- Returns \hat{A}_w^{t+1}
def calc_A_w_hat(self, DP, X):
# Calculating the accuracy of the web source
num_val = 0
den_val = 0
for dataPoint in X.getData():
if self.calc_Cwdv(dataPoint) > 1:
num_val += self.calc_Cwdv(dataPoint) * self.calc_Vd(X)
den_val += self.calc_Cwdv(dataPoint)
return num_val / den_val
''' Theta 2 '''
# Modeled after equation 32 in the paper -- Returns \hat{P}_e
def calc_P_e_hat(self, X):
# Calculating the precision of the extractor
num_val = 0
den_val = 0
for dataPoint in X.getData():
if dataPoint.calc_Xewdv() > 0:
num_val += dataPoint.calc_Xewdv() * self.calc_Cwdv(dataPoint)
                den_val += dataPoint.calc_Xewdv()
        return num_val / den_val
# Modeled after equation 33 in the paper -- Returns \hat{R}_e
def calc_R_e_hat(self, X):
# Calculating the recall of the extractor
        num_val, den_val = 0, 0
for dataPoint in X.getData():
if dataPoint.calc_Xewdv() > 0:
num_val += dataPoint.calc_Xewdv() * self.calc_Cwdv(dataPoint)
den_val += self.calc_Cwdv(dataPoint)
return num_val / den_val
if __name__ == "__main__":
# Read input data and call the model
with open("data.json", "r") as f:
data = f.read()
# Parsing the file
json_data = json.loads(data)
```
|
{
"source": "jgrieger1/pubnubsub-handler",
"score": 3
}
|
#### File: jgrieger1/pubnubsub-handler/pubnubsubhandler.py
```python
import json
import threading
import logging
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
from pubnub.enums import PNOperationType, PNStatusCategory, \
PNReconnectionPolicy
from pubnub.callbacks import SubscribeCallback
SUBSCRIPTIONS = {}
CHANNELS = []
_LOGGER = logging.getLogger(__name__)
class PubNubSubscriptionHandler():
"""
Connection handler for PubNub Subscriptions.
"""
def __init__(self, sub_key, keep_alive_function=None, keep_alive=3600,
sub_delay=1):
"""
Create the PubNub connection object.
Args:
sub_key (str): The PubNub Subscription key to use.
keep_alive_function (func, optional): If provided will be run
                every keep_alive seconds. Use if something needs to run to
keep your PubNub updates flowing. Defaults to None.
Example: For Wink subscriptions the Wink API needs polled
occasionally to keep updates flowing from PubNub.
keep_alive (int, optional): How often to run the keep_alive_function
in seconds. Defaults to 3600 (1 hour)
sub_delay (int, optional): How long to delay the call to subscribe.
Defaults to 1 second. (No delay)
"""
self._sub_key = sub_key
self._pnconfig = PNConfiguration()
self._pnconfig.reconnect_policy = PNReconnectionPolicy.EXPONENTIAL
self._pnconfig.subscribe_key = sub_key
self._pnconfig.ssl = True
self._pubnub = PubNub(self._pnconfig)
self._listener = PubNubSubCallback()
self._pubnub.add_listener(self._listener)
self._keep_alive_function = keep_alive_function
self._keep_alive = keep_alive
self._subscribed = False
self._subscription_delay = sub_delay
def add_subscription(self, channel, callback_function):
"""
Add a channel to subscribe to and a callback function to
run when the channel receives an update.
If channel already exists, create a new "subscription"
and append another callback function.
Args:
            channel (str): The channel to add a subscription to.
callback_function (func): The function to run on an
update to the passed in channel.
"""
if channel not in CHANNELS:
CHANNELS.append(channel)
SUBSCRIPTIONS[channel] = [callback_function]
else:
SUBSCRIPTIONS[channel].append(callback_function)
# If a channel gets added after subscription has already been called
# call subscribe on the individual channel, here.
if self._subscribed:
_LOGGER.info("New channel added after main subscribe call.")
self._pubnub.subscribe().channels(channel).execute()
def subscribe(self):
"""
Call the real subscribe method in self._subscription_delay seconds.
        This gives the calling program more time to add devices to keep the
TCP connections to a minimum.
"""
threading.Timer(self._subscription_delay, self._subscribe).start()
def _run_keep_alive(self):
"""
Start a new thread timer to keep the keep_alive_function running
every keep_alive seconds.
"""
threading.Timer(self._keep_alive, self._run_keep_alive).start()
_LOGGER.info("Polling the API")
# This may or may not return something
self._keep_alive_function()
def unsubscribe(self):
"""
        Completely stop all PubNub operations.
"""
_LOGGER.info("PubNub unsubscribing")
self._pubnub.unsubscribe_all()
self._pubnub.stop()
self._pubnub = None
def _subscribe(self):
"""
Start the subscription to the channel list.
If self._keep_alive_function isn't None start timer thread to
        run self._keep_alive_function every self._keep_alive seconds.
"""
_LOGGER.info("PubNub subscribing")
self._pubnub.subscribe().channels(CHANNELS).execute()
if self._keep_alive_function is not None:
threading.Timer(self._keep_alive, self._run_keep_alive).start()
self._subscribed = True
class PubNubSubCallback(SubscribeCallback):
"""
PubNub Callback handler.
"""
def status(self, pubnub, status):
"""
Things to do on different status updates.
"""
if status.operation == PNOperationType.PNSubscribeOperation \
or status.operation == PNOperationType.PNUnsubscribeOperation:
if status.category == PNStatusCategory.PNConnectedCategory:
# This is expected for a subscribe, this means there is no error or issue whatsoever
_LOGGER.info("PubNub connected")
elif status.category == PNStatusCategory.PNReconnectedCategory:
# This usually occurs if subscribe temporarily fails but reconnects. This means
# there was an error but there is no longer any issue
_LOGGER.info("PubNub reconnected")
elif status.category == PNStatusCategory.PNDisconnectedCategory:
# This is the expected category for an unsubscribe. This means there
# was no error in unsubscribing from everything
_LOGGER.info("PubNub unsubscribed")
elif status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:
# This is usually an issue with the internet connection, this is an error, handle appropriately
# retry will be called automatically
_LOGGER.info("PubNub disconnected (lost internet?)")
else:
# This is usually an issue with the internet connection, this is an error, handle appropriately
# retry will be called automatically
_LOGGER.info("PubNub disconnected (lost internet?)")
elif status.operation == PNOperationType.PNHeartbeatOperation:
# Heartbeat operations can in fact have errors, so it is important to check first for an error.
# For more information on how to configure heartbeat notifications through the status
            # PNObjectEventListener callback, consult <link to the PNCONFIGURATION heartbeat config>
if status.is_error():
# There was an error with the heartbeat operation, handle here
_LOGGER.info("PubNub failed heartbeat")
else:
# Heartbeat operation was successful
_LOGGER.info("PubNub heartbeat")
else:
pass
def presence(self, pubnub, presence):
"""
        Presence operations are not currently supported.
"""
return
def message(self, pubnub, message):
"""
        Called when a new message is received on one of the subscribed
        channels.
        Process the message and call the channel's callback function(s).
"""
try:
json_data = json.dumps(message.message.get('data'))
except AttributeError:
json_data = message.message
for func in SUBSCRIPTIONS[message.channel]:
# This means pubnub couldn't get the current state of the channel
# The pull_url is the location to pull the current state from.
# Returning None here to have the calling program handle this.
if 'pull_url' in json_data:
func(None)
else:
func(json.loads(json_data))
```
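A short usage sketch for `PubNubSubscriptionHandler`: the subscribe key and channel name are placeholders, and a real PubNub subscription key is needed for any updates to arrive. The callback follows the contract in `message()` above, where `None` signals that the current state has to be pulled from the API instead.
```python
import time
from pubnubsubhandler import PubNubSubscriptionHandler

def on_update(message):
    if message is None:
        print("State unavailable, poll the API instead (pull_url case).")
    else:
        print("Update received:", message)

handler = PubNubSubscriptionHandler("sub-c-xxxxxxxx")  # placeholder key
handler.add_subscription("my-device-channel", on_update)
handler.subscribe()          # the actual subscribe happens after sub_delay seconds
try:
    time.sleep(60)           # let updates arrive
finally:
    handler.unsubscribe()
```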
|
{
"source": "jgriesfeller/pyaerocom-pycharm-scratches",
"score": 2
}
|
#### File: jgriesfeller/pyaerocom-pycharm-scratches/EBASReadingScratch.py
```python
def main():
import matplotlib.pyplot as plt
plt.close("all")
from pyaerocom import const
from pyaerocom.io.read_aeronet_sunv3 import ReadAeronetSunV3
import pyaerocom.io as pio
import pyaerocom as pya
# pya.change_verbosity(new_level='warning', log=pya.logger)
# reader = ReadAeronetSunV3(const.AERONET_SUN_V3L2_AOD_ALL_POINTS_NAME)
# od = reader.read_file("/home/jang/MyPyaerocom/testdata-minimal/obsdata/AeronetSunV3Lev2.0.AP/renamed/19930101_20211120_Zvenigorod.lev20")
# print(od)
DATA_ID = const.EBAS_MULTICOLUMN_NAME
reader = pio.ReadUngridded(DATA_ID)
# data = reader.read(vars_to_retrieve="concCocpm25")
data = reader.read(vars_to_retrieve="concso4")
# data = reader.read(vars_to_retrieve="SO4ugSm3")
print(data)
# ETOPO1_Ice_g_gmt4.grd
if __name__ == "__main__":
main()
```
|
{
"source": "j-griffith/cinder",
"score": 3
}
|
#### File: api/validation/parameter_types.py
```python
import copy
import re
import unicodedata
import six
def _is_printable(char):
"""determine if a unicode code point is printable.
This checks if the character is either "other" (mostly control
codes), or a non-horizontal space. All characters that don't match
those criteria are considered printable; that is: letters;
combining marks; numbers; punctuation; symbols; (horizontal) space
separators.
"""
category = unicodedata.category(char)
return (not category.startswith("C") and
(not category.startswith("Z") or category == "Zs"))
def _get_all_chars():
for i in range(0xFFFF):
yield six.unichr(i)
# build a regex that matches all printable characters. This allows
# spaces in the middle of the name. Also note that the regexp below
# deliberately allows the empty string. This is so only the constraint
# which enforces a minimum length for the name is triggered when an
# empty string is tested. Otherwise it is not deterministic which
# constraint fails and this causes issues for some unittests when
# PYTHONHASHSEED is set randomly.
def _build_regex_range(ws=True, invert=False, exclude=None):
"""Build a range regex for a set of characters in utf8.
This builds a valid range regex for characters in utf8 by
iterating the entire space and building up a set of x-y ranges for
all the characters we find which are valid.
:param ws: should we include whitespace in this range.
:param exclude: any characters we want to exclude
:param invert: invert the logic
The inversion is useful when we want to generate a set of ranges
which is everything that's not a certain class. For instance,
    produce all the non-printable characters as a set of ranges.
"""
if exclude is None:
exclude = []
regex = ""
# are we currently in a range
in_range = False
# last character we found, for closing ranges
last = None
# last character we added to the regex, this lets us know that we
# already have B in the range, which means we don't need to close
    # it out with B-B. While the latter seems to work, it's kind of bad form.
last_added = None
def valid_char(char):
if char in exclude:
result = False
elif ws:
result = _is_printable(char)
else:
# Zs is the unicode class for space characters, of which
# there are about 10 in this range.
result = (_is_printable(char) and
unicodedata.category(char) != "Zs")
if invert is True:
return not result
return result
    # iterate through the entire character range
for c in _get_all_chars():
if valid_char(c):
if not in_range:
regex += re.escape(c)
last_added = c
in_range = True
else:
if in_range and last != last_added:
regex += "-" + re.escape(last)
in_range = False
last = c
else:
if in_range:
regex += "-" + re.escape(c)
return regex
valid_description_regex_base = '^[%s]*$'
valid_description_regex = valid_description_regex_base % (
_build_regex_range())
name = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
'format': 'name'
}
description = {
'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,
'pattern': valid_description_regex,
}
boolean = {
'type': ['boolean', 'string'],
'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
'YES', 'Yes', 'yes', 'y', 't',
False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
'NO', 'No', 'no', 'n', 'f'],
}
uuid = {
'type': 'string', 'format': 'uuid'
}
extra_specs = {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9-_:. ]{1,255}$': {
'type': 'string', 'maxLength': 255
}
},
'additionalProperties': False
}
extra_specs_with_no_spaces_key = {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9-_:.]{1,255}$': {
'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255
}
},
'additionalProperties': False
}
group_snapshot_status = {
'type': 'string', 'format': 'group_snapshot_status'
}
extra_specs_with_null = copy.deepcopy(extra_specs)
extra_specs_with_null['patternProperties'][
'^[a-zA-Z0-9-_:. ]{1,255}$']['type'] = ['string', 'null']
name_allow_zero_min_length = {
'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255
}
uuid_allow_null = {
'type': ['string', 'null']
}
metadata_allows_null = copy.deepcopy(extra_specs)
metadata_allows_null['type'] = ['object', 'null']
container = {
'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255}
backup_url = {'type': 'string', 'minLength': 1, 'format': 'base64'}
backup_service = {'type': 'string', 'minLength': 0, 'maxLength': 255}
nullable_string = {
'type': ('string', 'null'), 'minLength': 0, 'maxLength': 255
}
```
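These parameter types are meant to be embedded in JSON-Schema request-body schemas; a minimal sketch of that usage with the `jsonschema` package follows. Cinder's real validator registers extra format checkers (for 'name', 'uuid', etc.), so only format-free types are used here, and the body schema itself is made up for illustration.
```python
import jsonschema
from cinder.api.validation import parameter_types

# A hypothetical body schema built from the parameter types defined above.
body_schema = {
    'type': 'object',
    'properties': {
        'bootable': parameter_types.boolean,
        'metadata': parameter_types.extra_specs_with_null,
    },
    'required': ['bootable'],
    'additionalProperties': False,
}

# Passes: 'true' is in the boolean enum and the metadata key matches the pattern.
jsonschema.validate({'bootable': 'true', 'metadata': {'key': 'value'}}, body_schema)

# Rejected: 'maybe' is not an accepted boolean spelling.
try:
    jsonschema.validate({'bootable': 'maybe'}, body_schema)
except jsonschema.ValidationError as err:
    print("rejected:", err.message)
```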
#### File: dell_emc/unity/adapter.py
```python
import contextlib
import copy
import functools
import os
import random
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder import utils as cinder_utils
from cinder.volume.drivers.dell_emc.unity import client
from cinder.volume.drivers.dell_emc.unity import utils
from cinder.volume import utils as vol_utils
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
else:
# Set storops_ex to be None for unit test
storops_ex = None
LOG = logging.getLogger(__name__)
PROTOCOL_FC = 'FC'
PROTOCOL_ISCSI = 'iSCSI'
class VolumeParams(object):
def __init__(self, adapter, volume):
self._adapter = adapter
self._volume = volume
self._volume_id = volume.id
self._name = volume.name
self._size = volume.size
self._description = (volume.display_description
if volume.display_description
else volume.display_name)
self._pool = None
self._io_limit_policy = None
@property
def volume_id(self):
return self._volume_id
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def size(self):
return self._size
@size.setter
def size(self, value):
self._size = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def pool(self):
if self._pool is None:
self._pool = self._adapter._get_target_pool(self._volume)
return self._pool
@pool.setter
def pool(self, value):
self._pool = value
@property
def io_limit_policy(self):
if self._io_limit_policy is None:
qos_specs = utils.get_backend_qos_specs(self._volume)
self._io_limit_policy = self._adapter.client.get_io_limit_policy(
qos_specs)
return self._io_limit_policy
@io_limit_policy.setter
def io_limit_policy(self, value):
self._io_limit_policy = value
def __eq__(self, other):
return (self.volume_id == other.volume_id and
self.name == other.name and
self.size == other.size and
self.io_limit_policy == other.io_limit_policy)
class CommonAdapter(object):
protocol = 'unknown'
driver_name = 'UnityAbstractDriver'
driver_volume_type = 'unknown'
def __init__(self, version=None):
self.version = version
self.driver = None
self.config = None
self.configured_pool_names = None
self.reserved_percentage = None
self.max_over_subscription_ratio = None
self.volume_backend_name = None
self.ip = None
self.username = None
        self.password = None
self.array_cert_verify = None
self.array_ca_cert_path = None
self._serial_number = None
self.storage_pools_map = None
self._client = None
self.allowed_ports = None
def do_setup(self, driver, conf):
self.driver = driver
self.config = self.normalize_config(conf)
self.configured_pool_names = self.config.unity_storage_pool_names
self.reserved_percentage = self.config.reserved_percentage
self.max_over_subscription_ratio = (
self.config.max_over_subscription_ratio)
self.volume_backend_name = (
self.config.safe_get('volume_backend_name') or self.driver_name)
self.ip = self.config.san_ip
self.username = self.config.san_login
self.password = self.config.san_password
# Allow for customized CA
self.array_cert_verify = self.config.driver_ssl_cert_verify
self.array_ca_cert_path = self.config.driver_ssl_cert_path
sys_version = self.client.system.system_version
if utils.is_before_4_1(sys_version):
raise exception.VolumeBackendAPIException(
data=_('Unity driver does not support array OE version: %s. '
'Upgrade to 4.1 or later.') % sys_version)
self.storage_pools_map = self.get_managed_pools()
self.allowed_ports = self.validate_ports(self.config.unity_io_ports)
group_name = (self.config.config_group if self.config.config_group
else 'DEFAULT')
folder_name = '%(group)s.%(sys_name)s' % {
'group': group_name, 'sys_name': self.client.system.info.name}
persist_path = os.path.join(cfg.CONF.state_path, 'unity', folder_name)
storops.TCHelper.set_up(persist_path)
def normalize_config(self, config):
config.unity_storage_pool_names = utils.remove_empty(
'%s.unity_storage_pool_names' % config.config_group,
config.unity_storage_pool_names)
config.unity_io_ports = utils.remove_empty(
'%s.unity_io_ports' % config.config_group,
config.unity_io_ports)
return config
def get_all_ports(self):
raise NotImplementedError()
def validate_ports(self, ports_whitelist):
all_ports = self.get_all_ports()
        # After normalize_config, `ports_whitelist` is either None or a valid
        # list whose items have been stripped.
if ports_whitelist is None:
return all_ports.id
# For iSCSI port, the format is 'spa_eth0', and 'spa_iom_0_fc0' for FC.
# Unix style glob like 'spa_*' is supported.
whitelist = set(ports_whitelist)
matched, _ignored, unmatched_whitelist = utils.match_any(all_ports.id,
whitelist)
if not matched:
LOG.error('No matched ports filtered by all patterns: %s',
whitelist)
raise exception.InvalidConfigurationValue(
option='%s.unity_io_ports' % self.config.config_group,
value=self.config.unity_io_ports)
if unmatched_whitelist:
LOG.error('No matched ports filtered by below patterns: %s',
unmatched_whitelist)
raise exception.InvalidConfigurationValue(
option='%s.unity_io_ports' % self.config.config_group,
value=self.config.unity_io_ports)
LOG.info('These ports %(matched)s will be used based on '
'the option unity_io_ports: %(config)s',
{'matched': matched,
'config': self.config.unity_io_ports})
return matched
@property
def verify_cert(self):
verify_cert = self.array_cert_verify
if verify_cert and self.array_ca_cert_path is not None:
verify_cert = self.array_ca_cert_path
return verify_cert
@property
def client(self):
if self._client is None:
self._client = client.UnityClient(
self.ip,
self.username,
self.password,
verify_cert=self.verify_cert)
return self._client
@property
def serial_number(self):
if self._serial_number is None:
self._serial_number = self.client.get_serial()
return self._serial_number
def get_managed_pools(self):
names = self.configured_pool_names
array_pools = self.client.get_pools()
valid_names = utils.validate_pool_names(names, array_pools.name)
return {p.name: p for p in array_pools if p.name in valid_names}
def makeup_model(self, lun, is_snap_lun=False):
lun_type = 'snap_lun' if is_snap_lun else 'lun'
location = self._build_provider_location(lun_id=lun.get_id(),
lun_type=lun_type)
return {
'provider_location': location,
'provider_id': lun.get_id()
}
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume information
"""
params = VolumeParams(self, volume)
log_params = {
'name': params.name,
'size': params.size,
'description': params.description,
'pool': params.pool,
'io_limit_policy': params.io_limit_policy}
LOG.info('Create Volume: %(name)s, size: %(size)s, description: '
'%(description)s, pool: %(pool)s, io limit policy: '
'%(io_limit_policy)s.', log_params)
return self.makeup_model(
self.client.create_lun(name=params.name,
size=params.size,
pool=params.pool,
description=params.description,
io_limit_policy=params.io_limit_policy))
def delete_volume(self, volume):
lun_id = self.get_lun_id(volume)
if lun_id is None:
LOG.info('Backend LUN not found, skipping the deletion. '
'Volume: %(volume_name)s.',
{'volume_name': volume.name})
else:
self.client.delete_lun(lun_id)
def _initialize_connection(self, lun_or_snap, connector, vol_id):
host = self.client.create_host(connector['host'])
self.client.update_host_initiators(
host, self.get_connector_uids(connector))
hlu = self.client.attach(host, lun_or_snap)
data = self.get_connection_info(hlu, host, connector)
data['target_discovered'] = True
if vol_id is not None:
data['volume_id'] = vol_id
conn_info = {
'driver_volume_type': self.driver_volume_type,
'data': data,
}
return conn_info
@cinder_utils.trace
def initialize_connection(self, volume, connector):
lun = self.client.get_lun(lun_id=self.get_lun_id(volume))
return self._initialize_connection(lun, connector, volume.id)
def _terminate_connection(self, lun_or_snap, connector):
is_force_detach = connector is None
if is_force_detach:
self.client.detach_all(lun_or_snap)
else:
host = self.client.create_host(connector['host'])
self.client.detach(host, lun_or_snap)
@cinder_utils.trace
def terminate_connection(self, volume, connector):
lun = self.client.get_lun(lun_id=self.get_lun_id(volume))
return self._terminate_connection(lun, connector)
def get_connector_uids(self, connector):
return None
def get_connection_info(self, hlu, host, connector):
return {}
def extend_volume(self, volume, new_size):
lun_id = self.get_lun_id(volume)
if lun_id is None:
msg = (_('Backend LUN not found for Volume: %(volume_name)s.') %
{'volume_name': volume.name})
raise exception.VolumeBackendAPIException(data=msg)
else:
self.client.extend_lun(lun_id, new_size)
def _get_target_pool(self, volume):
return self.storage_pools_map[utils.get_pool_name(volume)]
def _build_provider_location(self, lun_id=None, lun_type=None):
return utils.build_provider_location(
system=self.serial_number,
lun_type=lun_type,
lun_id=lun_id,
version=self.version)
def update_volume_stats(self):
return {
'volume_backend_name': self.volume_backend_name,
'storage_protocol': self.protocol,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'pools': self.get_pools_stats(),
}
def get_pools_stats(self):
self.storage_pools_map = self.get_managed_pools()
return [self._get_pool_stats(pool) for pool in self.pools]
@property
def pools(self):
return self.storage_pools_map.values()
def _get_pool_stats(self, pool):
return {
'pool_name': pool.name,
'total_capacity_gb': utils.byte_to_gib(pool.size_total),
'provisioned_capacity_gb': utils.byte_to_gib(
pool.size_subscribed),
'free_capacity_gb': utils.byte_to_gib(pool.size_free),
'reserved_percentage': self.reserved_percentage,
'location_info': ('%(pool_name)s|%(array_serial)s' %
{'pool_name': pool.name,
'array_serial': self.serial_number}),
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'max_over_subscription_ratio': (
self.max_over_subscription_ratio)}
def get_lun_id(self, volume):
"""Retrieves id of the volume's backing LUN.
:param volume: volume information
"""
if volume.provider_location:
return utils.extract_provider_location(volume.provider_location,
'id')
else:
# In some cases, cinder will not update volume info in DB with
# provider_location returned by us. We need to retrieve the id
            # from the array.
lun = self.client.get_lun(name=volume.name)
return lun.get_id() if lun is not None else None
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: snapshot information.
"""
src_lun_id = self.get_lun_id(snapshot.volume)
snap = self.client.create_snap(src_lun_id, snapshot.name)
location = self._build_provider_location(lun_type='snapshot',
lun_id=snap.get_id())
return {'provider_location': location,
'provider_id': snap.get_id()}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: the snapshot to delete.
"""
snap = self.client.get_snap(name=snapshot.name)
self.client.delete_snap(snap)
def _get_referenced_lun(self, existing_ref):
if 'source-id' in existing_ref:
lun = self.client.get_lun(lun_id=existing_ref['source-id'])
elif 'source-name' in existing_ref:
lun = self.client.get_lun(name=existing_ref['source-name'])
else:
reason = _('Reference must contain source-id or source-name key.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
if lun is None or not lun.existed:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_("LUN doesn't exist."))
return lun
def manage_existing(self, volume, existing_ref):
"""Manages an existing LUN in the array.
        The LUN should be in a manageable pool backend, otherwise an error is
        returned.
Rename the backend storage object so that it matches the
`volume['name']` which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
        Either the LUN ID or name is supported in `existing_ref`, like:
.. code-block:: none
existing_ref:{
'source-id':<LUN id in Unity>
}
or
.. code-block:: none
existing_ref:{
'source-name':<LUN name in Unity>
}
"""
lun = self._get_referenced_lun(existing_ref)
lun.modify(name=volume.name)
return {
'provider_location':
self._build_provider_location(lun_id=lun.get_id(),
lun_type='lun'),
'provider_id': lun.get_id()
}
def manage_existing_get_size(self, volume, existing_ref):
"""Returns size of volume to be managed by `manage_existing`.
The driver does some check here:
1. The LUN `existing_ref` should be managed by the `volume.host`.
"""
lun = self._get_referenced_lun(existing_ref)
target_pool_name = utils.get_pool_name(volume)
lun_pool_name = lun.pool.name
if target_pool_name and lun_pool_name != target_pool_name:
reason = (_('The imported LUN is in pool %(pool_name)s '
'which is not managed by the host %(host)s.') %
{'pool_name': lun_pool_name,
'host': volume.host})
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
return utils.byte_to_gib(lun.size_total)
def _disconnect_device(self, conn):
conn['connector'].disconnect_volume(conn['conn']['data'],
conn['device'])
def _connect_device(self, conn):
return self.driver._connect_device(conn)
@contextlib.contextmanager
def _connect_resource(self, lun_or_snap, connector, res_id):
"""Connects to LUN or snapshot, and makes sure disconnect finally.
:param lun_or_snap: the LUN or snapshot to connect/disconnect.
:param connector: the host connector information.
:param res_id: the ID of the LUN or snapshot.
:return: the connection information, in a dict with format
like (same as the one returned by `_connect_device`):
{
'conn': <info returned by `initialize_connection`>,
'device': <value returned by `connect_volume`>,
'connector': <host connector info>
}
"""
init_conn_func = functools.partial(self._initialize_connection,
lun_or_snap, connector, res_id)
term_conn_func = functools.partial(self._terminate_connection,
lun_or_snap, connector)
with utils.assure_cleanup(init_conn_func, term_conn_func,
False) as conn_info:
conn_device_func = functools.partial(self._connect_device,
conn_info)
with utils.assure_cleanup(conn_device_func,
self._disconnect_device,
True) as attach_info:
yield attach_info
def _dd_copy(self, vol_params, src_snap, src_lun=None):
"""Creates a volume via copying a Unity snapshot.
        It attaches the `volume` and `snap`, then uses `dd` to copy the
data from the Unity snapshot to the `volume`.
"""
dest_lun = self.client.create_lun(
name=vol_params.name, size=vol_params.size, pool=vol_params.pool,
description=vol_params.description,
io_limit_policy=vol_params.io_limit_policy)
src_id = src_snap.get_id()
try:
conn_props = cinder_utils.brick_get_connector_properties()
with self._connect_resource(dest_lun, conn_props,
vol_params.volume_id) as dest_info, \
self._connect_resource(src_snap, conn_props,
src_id) as src_info:
if src_lun is None:
                    # If the source LUN is not given, get the size from the
                    # LUN backing the snapshot.
lun = self.client.get_lun(
lun_id=src_snap.storage_resource.get_id())
size_in_m = utils.byte_to_mib(lun.size_total)
else:
size_in_m = utils.byte_to_mib(src_lun.size_total)
vol_utils.copy_volume(
src_info['device']['path'],
dest_info['device']['path'],
size_in_m,
self.driver.configuration.volume_dd_blocksize,
sparse=True)
except Exception:
with excutils.save_and_reraise_exception():
utils.ignore_exception(self.client.delete_lun,
dest_lun.get_id())
LOG.error('Failed to create cloned volume: %(vol_id)s, '
'from source unity snapshot: %(snap_name)s.',
{'vol_id': vol_params.volume_id,
'snap_name': src_snap.name})
return dest_lun
def _thin_clone(self, vol_params, src_snap, src_lun=None):
tc_src = src_snap if src_lun is None else src_lun
try:
LOG.debug('Try to thin clone from %s.', tc_src.name)
lun = self.client.thin_clone(
tc_src, vol_params.name,
description=vol_params.description,
io_limit_policy=vol_params.io_limit_policy,
new_size_gb=vol_params.size)
except storops_ex.UnityThinCloneLimitExceededError:
LOG.info('Number of thin clones of base LUN exceeds system '
'limit, dd-copy a new one and thin clone from it.')
# Copy via dd if thin clone meets the system limit
hidden = copy.copy(vol_params)
hidden.name = 'hidden-%s' % vol_params.name
hidden.description = 'hidden-%s' % vol_params.description
copied_lun = self._dd_copy(hidden, src_snap, src_lun=src_lun)
LOG.debug('Notify storops the dd action of lun: %(src_name)s. And '
'the newly copied lun is: %(copied)s.',
{'src_name': tc_src.name, 'copied': copied_lun.name})
storops.TCHelper.notify(tc_src,
storops.ThinCloneActionEnum.DD_COPY,
copied_lun)
lun = self.client.thin_clone(
copied_lun, vol_params.name,
description=vol_params.description,
io_limit_policy=vol_params.io_limit_policy,
new_size_gb=vol_params.size)
except storops_ex.SystemAPINotSupported:
            # Thin clone is not supported on array OE versions before Merlin
lun = self._dd_copy(vol_params, src_snap, src_lun=src_lun)
LOG.debug(
'Volume copied via dd because array OE is too old to support '
'thin clone api. source snap: %(src_snap)s, lun: %(src_lun)s.',
{'src_snap': src_snap.name,
'src_lun': 'Unknown' if src_lun is None else src_lun.name})
return lun
def create_volume_from_snapshot(self, volume, snapshot):
snap = self.client.get_snap(snapshot.name)
return self.makeup_model(
self._thin_clone(VolumeParams(self, volume), snap),
is_snap_lun=True)
def create_cloned_volume(self, volume, src_vref):
"""Creates cloned volume.
1. Take an internal snapshot of source volume, and attach it.
2. Thin clone from the snapshot to a new volume.
        Note: there are several cases where the thin clone falls back to `dd`:
2.1 Source volume is attached (in-use).
2.2 Array OE version doesn't support thin clone.
2.3 The current LUN family reaches the thin clone limits.
3. Delete the internal snapshot created in step 1.
"""
src_lun_id = self.get_lun_id(src_vref)
if src_lun_id is None:
raise exception.VolumeBackendAPIException(
data=_(
"LUN ID of source volume: %s not found.") % src_vref.name)
src_lun = self.client.get_lun(lun_id=src_lun_id)
src_snap_name = 'snap_clone_%s' % volume.id
create_snap_func = functools.partial(self.client.create_snap,
src_lun_id, src_snap_name)
vol_params = VolumeParams(self, volume)
with utils.assure_cleanup(create_snap_func,
self.client.delete_snap,
True) as src_snap:
LOG.debug('Internal snapshot for clone is created, '
'name: %(name)s, id: %(id)s.',
{'name': src_snap_name,
'id': src_snap.get_id()})
if src_vref.volume_attachment:
lun = self._dd_copy(vol_params, src_snap, src_lun=src_lun)
LOG.debug('Volume copied using dd because source volume: '
'%(name)s is attached: %(attach)s.',
{'name': src_vref.name,
'attach': src_vref.volume_attachment})
return self.makeup_model(lun)
else:
lun = self._thin_clone(vol_params, src_snap, src_lun=src_lun)
return self.makeup_model(lun, is_snap_lun=True)
def get_pool_name(self, volume):
return self.client.get_pool_name(volume.name)
@cinder_utils.trace
def initialize_connection_snapshot(self, snapshot, connector):
snap = self.client.get_snap(snapshot.name)
return self._initialize_connection(snap, connector, snapshot.id)
@cinder_utils.trace
def terminate_connection_snapshot(self, snapshot, connector):
snap = self.client.get_snap(snapshot.name)
return self._terminate_connection(snap, connector)
@cinder_utils.trace
def restore_snapshot(self, volume, snapshot):
return self.client.restore_snapshot(snapshot.name)
class ISCSIAdapter(CommonAdapter):
protocol = PROTOCOL_ISCSI
driver_name = 'UnityISCSIDriver'
driver_volume_type = 'iscsi'
def get_all_ports(self):
return self.client.get_ethernet_ports()
def get_connector_uids(self, connector):
return utils.extract_iscsi_uids(connector)
def get_connection_info(self, hlu, host, connector):
targets = self.client.get_iscsi_target_info(self.allowed_ports)
if not targets:
msg = _("There is no accessible iSCSI targets on the system.")
raise exception.VolumeBackendAPIException(data=msg)
one_target = random.choice(targets)
portals = [a['portal'] for a in targets]
iqns = [a['iqn'] for a in targets]
data = {
'target_luns': [hlu] * len(portals),
'target_iqns': iqns,
'target_portals': portals,
'target_lun': hlu,
'target_portal': one_target['portal'],
'target_iqn': one_target['iqn'],
}
return data
class FCAdapter(CommonAdapter):
protocol = PROTOCOL_FC
driver_name = 'UnityFCDriver'
driver_volume_type = 'fibre_channel'
def __init__(self, version=None):
super(FCAdapter, self).__init__(version=version)
self.lookup_service = None
def do_setup(self, driver, config):
super(FCAdapter, self).do_setup(driver, config)
self.lookup_service = utils.create_lookup_service()
def get_all_ports(self):
return self.client.get_fc_ports()
def get_connector_uids(self, connector):
return utils.extract_fc_uids(connector)
@property
def auto_zone_enabled(self):
return self.lookup_service is not None
def get_connection_info(self, hlu, host, connector):
targets = self.client.get_fc_target_info(
host, logged_in_only=(not self.auto_zone_enabled),
allowed_ports=self.allowed_ports)
if not targets:
msg = _("There is no accessible fibre channel targets on the "
"system.")
raise exception.VolumeBackendAPIException(data=msg)
if self.auto_zone_enabled:
data = self._get_fc_zone_info(connector['wwpns'], targets)
else:
data = {
'target_wwn': targets,
}
data['target_lun'] = hlu
return data
def _terminate_connection(self, lun_or_snap, connector):
# For FC, terminate_connection needs to return data to zone manager
# which would clean the zone based on the data.
super(FCAdapter, self)._terminate_connection(lun_or_snap, connector)
ret = None
if self.auto_zone_enabled:
ret = {
'driver_volume_type': self.driver_volume_type,
'data': {}
}
host = self.client.create_host(connector['host'])
if len(host.host_luns) == 0:
targets = self.client.get_fc_target_info(
logged_in_only=True, allowed_ports=self.allowed_ports)
ret['data'] = self._get_fc_zone_info(connector['wwpns'],
targets)
return ret
def _get_fc_zone_info(self, initiator_wwns, target_wwns):
mapping = self.lookup_service.get_device_mapping_from_network(
initiator_wwns, target_wwns)
targets, itor_tgt_map = utils.convert_to_itor_tgt_map(mapping)
return {
'target_wwn': targets,
'initiator_target_map': itor_tgt_map,
}
```
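`VolumeParams` resolves `pool` and `io_limit_policy` lazily: the backend is only queried the first time the property is read, and the result is cached on the instance. A standalone sketch of that pattern, using stand-in classes instead of the real cinder/storops objects (which need a configured Unity backend):
```python
class FakeAdapter:
    """Stand-in for CommonAdapter; counts how often the backend is queried."""
    def __init__(self):
        self.calls = 0

    def _get_target_pool(self, volume):
        self.calls += 1
        return 'pool_a'

class LazyParams:
    """Same lazy-lookup idea as VolumeParams.pool: resolve once, then cache."""
    def __init__(self, adapter, volume):
        self._adapter = adapter
        self._volume = volume
        self._pool = None

    @property
    def pool(self):
        if self._pool is None:
            self._pool = self._adapter._get_target_pool(self._volume)
        return self._pool

adapter = FakeAdapter()
params = LazyParams(adapter, volume=object())
print(params.pool, params.pool)   # pool_a pool_a
print(adapter.calls)              # 1 -- the backend was hit only once
```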
|
{
"source": "j-griffith/pushbutton-ci",
"score": 2
}
|
#### File: handler/src/handler.py
```python
import json
import logging
import os
import paramiko
import pika
import sys
import time
logging.basicConfig(filename='/src/handler.log', level=logging.INFO)
class GerritEventStream(object):
def __init__(self, *args, **kwargs):
logging.info('Connecting to gerrit stream using env variables...')
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
connected = False
while not connected:
try:
self.ssh.connect(os.environ.get('gerrit_host'),
int(os.environ.get('gerrit_port')),
os.environ.get('ci_account'),
key_filename=os.environ.get('gerrit_ssh_key'))
connected = True
except paramiko.SSHException as e:
logging.error('%s', e)
                logging.warning('Gerrit may be down, will pause and retry...')
time.sleep(10)
self.stdin, self.stdout, self.stderr =\
self.ssh.exec_command("gerrit stream-events")
def __iter__(self):
return self
def next(self):
return self.stdout.readline()
def _verify_vars():
for key in ['gerrit_ssh_key', 'gerrit_host', 'gerrit_port', 'project_name',
'ci_name', 'ci_account', 'recheck_string']:
if not os.environ.get(key, None):
logging.error('Missing env variable: %s' % key)
sys.exit(1)
return True
def _is_valid_event(event):
valid = False
comment_added = event.get('comment', '')
project = event.get('change', {}).get('project', {})
branch = event.get('change', {}).get('branch', {})
author = event.get('author', {}).get('username', {})
if (comment_added and
project == os.environ.get('project_name') and
branch == 'master'):
if (os.environ.get('recheck_string') in comment_added or
('Verified+1' in comment_added and
author == 'jenkins')):
valid = True
if valid:
return event
else:
return None
reconnect = 6
while reconnect:
try:
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host='rabbit'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
reconnect = 0
except pika.exceptions.ConnectionClosed as ex:
logging.warning('Connection to RMQ failed, '
'remaining attempts: %s' %
reconnect)
reconnect -= 1
time.sleep(10)
if reconnect < 1:
raise(ex)
if not _verify_vars():
logging.error('Missing required variable, exiting!')
sys.exit(1)
connected = False
events = []
while not connected:
try:
events = GerritEventStream('sfcli')
logging.info('Connected to gerrit, streaming events.')
connected = True
except Exception as ex:
logging.exception('Error connecting to Gerrit: %s', ex)
time.sleep(60)
pass
logging.info('launching event handler/listener loop')
loop_counter = 0
while True:
for event in events:
try:
event = json.loads(event)
except Exception as ex:
logging.error('Failed json.loads on event: %s', event)
logging.exception(ex)
break
valid_event = _is_valid_event(event)
if valid_event:
logging.info('Identified valid event, sending to queue...')
channel.basic_publish(
exchange='',
routing_key='task_queue',
body=json.dumps(valid_event),
properties=pika.BasicProperties(
delivery_mode=2,))
```
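The listener only forwards comment-added events on the configured project's master branch that either contain the recheck string or are a `Verified+1` vote from jenkins. A self-contained sketch of that filter against a hypothetical event payload (the environment values below are made up for illustration):
```python
import os

# Hypothetical configuration, mirroring the env variables the handler expects.
os.environ.update({'project_name': 'openstack/cinder',
                   'recheck_string': 'run-my-ci'})

def is_valid_event(event):
    """Simplified copy of _is_valid_event, for illustration only."""
    comment = event.get('comment', '')
    change = event.get('change', {})
    author = event.get('author', {}).get('username', '')
    if not (comment and change.get('project') == os.environ['project_name']
            and change.get('branch') == 'master'):
        return None
    if (os.environ['recheck_string'] in comment
            or ('Verified+1' in comment and author == 'jenkins')):
        return event
    return None

sample = {'comment': 'Patch Set 3: Verified+1',
          'author': {'username': 'jenkins'},
          'change': {'project': 'openstack/cinder', 'branch': 'master'}}
print(is_valid_event(sample) is not None)  # True
```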
|
{
"source": "jgrigera/indico",
"score": 2
}
|
#### File: indico/cli/maintenance.py
```python
from __future__ import unicode_literals
import click
from indico.cli.core import cli_group
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.modules.attachments import Attachment, AttachmentFolder
from indico.modules.attachments.models.principals import AttachmentFolderPrincipal, AttachmentPrincipal
from indico.modules.events.contributions import Contribution
from indico.modules.events.contributions.models.principals import ContributionPrincipal
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.models.roles import EventRole
from indico.modules.events.sessions import Session
from indico.modules.events.sessions.models.principals import SessionPrincipal
click.disable_unicode_literals_warning = True
@cli_group()
def cli():
pass
def _fix_role_principals(principals, get_event):
role_attrs = get_simple_column_attrs(EventRole) | {'members'}
for p in principals:
click.echo('Fixing {}'.format(p))
event = get_event(p)
try:
event_role = [r for r in event.roles if r.code == p.event_role.code][0]
except IndexError:
event_role = EventRole(event=event)
event_role.populate_from_attrs(p.event_role, role_attrs)
else:
click.echo(' using existing role {}'.format(event_role))
p.event_role = event_role
db.session.flush()
@cli.command()
def fix_event_role_acls():
"""Fixes ACLs referencing event roles from other events.
This happened due to a bug prior to 2.2.3 when cloning an event
which had event roles in its ACL.
"""
fixed_something = False
broken = (EventPrincipal.query
.join(EventRole, EventRole.id == EventPrincipal.event_role_id)
.filter(EventPrincipal.type == PrincipalType.event_role, EventPrincipal.event_id != EventRole.event_id)
.all())
_fix_role_principals(broken, lambda p: p.event)
fixed_something = fixed_something or bool(broken)
broken = (SessionPrincipal.query
.join(Session, Session.id == SessionPrincipal.session_id)
.join(EventRole, EventRole.id == SessionPrincipal.event_role_id)
.filter(SessionPrincipal.type == PrincipalType.event_role, Session.event_id != EventRole.event_id)
.all())
_fix_role_principals(broken, lambda p: p.session.event)
fixed_something = fixed_something or bool(broken)
broken = (ContributionPrincipal.query
.join(Contribution, Contribution.id == ContributionPrincipal.contribution_id)
.join(EventRole, EventRole.id == ContributionPrincipal.event_role_id)
.filter(ContributionPrincipal.type == PrincipalType.event_role,
Contribution.event_id != EventRole.event_id)
.all())
_fix_role_principals(broken, lambda p: p.contribution.event)
fixed_something = fixed_something or bool(broken)
broken = (AttachmentFolderPrincipal.query
.join(AttachmentFolder, AttachmentFolder.id == AttachmentFolderPrincipal.folder_id)
.join(EventRole, EventRole.id == AttachmentFolderPrincipal.event_role_id)
.filter(AttachmentFolderPrincipal.type == PrincipalType.event_role,
AttachmentFolder.event_id != EventRole.event_id)
.all())
_fix_role_principals(broken, lambda p: p.folder.event)
fixed_something = fixed_something or bool(broken)
broken = (AttachmentPrincipal.query
.join(Attachment, Attachment.id == AttachmentPrincipal.attachment_id)
.join(AttachmentFolder, AttachmentFolder.id == Attachment.folder_id)
.join(EventRole, EventRole.id == AttachmentPrincipal.event_role_id)
.filter(AttachmentPrincipal.type == PrincipalType.event_role,
AttachmentFolder.event_id != EventRole.event_id)
.all())
_fix_role_principals(broken, lambda p: p.attachment.folder.event)
fixed_something = fixed_something or bool(broken)
if not fixed_something:
click.secho('Nothing to fix :)', fg='green')
return
click.confirm(click.style('Do you want to commit the fixes shown above?', fg='white', bold=True),
default=True, abort=True)
db.session.commit()
click.secho('Success!', fg='green')
```
#### File: core/settings/util.py
```python
from __future__ import unicode_literals
from copy import copy
_not_in_db = object()
def _get_cache_key(proxy, name, kwargs):
return type(proxy), proxy.module, name, frozenset(kwargs.viewitems())
def _preload_settings(cls, proxy, cache, **kwargs):
settings = cls.get_all(proxy.module, **kwargs)
for name, value in settings.iteritems():
cache_key = _get_cache_key(proxy, name, kwargs)
cache[cache_key] = value
# cache missing entries as not in db
for name in proxy.defaults.viewkeys() - settings.viewkeys():
cache_key = _get_cache_key(proxy, name, kwargs)
cache[cache_key] = _not_in_db
return settings
def get_all_settings(cls, acl_cls, proxy, no_defaults, **kwargs):
"""Helper function for SettingsProxy.get_all"""
if no_defaults:
rv = cls.get_all(proxy.module, **kwargs)
if acl_cls and proxy.acl_names:
rv.update(acl_cls.get_all_acls(proxy.module, **kwargs))
return {k: proxy._convert_to_python(k, v) for k, v in rv.iteritems()}
settings = dict(proxy.defaults)
if acl_cls and proxy.acl_names:
settings.update({name: set() for name in proxy.acl_names})
settings.update({k: proxy._convert_to_python(k, v)
for k, v in cls.get_all(proxy.module, **kwargs).iteritems()
if not proxy.strict or k in proxy.defaults})
if acl_cls and proxy.acl_names:
settings.update(acl_cls.get_all_acls(proxy.module, **kwargs))
return settings
def get_setting(cls, proxy, name, default, cache, **kwargs):
"""Helper function for SettingsProxy.get"""
from indico.core.settings import SettingsProxyBase
cache_key = _get_cache_key(proxy, name, kwargs)
try:
value = cache[cache_key]
if value is not _not_in_db:
return proxy._convert_to_python(name, value)
except KeyError:
setting = _preload_settings(cls, proxy, cache, **kwargs).get(name, _not_in_db)
cache[cache_key] = setting
if setting is not _not_in_db:
return proxy._convert_to_python(name, setting)
# value is not_in_db, so use the default
# we always copy the proxy's default in case it's something mutable
return copy(proxy.defaults.get(name)) if default is SettingsProxyBase.default_sentinel else default
def get_setting_acl(cls, proxy, name, cache, **kwargs):
"""Helper function for ACLProxy.get"""
cache_key = _get_cache_key(proxy, name, kwargs)
try:
return cache[cache_key]
except KeyError:
cache[cache_key] = acl = cls.get_acl(proxy.module, name, **kwargs)
return acl
```
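The two getters above cache settings under a key derived from the proxy and kwargs, and use the `_not_in_db` sentinel so that "cached as missing" is distinguishable from "not cached yet". A condensed, standalone sketch of that sentinel-caching pattern:
```python
_not_in_db = object()           # sentinel: cached, but known to be absent from the DB
cache = {}
defaults = {'theme': 'plain'}

def fake_db_get_all():
    return {'timezone': 'UTC'}  # pretend only this key is actually stored

def get_setting(name):
    key = ('settings', name)
    try:
        value = cache[key]
    except KeyError:
        stored = fake_db_get_all()
        # preload everything, caching missing entries as the sentinel
        for k in defaults.keys() | stored.keys():
            cache[('settings', k)] = stored.get(k, _not_in_db)
        value = cache[key]
    return defaults.get(name) if value is _not_in_db else value

print(get_setting('timezone'))  # 'UTC'   -- from the "db", now cached
print(get_setting('theme'))     # 'plain' -- cached as _not_in_db, default used
```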
#### File: legacy/services/tools.py
```python
def toJsDate(datetime):
return "(new Date(%i,%i,%i,%i,%i,%i))" % (datetime.year,
datetime.month - 1,
datetime.day,
datetime.hour,
datetime.minute,
datetime.second)
```
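For reference, a quick check of the string this helper produces; note the zero-based month that JavaScript's `Date` constructor expects:
```python
from datetime import datetime

def to_js_date(dt):
    # same formatting as toJsDate above
    return "(new Date(%i,%i,%i,%i,%i,%i))" % (dt.year, dt.month - 1, dt.day,
                                              dt.hour, dt.minute, dt.second)

print(to_js_date(datetime(2020, 3, 14, 9, 26, 53)))
# (new Date(2020,2,14,9,26,53))
```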
#### File: modules/auth/blueprint.py
```python
from __future__ import unicode_literals
from flask import request
from indico.modules.auth.controllers import (RHAccounts, RHAdminImpersonate, RHLinkAccount, RHLogin, RHLoginForm,
RHLogout, RHRegister, RHRemoveAccount, RHResetPassword)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('auth', __name__, template_folder='templates', virtual_template_folder='auth')
_bp.add_url_rule('/login/', 'login', RHLogin, methods=('GET', 'POST'))
_bp.add_url_rule('/login/<provider>/', 'login', RHLogin)
_bp.add_url_rule('/login/<provider>/form', 'login_form', RHLoginForm)
_bp.add_url_rule('/login/<provider>/link-account', 'link_account', RHLinkAccount, methods=('GET', 'POST'))
_bp.add_url_rule('/logout/', 'logout', RHLogout)
_bp.add_url_rule('/register/', 'register', RHRegister, methods=('GET', 'POST'), defaults={'provider': None})
_bp.add_url_rule('/register/<provider>', 'register', RHRegister, methods=('GET', 'POST'))
_bp.add_url_rule('/reset-password/', 'resetpass', RHResetPassword, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/users/impersonate', 'admin_impersonate', RHAdminImpersonate, methods=('POST',))
with _bp.add_prefixed_rules('/user/<int:user_id>', '/user'):
_bp.add_url_rule('/accounts/', 'accounts', RHAccounts, methods=('GET', 'POST'))
_bp.add_url_rule('/accounts/<identity>/remove/', 'remove_account', RHRemoveAccount, methods=('POST',))
@_bp.url_defaults
def _add_user_id(endpoint, values):
if endpoint in {'auth.accounts', 'auth.remove_account'} and 'user_id' not in values:
values['user_id'] = request.view_args.get('user_id')
# Legacy URLs
auth_compat_blueprint = _compat_bp = IndicoBlueprint('compat_auth', __name__)
_compat_bp.add_url_rule('/user/login', 'login', make_compat_redirect_func(_bp, 'login'))
_compat_bp.add_url_rule('/user/register', 'register', make_compat_redirect_func(_bp, 'register'))
```
#### File: auth/models/registration_requests.py
```python
from __future__ import unicode_literals
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from werkzeug.datastructures import MultiDict
from indico.core.db import db
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
class RegistrationRequest(db.Model):
__tablename__ = 'registration_requests'
__table_args__ = (
db.CheckConstraint('email = lower(email)', 'lowercase_email'),
{'schema': 'users'}
)
id = db.Column(
db.Integer,
primary_key=True
)
comment = db.Column(
db.Text,
nullable=False,
default=''
)
email = db.Column(
db.String,
unique=True,
nullable=False,
index=True
)
extra_emails = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
user_data = db.Column(
JSONB,
nullable=False
)
_identity_data = db.Column(
'identity_data',
JSONB,
nullable=False
)
settings = db.Column(
JSONB,
nullable=False
)
@locator_property
def locator(self):
return {'request_id': self.id}
@property
def identity_data(self):
identity_data = self._identity_data.copy()
# if we have data in identity_data, it was converted from a
# MultiDict so we need to convert it back.
if 'data' in identity_data:
tmp = MultiDict()
tmp.update(self._identity_data['data'])
identity_data['data'] = tmp
return identity_data
@identity_data.setter
def identity_data(self, identity_data):
identity_data = identity_data.copy()
# `identity_data['data']` for multipass-based identities is a
# MultiDict, but json-encoding it would lose all extra values
# for a key, so we convert it to a dict of lists first
if 'data' in identity_data:
identity_data['data'] = dict(identity_data['data'].lists())
self._identity_data = identity_data
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'email')
```
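The `identity_data` getter/setter round-trips multi-valued form data through JSON by storing it as a dict of lists. A small sketch of that round trip, assuming only that werkzeug is installed:
```python
from werkzeug.datastructures import MultiDict

original = MultiDict([('group', 'admins'), ('group', 'editors')])
stored = dict(original.lists())   # JSON-friendly: {'group': ['admins', 'editors']}
restored = MultiDict()
restored.update(stored)           # update() re-expands list values into multiple entries
assert restored.getlist('group') == original.getlist('group')
```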
#### File: abstracts/models/review_questions.py
```python
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.review_questions import ReviewQuestionMixin
from indico.modules.events.reviewing_questions_fields import get_reviewing_field_types
from indico.util.locators import locator_property
class AbstractReviewQuestion(ReviewQuestionMixin, db.Model):
__tablename__ = 'abstract_review_questions'
__table_args__ = {'schema': 'event_abstracts'}
event_backref_name = 'abstract_review_questions'
# relationship backrefs:
# - ratings (AbstractReviewRating.question)
@locator_property
def locator(self):
return dict(self.event.locator, question_id=self.id)
@property
def field(self):
try:
impl = get_reviewing_field_types('abstracts')[self.field_type]
except KeyError:
return None
return impl(self)
```
#### File: controllers/backend/editable_list.py
```python
from __future__ import unicode_literals
import uuid
from flask import jsonify, request, session
from sqlalchemy.orm import joinedload
from werkzeug.exceptions import Forbidden
from indico.legacy.common.cache import GenericCache
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.editing.controllers.base import RHEditablesBase, RHEditableTypeManagementBase
from indico.modules.events.editing.models.editable import Editable
from indico.modules.events.editing.operations import assign_editor, generate_editables_zip, unassign_editor
from indico.modules.events.editing.schemas import EditableBasicSchema, EditingEditableListSchema
from indico.util.i18n import _
from indico.util.marshmallow import Principal
from indico.web.args import use_kwargs
from indico.web.flask.util import url_for
archive_cache = GenericCache('editables-archive')
class RHEditableList(RHEditableTypeManagementBase):
"""Return the list of editables of the event for a given type"""
def _process_args(self):
RHEditableTypeManagementBase._process_args(self)
self.contributions = (Contribution.query
.with_parent(self.event)
.options(joinedload('editables'))
.order_by(Contribution.friendly_id)
.all())
def _process(self):
return (EditingEditableListSchema(many=True, context={'editable_type': self.editable_type})
.jsonify(self.contributions))
class RHPrepareEditablesArchive(RHEditablesBase):
def _process(self):
key = unicode(uuid.uuid4())
data = [editable.id for editable in self.editables]
archive_cache.set(key, data, time=1800)
download_url = url_for('.download_archive', self.event, type=self.editable_type.name, uuid=key)
return jsonify(download_url=download_url)
class RHDownloadArchive(RHEditableTypeManagementBase):
def _process(self):
editable_ids = archive_cache.get(unicode(request.view_args['uuid']), [])
editables = Editable.query.filter(Editable.id.in_(editable_ids)).all()
return generate_editables_zip(editables)
class RHAssignEditor(RHEditablesBase):
@use_kwargs({
'editor': Principal(required=True)
})
def _process_args(self, editor):
RHEditablesBase._process_args(self)
if (not self.event.can_manage(editor, self.editable_type.editor_permission)
and not self.event.can_manage(editor, 'editing_manager')):
raise Forbidden(_('This user is not an editor of the {} type').format(self.editable_type.name))
self.editor = editor
def _process(self):
editables = [e for e in self.editables if e.editor != self.editor]
for editable in editables:
assign_editor(editable, self.editor)
return EditableBasicSchema(many=True).jsonify(editables)
class RHAssignMyselfAsEditor(RHEditablesBase):
def _check_access(self):
RHEditablesBase._check_access(self)
if (not self.event.can_manage(session.user, self.editable_type.editor_permission)
and not self.event.can_manage(session.user, 'editing_manager')):
raise Forbidden(_('You are not an editor of the {} type').format(self.editable_type.name))
def _process(self):
editables = [e for e in self.editables if e.editor != session.user]
for editable in editables:
assign_editor(editable, session.user)
return EditableBasicSchema(many=True).jsonify(editables)
class RHUnassignEditor(RHEditablesBase):
def _process(self):
editables = [e for e in self.editables if e.editor]
for editable in editables:
unassign_editor(editable)
return EditableBasicSchema(many=True).jsonify(editables)
```
#### File: papers/controllers/base.py
```python
from __future__ import unicode_literals
from flask import request, session
from werkzeug.exceptions import Forbidden, NotFound
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.management.controllers.base import ManageEventMixin
from indico.modules.events.util import check_event_locked
class RHPapersBase(RHDisplayEventBase):
"""Base class for all paper-related RHs"""
EVENT_FEATURE = 'papers'
def _check_access(self):
RHDisplayEventBase._check_access(self)
# Only let managers access the management versions.
if self.management and not self.event.cfp.is_manager(session.user):
raise Forbidden
@property
def management(self):
"""Whether the RH is currently used in the management area"""
return request.view_args.get('management', False)
class RHManagePapersBase(ManageEventMixin, RHPapersBase):
"""
Base class for all paper-related RHs that require full event
management permissions
"""
PERMISSION = 'paper_manager'
DENY_FRAMES = True
@property
def management(self):
"""Whether the RH is currently used in the management area"""
return request.view_args.get('management', True)
class RHJudgingAreaBase(RHPapersBase):
"""Base class for all paper-related RHs only available to judges/managers"""
def _check_access(self):
RHPapersBase._check_access(self)
if not session.user or not self.event.cfp.can_access_judging_area(session.user):
raise Forbidden
check_event_locked(self, self.event)
class RHPaperBase(RHPapersBase):
PAPER_REQUIRED = True
normalize_url_spec = {
'locators': {
lambda self: self.contribution
}
}
def _process_args(self):
RHPapersBase._process_args(self)
self.contribution = Contribution.get_or_404(request.view_args['contrib_id'], is_deleted=False)
self.paper = self.contribution.paper
if self.paper is None and self.PAPER_REQUIRED:
raise NotFound
def _check_access(self):
RHPapersBase._check_access(self)
if not self._check_paper_protection():
raise Forbidden
check_event_locked(self, self.event)
def _check_paper_protection(self):
"""Perform a permission check on the current paper.
Override this in case you want to check for more specific
privileges than the generic "can access".
"""
return self.contribution.can_access(session.user)
```
#### File: registration/testing/fixtures.py
```python
import pytest
from indico.modules.events.registration.models.forms import RegistrationForm
from indico.modules.events.registration.models.registrations import Registration, RegistrationState
from indico.modules.events.registration.util import create_personal_data_fields
@pytest.fixture
def dummy_regform(db, dummy_event):
"""Create a dummy registration form for the dummy event"""
regform = RegistrationForm(event=dummy_event, title='Registration Form', currency='USD')
create_personal_data_fields(regform)
# enable all fields
for field in regform.sections[0].fields:
field.is_enabled = True
db.session.add(regform)
db.session.flush()
return regform
@pytest.fixture
def dummy_reg(db, dummy_event, dummy_regform, dummy_user):
"""Create a dummy registration for the dummy event"""
reg = Registration(
registration_form_id=dummy_regform.id,
first_name="Guinea",
last_name="Pig",
checked_in=True,
state=RegistrationState.complete,
currency="USD",
email="<EMAIL>",
user=dummy_user
)
dummy_event.registrations.append(reg)
db.session.flush()
return reg
```
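A hypothetical test consuming these fixtures (the test name is made up; the `db`, `dummy_event` and `dummy_user` fixtures are provided by indico's pytest plugins):
```python
# Sketch only: relies on indico's testing infrastructure being set up.
def test_dummy_registration(dummy_reg, dummy_regform):
    assert dummy_reg.registration_form_id == dummy_regform.id
    assert dummy_reg.first_name == 'Guinea'
    assert dummy_reg.checked_in
```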
#### File: events/requests/__init__.py
```python
from __future__ import unicode_literals
from flask import session
from indico.core import signals
from indico.modules.events.requests.base import RequestDefinitionBase, RequestFormBase
from indico.modules.events.requests.util import get_request_definitions, is_request_manager
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
__all__ = ('RequestDefinitionBase', 'RequestFormBase')
@signals.app_created.connect
def _check_request_definitions(app, **kwargs):
# This will raise RuntimeError if the request type names are not unique
get_request_definitions()
@signals.menu.items.connect_via('event-management-sidemenu')
def _extend_event_management_menu(sender, event, **kwargs):
if not get_request_definitions():
return
if not event.can_manage(session.user) and not is_request_manager(session.user):
return
return SideMenuItem('requests', _('Logistics'), url_for('requests.event_requests', event), section='services')
@signals.event_management.management_url.connect
def _get_event_management_url(event, **kwargs):
if is_request_manager(session.user):
return url_for('requests.event_requests', event)
@signals.users.merged.connect
def _merge_users(target, source, **kwargs):
from indico.modules.events.requests.models.requests import Request
Request.find(created_by_id=source.id).update({Request.created_by_id: target.id})
Request.find(processed_by_id=source.id).update({Request.processed_by_id: target.id})
@signals.event.deleted.connect
def _event_deleted(event, **kwargs):
from indico.modules.events.requests.models.requests import Request, RequestState
query = (Request.query.with_parent(event)
.filter(Request.state.in_((RequestState.accepted, RequestState.pending))))
for req in query:
req.definition.withdraw(req, notify_event_managers=False)
```
#### File: events/sessions/fields.py
```python
from __future__ import unicode_literals
from indico.modules.events.fields import PersonLinkListFieldBase
from indico.modules.events.sessions.models.persons import SessionBlockPersonLink
from indico.modules.events.util import serialize_person_link
from indico.web.forms.widgets import JinjaWidget
class SessionBlockPersonLinkListField(PersonLinkListFieldBase):
person_link_cls = SessionBlockPersonLink
linked_object_attr = 'session_block'
widget = JinjaWidget('events/sessions/forms/session_person_link_widget.html')
def _serialize_person_link(self, principal, extra_data=None):
extra_data = extra_data or {}
return dict(extra_data, **serialize_person_link(principal))
def _convert_data(self, data):
return list({self._get_person_link(x) for x in data})
```
#### File: timetable/models/entries.py
```python
from __future__ import unicode_literals
from datetime import timedelta
from sqlalchemy import DDL
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.base import NEVER_SET, NO_VALUE
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime
from indico.core.db.sqlalchemy.util.models import populate_one_to_one_backrefs
from indico.util.date_time import overlaps
from indico.util.i18n import _
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
from indico.util.struct.enum import RichIntEnum
class TimetableEntryType(RichIntEnum):
__titles__ = [None, _("Session Block"), _("Contribution"), _("Break")]
# entries are uppercase since `break` is a keyword...
SESSION_BLOCK = 1
CONTRIBUTION = 2
BREAK = 3
def _make_check(type_, *cols):
all_cols = {'session_block_id', 'contribution_id', 'break_id'}
required_cols = all_cols & set(cols)
forbidden_cols = all_cols - required_cols
criteria = ['{} IS NULL'.format(col) for col in sorted(forbidden_cols)]
criteria += ['{} IS NOT NULL'.format(col) for col in sorted(required_cols)]
condition = 'type != {} OR ({})'.format(type_, ' AND '.join(criteria))
return db.CheckConstraint(condition, 'valid_{}'.format(type_.name.lower()))
class TimetableEntry(db.Model):
__tablename__ = 'timetable_entries'
@declared_attr
def __table_args__(cls):
return (db.Index('ix_timetable_entries_start_dt_desc', cls.start_dt.desc()),
_make_check(TimetableEntryType.SESSION_BLOCK, 'session_block_id'),
_make_check(TimetableEntryType.CONTRIBUTION, 'contribution_id'),
_make_check(TimetableEntryType.BREAK, 'break_id'),
db.CheckConstraint("type != {} OR parent_id IS NULL".format(TimetableEntryType.SESSION_BLOCK),
'valid_parent'),
{'schema': 'events'})
id = db.Column(
db.Integer,
primary_key=True
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
parent_id = db.Column(
db.Integer,
db.ForeignKey('events.timetable_entries.id'),
index=True,
nullable=True,
)
session_block_id = db.Column(
db.Integer,
db.ForeignKey('events.session_blocks.id'),
index=True,
unique=True,
nullable=True
)
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
unique=True,
nullable=True
)
break_id = db.Column(
db.Integer,
db.ForeignKey('events.breaks.id'),
index=True,
unique=True,
nullable=True
)
type = db.Column(
PyIntEnum(TimetableEntryType),
nullable=False
)
start_dt = db.Column(
UTCDateTime,
nullable=False
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'timetable_entries',
order_by=lambda: TimetableEntry.start_dt,
cascade='all, delete-orphan',
lazy='dynamic'
)
)
session_block = db.relationship(
'SessionBlock',
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
contribution = db.relationship(
'Contribution',
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
break_ = db.relationship(
'Break',
cascade='all, delete-orphan',
single_parent=True,
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
children = db.relationship(
'TimetableEntry',
order_by='TimetableEntry.start_dt',
lazy=True,
backref=db.backref(
'parent',
remote_side=[id],
lazy=True
)
)
# relationship backrefs:
# - parent (TimetableEntry.children)
@property
def object(self):
if self.type == TimetableEntryType.SESSION_BLOCK:
return self.session_block
elif self.type == TimetableEntryType.CONTRIBUTION:
return self.contribution
elif self.type == TimetableEntryType.BREAK:
return self.break_
@object.setter
def object(self, value):
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.breaks import Break
self.session_block = self.contribution = self.break_ = None
if isinstance(value, SessionBlock):
self.session_block = value
elif isinstance(value, Contribution):
self.contribution = value
elif isinstance(value, Break):
self.break_ = value
elif value is not None:
raise TypeError('Unexpected object: {}'.format(value))
@hybrid_property
def duration(self):
return self.object.duration if self.object is not None else None
@duration.setter
def duration(self, value):
self.object.duration = value
@duration.expression
def duration(cls):
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.breaks import Break
return db.case({
TimetableEntryType.SESSION_BLOCK.value:
db.select([SessionBlock.duration])
.where(SessionBlock.id == cls.session_block_id)
.correlate_except(SessionBlock)
.as_scalar(),
TimetableEntryType.CONTRIBUTION.value:
db.select([Contribution.duration])
.where(Contribution.id == cls.contribution_id)
.correlate_except(Contribution)
.as_scalar(),
TimetableEntryType.BREAK.value:
db.select([Break.duration])
.where(Break.id == cls.break_id)
.correlate_except(Break)
.as_scalar(),
}, value=cls.type)
@hybrid_property
def end_dt(self):
if self.start_dt is None or self.duration is None:
return None
return self.start_dt + self.duration
@end_dt.expression
def end_dt(cls):
return cls.start_dt + cls.duration
@property
def session_siblings(self):
if self.type == TimetableEntryType.SESSION_BLOCK:
return [x for x in self.siblings
if x.session_block and x.session_block.session == self.session_block.session]
elif self.parent:
return self.siblings
else:
return []
@property
def siblings(self):
from indico.modules.events.timetable.util import get_top_level_entries, get_nested_entries
tzinfo = self.event.tzinfo
day = self.start_dt.astimezone(tzinfo).date()
siblings = (get_nested_entries(self.event)[self.parent_id]
if self.parent_id else
get_top_level_entries(self.event))
return [x for x in siblings if x.start_dt.astimezone(tzinfo).date() == day and x.id != self.id]
@property
def siblings_query(self):
tzinfo = self.event.tzinfo
day = self.start_dt.astimezone(tzinfo).date()
criteria = (TimetableEntry.id != self.id,
TimetableEntry.parent == self.parent,
db.cast(TimetableEntry.start_dt.astimezone(tzinfo), db.Date) == day)
return TimetableEntry.query.with_parent(self.event).filter(*criteria)
@locator_property
def locator(self):
return dict(self.event.locator, entry_id=self.id)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'type', 'start_dt', 'end_dt', _repr=self.object)
def can_view(self, user):
"""Checks whether the user will see this entry in the timetable."""
if self.type in (TimetableEntryType.CONTRIBUTION, TimetableEntryType.BREAK):
return self.object.can_access(user)
elif self.type == TimetableEntryType.SESSION_BLOCK:
if self.object.can_access(user):
return True
return any(x.can_access(user) for x in self.object.contributions)
def extend_start_dt(self, start_dt):
assert start_dt < self.start_dt
extension = self.start_dt - start_dt
self.start_dt = start_dt
self.duration = self.duration + extension
def extend_end_dt(self, end_dt):
diff = end_dt - self.end_dt
if diff < timedelta(0):
raise ValueError("New end_dt is before current end_dt.")
self.duration += diff
def extend_parent(self, by_start=True, by_end=True):
"""Extend start/end of parent objects if needed.
        No extension is performed for entries crossing a day boundary in the
        event timezone.
:param by_start: Extend parent by start datetime.
:param by_end: Extend parent by end datetime.
"""
tzinfo = self.event.tzinfo
if self.start_dt.astimezone(tzinfo).date() != self.end_dt.astimezone(tzinfo).date():
return
if self.parent is None:
if by_start and self.start_dt < self.event.start_dt:
self.event.start_dt = self.start_dt
if by_end and self.end_dt > self.event.end_dt:
self.event.end_dt = self.end_dt
else:
extended = False
if by_start and self.start_dt < self.parent.start_dt:
self.parent.extend_start_dt(self.start_dt)
extended = True
if by_end and self.end_dt > self.parent.end_dt:
self.parent.extend_end_dt(self.end_dt)
extended = True
if extended:
self.parent.extend_parent(by_start=by_start, by_end=by_end)
def is_parallel(self, in_session=False):
siblings = self.siblings if not in_session else self.session_siblings
for sibling in siblings:
if overlaps((self.start_dt, self.end_dt), (sibling.start_dt, sibling.end_dt)):
return True
return False
def move(self, start_dt):
"""Move the entry to start at a different time.
This method automatically moves children of the entry to
preserve their start time relative to the parent's start time.
"""
if self.type == TimetableEntryType.SESSION_BLOCK:
diff = start_dt - self.start_dt
for child in self.children:
child.start_dt += diff
self.start_dt = start_dt
def move_next_to(self, sibling, position='before'):
if sibling not in self.siblings:
raise ValueError("Not a sibling")
if position not in ('before', 'after'):
raise ValueError("Invalid position")
if position == 'before':
start_dt = sibling.start_dt - self.duration
else:
start_dt = sibling.end_dt
self.move(start_dt)
@listens_for(TimetableEntry.__table__, 'after_create')
def _add_timetable_consistency_trigger(target, conn, **kw):
sql = """
CREATE CONSTRAINT TRIGGER consistent_timetable
AFTER INSERT OR UPDATE
ON {}
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW
EXECUTE PROCEDURE events.check_timetable_consistency('timetable_entry');
""".format(target.fullname)
DDL(sql).execute(conn)
@listens_for(TimetableEntry.session_block, 'set')
def _set_session_block(target, value, *unused):
target.type = TimetableEntryType.SESSION_BLOCK
@listens_for(TimetableEntry.contribution, 'set')
def _set_contribution(target, value, *unused):
target.type = TimetableEntryType.CONTRIBUTION
@listens_for(TimetableEntry.break_, 'set')
def _set_break(target, value, *unused):
target.type = TimetableEntryType.BREAK
@listens_for(TimetableEntry.start_dt, 'set')
def _set_start_dt(target, value, oldvalue, *unused):
from indico.modules.events.util import register_time_change
if oldvalue in (NEVER_SET, NO_VALUE):
return
if value != oldvalue and target.object is not None:
register_time_change(target)
populate_one_to_one_backrefs(TimetableEntry, 'session_block', 'contribution', 'break_')
```
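`TimetableEntry.move` shifts a session block together with all of its children by the same delta, so each child keeps its offset relative to the block. A standalone sketch of that behaviour using plain datetimes instead of database objects:
```python
from datetime import datetime

class FakeEntry:
    def __init__(self, start_dt, children=()):
        self.start_dt = start_dt
        self.children = list(children)

    def move(self, start_dt):
        # same idea as TimetableEntry.move for session blocks
        diff = start_dt - self.start_dt
        for child in self.children:
            child.start_dt += diff
        self.start_dt = start_dt

child = FakeEntry(datetime(2024, 5, 2, 10, 30))
block = FakeEntry(datetime(2024, 5, 2, 10, 0), children=[child])
block.move(datetime(2024, 5, 2, 14, 0))
print(block.start_dt.time(), child.start_dt.time())  # 14:00:00 14:30:00 -- offset kept
```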
#### File: modules/groups/legacy.py
```python
from __future__ import unicode_literals
from indico.legacy.fossils.user import IGroupFossil
from indico.modules.groups import GroupProxy
from indico.util.fossilize import Fossilizable, fossilizes
from indico.util.string import encode_utf8, return_ascii, to_unicode
class GroupWrapper(Fossilizable):
"""Group-like wrapper class that holds a DB-stored (or remote) group."""
fossilizes(IGroupFossil)
def __init__(self, group_id):
self.id = to_unicode(group_id).encode('utf-8')
@property
def group(self):
"""Returns the underlying GroupProxy
:rtype: indico.modules.groups.core.GroupProxy
"""
raise NotImplementedError
def getId(self):
return self.id
def getName(self):
raise NotImplementedError
def getEmail(self):
return ''
@return_ascii
def __repr__(self):
return u'<{} {}: {}>'.format(type(self).__name__, self.id, self.group)
class LocalGroupWrapper(GroupWrapper):
is_local = True
groupType = 'Default'
@encode_utf8
def getName(self):
return GroupProxy(self.id).name
class LDAPGroupWrapper(GroupWrapper):
is_local = False
groupType = 'LDAP'
def __init__(self, group_id, provider_name):
super(LDAPGroupWrapper, self).__init__(group_id)
self.provider_name = provider_name
@property
def provider(self):
return self.provider_name
@encode_utf8
def getName(self):
return self.id
```
#### File: modules/news/views.py
```python
from __future__ import unicode_literals
from indico.modules.admin.views import WPAdmin
from indico.util.i18n import _
from indico.web.views import WPDecorated, WPJinjaMixin
class WPNews(WPJinjaMixin, WPDecorated):
template_prefix = 'news/'
title = _('News')
def _get_body(self, params):
return self._get_page_content(params)
class WPManageNews(WPAdmin):
template_prefix = 'news/'
```
#### File: modules/users/views.py
```python
from __future__ import unicode_literals
from flask import request
from indico.modules.admin.views import WPAdmin
from indico.modules.users import User
from indico.util.i18n import _
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated, WPJinjaMixin
class WPUser(WPJinjaMixin, WPDecorated):
"""Base WP for user profile pages.
Whenever you use this, you MUST include `user` in the params passed to
`render_template`. Any RH using this should inherit from `RHUserBase`
which already handles user/admin access. In this case, simply add
``user=self.user`` to your `render_template` call.
"""
template_prefix = 'users/'
def __init__(self, rh, active_menu_item, **kwargs):
kwargs['active_menu_item'] = active_menu_item
WPDecorated.__init__(self, rh, **kwargs)
def _get_breadcrumbs(self):
if 'user_id' in request.view_args:
user = User.get(request.view_args['user_id'])
profile_breadcrumb = _('Profile of {name}').format(name=user.full_name)
else:
profile_breadcrumb = _('My Profile')
return render_breadcrumbs(profile_breadcrumb)
def _get_body(self, params):
return self._get_page_content(params)
class WPUserDashboard(WPUser):
bundles = ('module_users.dashboard.js',)
class WPUsersAdmin(WPAdmin):
template_prefix = 'users/'
bundles = ('module_users.js',)
```
#### File: modules/vc/__init__.py
```python
from __future__ import unicode_literals
from flask import has_request_context, render_template, session
from indico.core import signals
from indico.modules.events.layout.util import MenuEntryData
from indico.modules.users import User
from indico.modules.vc.forms import VCPluginSettingsFormBase
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomEventAssociation
from indico.modules.vc.plugins import VCPluginMixin
from indico.modules.vc.util import get_managed_vc_plugins, get_vc_plugins
from indico.util.i18n import _
from indico.web.flask.templating import get_overridable_template_name, template_hook
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem, TopMenuItem
__all__ = ('VCPluginMixin', 'VCPluginSettingsFormBase', 'VCRoomEventAssociation')
@template_hook('event-header')
def _inject_event_header(event, **kwargs):
res = VCRoomEventAssociation.find_for_event(event, only_linked_to_event=True)
event_vc_rooms = [event_vc_room for event_vc_room in res.all() if event_vc_room.vc_room.plugin is not None]
if event_vc_rooms:
return render_template('vc/event_header.html', event=event, event_vc_rooms=event_vc_rooms)
@template_hook('vc-actions')
def _inject_vc_room_action_buttons(event, item, **kwargs):
event_vc_room = VCRoomEventAssociation.get_linked_for_event(event).get(item)
if event_vc_room and event_vc_room.vc_room.plugin:
plugin = event_vc_room.vc_room.plugin
name = get_overridable_template_name('vc_room_timetable_buttons.html', plugin, core_prefix='vc/')
return render_template(name, event=event, event_vc_room=event_vc_room, **kwargs)
@signals.menu.items.connect_via('event-management-sidemenu')
def _extend_event_management_menu(sender, event, **kwargs):
if not get_vc_plugins():
return
if not event.can_manage(session.user):
return
return SideMenuItem('videoconference', _('Videoconference'), url_for('vc.manage_vc_rooms', event),
section='services')
@signals.event.sidemenu.connect
def _extend_event_menu(sender, **kwargs):
def _visible(event):
return bool(get_vc_plugins()) and VCRoomEventAssociation.find_for_event(event).has_rows()
return MenuEntryData(_('Videoconference Rooms'), 'videoconference_rooms', 'vc.event_videoconference',
position=14, visible=_visible)
@signals.event.contribution_deleted.connect
@signals.event.session_block_deleted.connect
def _link_object_deleted(obj, **kwargs):
for event_vc_room in obj.vc_room_associations:
event_vc_room.link_object = obj.event
@signals.event.session_deleted.connect
def _session_deleted(sess, **kwargs):
for block in sess.blocks:
_link_object_deleted(block)
@signals.event.deleted.connect
def _event_deleted(event, **kwargs):
user = session.user if has_request_context() and session.user else User.get_system_user()
for event_vc_room in VCRoomEventAssociation.find_for_event(event, include_hidden=True, include_deleted=True):
event_vc_room.delete(user)
@signals.menu.items.connect_via('top-menu')
def _topmenu_items(sender, **kwargs):
if not session.user or not get_managed_vc_plugins(session.user):
return
return TopMenuItem('services-vc', _('Videoconference'), url_for('vc.vc_room_list'), section='services')
@signals.event_management.get_cloners.connect
def _get_vc_cloner(sender, **kwargs):
from indico.modules.vc.clone import VCCloner
return VCCloner
@signals.users.merged.connect
def _merge_users(target, source, **kwargs):
VCRoom.find(created_by_id=source.id).update({VCRoom.created_by_id: target.id})
```
#### File: indico/util/date_time_test.py
```python
from datetime import datetime, timedelta
import pytest
from pytz import timezone
from indico.util.date_time import as_utc, format_human_timedelta, iterdays, strftime_all_years
@pytest.mark.parametrize(('delta', 'granularity', 'expected'), (
(timedelta(days=0, hours=0, minutes=0, seconds=0), 'seconds', '0 seconds'),
(timedelta(days=0, hours=0, minutes=0, seconds=0), 'minutes', '0 minutes'),
(timedelta(days=0, hours=0, minutes=0, seconds=0), 'hours', '0 hours'),
(timedelta(days=0, hours=0, minutes=0, seconds=0), 'days', '0 days'),
(timedelta(days=0, hours=0, minutes=0, seconds=5), 'seconds', '5 seconds'),
(timedelta(days=0, hours=0, minutes=0, seconds=5), 'minutes', '5 seconds'),
(timedelta(days=0, hours=0, minutes=1, seconds=5), 'seconds', '1m 5s'),
(timedelta(days=0, hours=0, minutes=1, seconds=5), 'minutes', '1 minute'),
(timedelta(days=0, hours=0, minutes=1, seconds=5), 'hours', '1 minute'),
(timedelta(days=0, hours=1, minutes=10, seconds=0), 'hours', '1 hour'),
(timedelta(days=0, hours=1, minutes=30, seconds=0), 'minutes', '1h 30m'),
(timedelta(days=1, hours=1, minutes=0, seconds=0), 'minutes', '1d 1h'),
(timedelta(days=1, hours=1, minutes=10, seconds=0), 'minutes', '1d 1h 10m'),
(timedelta(days=1, hours=1, minutes=10, seconds=0), 'hours', '1d 1h'),
(timedelta(days=1, hours=1, minutes=0, seconds=0), 'days', '1 day'),
(timedelta(days=1, hours=12, minutes=0, seconds=0), 'days', '1 day'),
(timedelta(days=2, hours=0, minutes=0, seconds=0), 'days', '2 days'),
(timedelta(days=7, hours=0, minutes=0, seconds=0), 'days', '7 days')
))
def test_format_human_timedelta(delta, granularity, expected):
assert format_human_timedelta(delta, granularity) == expected
@pytest.mark.parametrize(('dt', 'fmt', 'expected'), (
(datetime(2015, 11, 12, 17, 30), '%Y-%m-%d', '2015-11-12'),
(datetime(1015, 11, 12, 17, 30), '%Y-%m-%d %H:%M', '1015-11-12 17:30'),
))
def test_strftime_all_years(dt, fmt, expected):
assert strftime_all_years(dt, fmt) == expected
dt = datetime
tz = timezone('Europe/Zurich')
iterdays_test_data = (
(dt(2015, 1, 1, 10, 30).date(), dt(2015, 1, 1, 12, 30), True, None, None, 1),
(dt(2015, 1, 1, 10, 30), dt(2014, 1, 1, 12, 30), True, None, None, 0),
(dt(2015, 1, 1, 10, 30), dt(2015, 1, 1, 12, 30), True, None, None, 1),
(dt(2017, 10, 13), dt(2017, 10, 19), True, None, None, 5),
(dt(2017, 10, 13), dt(2017, 10, 19), False, None, None, 7),
(dt(2017, 10, 13), dt(2017, 10, 19), True, [dt(2017, 10, 17).date()], None, 1),
(dt(2017, 10, 13), dt(2017, 10, 19), True, [dt(2017, 10, 14).date()], None, 0),
(dt(2017, 10, 13), dt(2017, 10, 19), False, [dt(2017, 10, 14).date()], None, 1),
(dt(2017, 10, 13), dt(2017, 10, 19), False, None, [dt(2017, 10, 14).date(), dt(2017, 10, 16).date()], 5),
(dt(2017, 10, 13), dt(2017, 10, 19), False, [dt(2017, 10, 15).date()], [dt(2017, 10, 14).date()], 1),
(dt(2017, 10, 28, 10, 30), dt(2017, 10, 31, 12, 30), True, None, [dt(2017, 10, 28, 10, 30)], 2),
(as_utc(dt(2017, 10, 28)).astimezone(tz), as_utc(dt(2017, 10, 31)).astimezone(tz), True, None, None, 2),
(as_utc(dt(2017, 3, 26)).astimezone(tz), as_utc(dt(2017, 3, 28)).astimezone(tz), True, None, None, 2),
)
@pytest.mark.parametrize(('from_', 'to', 'skip_weekends', 'day_whitelist', 'day_blacklist', 'expected'),
iterdays_test_data)
def test_iterdays(from_, to, skip_weekends, day_whitelist, day_blacklist, expected):
assert len(list(iterdays(from_, to, skip_weekends, day_whitelist, day_blacklist))) == expected
```
#### File: indico/util/rules.py
```python
from __future__ import unicode_literals
from indico.core import signals
from indico.util.decorators import classproperty
from indico.util.signals import named_objects_from_signal
class Condition(object):
"""Base class for conditions.
`Condition`s allow you to define criteria to match on and then evaluate
those criteria and check whether there is a match (as part of a rule).
"""
#: The name of the condition. Must be unique within the context
#: where the condition is used
name = None
#: Whether the condition must always be present
required = False
#: A short description of the condition.
description = None
    #: A ``{value: condition_name}`` mapping of the conditions that are allowed for each value type.
    #: Non-specified values are considered compatible with all other conditions.
compatible_with = None
@classproperty
@classmethod
def friendly_name(cls):
return cls.name
@classmethod
def is_used(cls, rule):
"""Check whether the condition is used in a rule"""
return rule.get(cls.name) is not None
@classmethod
def is_none(cls, **kwargs):
"""Check whether the condition requires a null value.
        Subclasses should override this method.
"""
raise NotImplementedError
@classmethod
def get_available_values(cls, **kwargs):
"""Get a dict of values that can be used for the condition.
Subclasses are encouraged to explicitly specify the arguments
they expect instead of using ``**kwargs``.
The key of each item is the actual value that will be used
in the check while the value is what is going to be displayed.
:param kwargs: arguments specific to the condition's context
"""
raise NotImplementedError
@classmethod
def _clean_values(cls, values, **kwargs):
return list(cls.get_available_values(**kwargs).viewkeys() & set(values))
@classmethod
def check(cls, values, **kwargs):
"""Check whether the condition is matched
Subclasses are encouraged to explicitly specify the arguments
they expect instead of using ``**kwargs``.
This method is only called if the rule is active and if there
are valid values (as defined by `get_available_values`).
:param values: a collection of values that are accepted for a
                       match. It never contains values which are not
                       available anymore.
        :param kwargs: arguments specific to the condition's context
"""
raise NotImplementedError
def get_conditions(context, **kwargs):
"""Get a dict of available conditions.
:param context: the context where the conditions are used
:param kwargs: arguments specific to the context
"""
return named_objects_from_signal(signals.get_conditions.send(context, **kwargs))
def check_rule(context, rule, **kwargs):
"""Check whether a rule matches.
:param context: the context where the conditions are used
:param rule: the rule to check
:param kwargs: arguments specific to the context
"""
for name, condition in get_conditions(context, **kwargs).iteritems():
if not condition.is_used(rule):
if condition.required:
return False
else:
continue
values = condition._clean_values(rule[name], **kwargs)
if not values and condition.is_none(**kwargs):
# the property we're checking is null and the rule wants null
return True
elif not condition.check(values, **kwargs):
return False
return True
def get_missing_conditions(context, rule, **kwargs):
"""Get the set of missing required conditions.
:param context: the context where the conditions are used
:param rule: the rule to check
:param kwargs: arguments specific to the context
"""
rules = {condition for condition in get_conditions(context, **kwargs).itervalues() if condition.required}
return {condition.friendly_name for condition in rules if not condition.is_used(rule)}
```
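As a rough illustration of the API above, here is a hypothetical condition wired up through the `get_conditions` signal; the class, the context name and the `contribution`/`event` attributes are invented for this sketch, and the registration decorator follows the blinker `connect_via` pattern used elsewhere in the codebase:
```python
# Hypothetical condition; the context name and model attributes are made up.
class ContributionTypeCondition(Condition):
    name = 'contribution_type'
    description = 'Match on the type of a contribution'
    @classmethod
    def is_none(cls, contribution=None, **kwargs):
        return contribution is not None and contribution.type_id is None
    @classmethod
    def get_available_values(cls, event=None, **kwargs):
        return {ct.id: ct.name for ct in event.contribution_types}
    @classmethod
    def check(cls, values, contribution=None, **kwargs):
        return contribution.type_id in values
@signals.get_conditions.connect_via('my-context')
def _get_my_conditions(sender, **kwargs):
    return ContributionTypeCondition
# A rule is a plain dict mapping condition names to the accepted values, e.g.:
# rule = {'contribution_type': [1, 2]}
# check_rule('my-context', rule, event=event, contribution=contribution)
```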
#### File: indico/util/serializer.py
```python
from enum import Enum
from indico.core.errors import IndicoError
from indico.core.logger import Logger
class Serializer(object):
__public__ = []
def to_serializable(self, attr='__public__', converters=None):
serializable = {}
if converters is None:
converters = {}
for k in getattr(self, attr):
try:
if isinstance(k, tuple):
k, name = k
else:
k, name = k, k
v = getattr(self, k)
if callable(v): # to make it generic, we can get rid of it by properties
v = v()
if isinstance(v, Serializer):
v = v.to_serializable()
elif isinstance(v, list):
v = [e.to_serializable() for e in v]
elif isinstance(v, dict):
v = dict((k, vv.to_serializable() if isinstance(vv, Serializer) else vv)
for k, vv in v.iteritems())
elif isinstance(v, Enum):
v = v.name
if type(v) in converters:
v = converters[type(v)](v)
serializable[name] = v
except Exception:
msg = 'Could not retrieve {}.{}.'.format(self.__class__.__name__, k)
Logger.get('Serializer{}'.format(self.__class__.__name__)).exception(msg)
raise IndicoError(msg)
return serializable
```
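A small, self-contained sketch of how the mixin above is meant to be used; `Point` and its attributes are invented for illustration:
```python
class Point(Serializer):
    __public__ = ['x', 'y', ('label', 'name')]  # plain attrs or (attr, output_key) tuples
    def __init__(self, x, y, label):
        self.x = x
        self.y = y
        self.label = label
print(Point(1, 2, 'origin').to_serializable())
# -> {'x': 1, 'y': 2, 'name': 'origin'}
```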
#### File: web/forms/base.py
```python
from __future__ import unicode_literals
import weakref
from flask import flash, g, request, session
from flask_wtf import FlaskForm
from wtforms import ValidationError
from wtforms.csrf.core import CSRF
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields.core import FieldList
from wtforms.form import FormMeta
from wtforms.widgets.core import HiddenInput
from indico.core import signals
from indico.core.auth import multipass
from indico.util.i18n import _
from indico.util.signals import values_from_signal
from indico.util.string import return_ascii, strip_whitespace
from indico.web.flask.util import url_for
class _DataWrapper(object):
"""Wrapper for the return value of generated_data properties"""
def __init__(self, data):
self.data = data
@return_ascii
def __repr__(self):
return '<DataWrapper({!r})>'.format(self.data)
class generated_data(property):
"""property decorator for generated data in forms"""
def __get__(self, obj, objtype=None):
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
return _DataWrapper(self.fget(obj))
class IndicoFormMeta(FormMeta):
def __call__(cls, *args, **kwargs):
# If we are instantiating a form that was just extended, don't
# send the signal again - it's pointless to extend the extended
# form and doing so could actually result in infinite recursion
# if the signal receiver didn't specify a sender.
if kwargs.pop('__extended', False):
return super(IndicoFormMeta, cls).__call__(*args, **kwargs)
extra_fields = values_from_signal(signals.add_form_fields.send(cls))
# If there are no extra fields, we don't need any custom logic
# and simply create an instance of the original form.
if not extra_fields:
return super(IndicoFormMeta, cls).__call__(*args, **kwargs)
kwargs['__extended'] = True
ext_cls = type(b'_Extended' + cls.__name__, (cls,), {})
for name, field in extra_fields:
name = 'ext__' + name
if hasattr(ext_cls, name):
raise RuntimeError('Field name collision in {}: {}'.format(cls.__name__, name))
setattr(ext_cls, name, field)
return ext_cls(*args, **kwargs)
class IndicoFormCSRF(CSRF):
def generate_csrf_token(self, csrf_token_field):
return session.csrf_token
def validate_csrf_token(self, form, field):
if field.current_token == field.data:
return
if not g.get('flashed_csrf_message'):
# Only flash the message once per request. We may end up in here
# multiple times if `validate()` is called more than once
flash(_('It looks like there was a problem with your current session. Please submit the form again.'),
'error')
g.flashed_csrf_message = True
raise ValidationError(_('Invalid CSRF token'))
class IndicoForm(FlaskForm):
__metaclass__ = IndicoFormMeta
class Meta:
csrf = True
csrf_class = IndicoFormCSRF
def bind_field(self, form, unbound_field, options):
# We don't set default filters for query-based fields as it breaks them if no query_factory is set
# while the Form is instantiated. Also, it's quite pointless for those fields...
# FieldList simply doesn't support filters.
no_filter_fields = (QuerySelectField, FieldList)
filters = [strip_whitespace] if not issubclass(unbound_field.field_class, no_filter_fields) else []
filters += unbound_field.kwargs.get('filters', [])
bound = unbound_field.bind(form=form, filters=filters, **options)
bound.get_form = weakref.ref(form) # GC won't collect the form if we don't use a weakref
return bound
def __init__(self, *args, **kwargs):
csrf_enabled = kwargs.pop('csrf_enabled', None)
if csrf_enabled is not None:
# This is exactly what FlaskForm already does, but without
# a deprecation warning.
# Being able to set ``csrf_enabled=False`` is much nicer
# than ``meta={'csrf': False}`` and if we ever need to
# change it for some reason we can always replace it everywhere
kwargs['meta'] = kwargs.get('meta') or {}
kwargs['meta'].setdefault('csrf', csrf_enabled)
super(IndicoForm, self).__init__(*args, **kwargs)
self.ajax_response = None
def process_ajax(self):
"""
Check if the current request is an AJAX request related to a
field in this form and execute the field's AJAX logic.
The response is available in the `ajax_response` attribute
afterwards.
:return: Whether an AJAX response was processed.
"""
field_id = request.args.get('__wtf_ajax')
if not field_id:
return False
field = next((f for f in self._fields.itervalues() if f.id == field_id and isinstance(f, AjaxFieldMixin)), None)
if not field:
return False
rv = field.process_ajax()
self.ajax_response = rv
return True
def validate(self):
valid = super(IndicoForm, self).validate()
if not valid:
return False
if not all(values_from_signal(signals.form_validated.send(self), single_value=True)):
return False
self.post_validate()
return True
def post_validate(self):
"""Called after the form was successfully validated.
This method is a good place e.g. to override the data of fields in
certain cases without going through the hassle of generated_data.
"""
def populate_obj(self, obj, fields=None, skip=None, existing_only=False):
"""Populates the given object with form data.
If `fields` is set, only fields from that list are populated.
If `skip` is set, fields in that list are skipped.
If `existing_only` is True, only attributes that already exist on `obj` are populated.
Attributes starting with ``ext__`` are always skipped as they
are from plugin-defined fields which should always be handled
separately.
"""
def _included(field_name):
if fields and field_name not in fields:
return False
if skip and field_name in skip:
return False
if existing_only and not hasattr(obj, field_name):
return False
if field_name.startswith('ext__'):
return False
return True
# Populate data from actual fields
for name, field in self._fields.iteritems():
if not _included(name):
continue
field.populate_obj(obj, name)
# Populate generated data
for name, value in self.generated_data.iteritems():
if not _included(name):
continue
setattr(obj, name, value)
@property
def visible_fields(self):
"""A list containing all fields that are not hidden."""
return [field for field in self if not isinstance(field.widget, HiddenInput)]
@property
def error_list(self):
"""A list containing all errors, prefixed with the field's label.'"""
all_errors = []
for field_name, errors in self.errors.iteritems():
for error in errors:
if isinstance(error, dict) and isinstance(self[field_name], FieldList):
for field in self[field_name].entries:
all_errors += ['{}: {}'.format(self[field_name].label.text, sub_error)
for sub_error in field.form.error_list]
else:
all_errors.append('{}: {}'.format(self[field_name].label.text, error))
return all_errors
@property
def generated_data(self):
"""Returns a dict containing all generated data"""
cls = type(self)
return {field: getattr(self, field).data
for field in dir(cls)
if isinstance(getattr(cls, field), generated_data)}
@property
def data(self):
"""Extends form.data with generated data from properties"""
data = {k: v
for k, v in super(IndicoForm, self).data.iteritems()
if k != self.meta.csrf_field_name and not k.startswith('ext__')}
data.update(self.generated_data)
return data
class FormDefaults(object):
"""Simple wrapper to be used for Form(obj=...) default values.
It allows you to specify default values via kwargs or certain attrs from an object.
You can also set attributes directly on this object, which will act just like kwargs
:param obj: The object to get data from
:param attrs: Set of attributes that may be taken from obj
:param skip_attrs: Set of attributes which are never taken from obj
:param defaults: Additional values which are used only if not taken from obj
"""
def __init__(self, obj=None, attrs=None, skip_attrs=None, **defaults):
self.__obj = obj
self.__use_items = hasattr(obj, 'iteritems') and hasattr(obj, 'get') # smells like a dict
self.__obj_attrs = attrs
self.__obj_attrs_skip = skip_attrs
self.__defaults = defaults
def __valid_attr(self, name):
"""Checks if an attr may be retrieved from the object"""
if self.__obj is None:
return False
if self.__obj_attrs is not None and name not in self.__obj_attrs:
return False
if self.__obj_attrs_skip is not None and name in self.__obj_attrs_skip:
return False
return True
def __setitem__(self, key, value):
self.__defaults[key] = value
def __setattr__(self, key, value):
if key.startswith('_{}__'.format(type(self).__name__)):
object.__setattr__(self, key, value)
else:
self.__defaults[key] = value
def __getattr__(self, item):
if self.__valid_attr(item):
if self.__use_items:
return self.__obj.get(item, self.__defaults.get(item))
else:
return getattr(self.__obj, item, self.__defaults.get(item))
elif item in self.__defaults:
return self.__defaults[item]
else:
raise AttributeError(item)
def __contains__(self, item):
return hasattr(self, item)
class SyncedInputsMixin(object):
"""Mixin for a form having inputs using the ``SyncedInputWidget``.
    This mixin will process the synced fields, adding the attributes
    they need to render and work properly. The fields which
are synced are defined by ``multipass.synced_fields``.
:param synced_fields: set -- a subset of ``multipass.synced_fields``
which corresponds to the fields currently
being synchronized for the user.
:param synced_values: dict -- a map of all the synced fields (as
defined by ``multipass.synced_fields``) and
the values they would have if they were synced
(regardless of whether it is or not). Fields
not present in this dict do not show the sync
button at all.
"""
def __init__(self, *args, **kwargs):
synced_fields = kwargs.pop('synced_fields', set())
synced_values = kwargs.pop('synced_values', {})
super(SyncedInputsMixin, self).__init__(*args, **kwargs)
self.syncable_fields = set(synced_values)
for key in ('first_name', 'last_name'):
if not synced_values.get(key):
synced_values.pop(key, None)
self.syncable_fields.discard(key)
if self.is_submitted():
synced_fields = self.synced_fields
provider = multipass.sync_provider
provider_name = provider.title if provider is not None else 'unknown identity provider'
for field in multipass.synced_fields:
self[field].synced = self[field].short_name in synced_fields
self[field].synced_value = synced_values.get(field)
self[field].provider_name = provider_name
@property
def synced_fields(self):
"""The fields which are set as synced for the current request."""
return set(request.form.getlist('synced_fields')) & self.syncable_fields
class AjaxFieldMixin(object):
"""Mixin for a Field to be able to handle AJAX requests.
This mixin will allow you to handle AJAX requests during regular
form processing, e.g. when you have a field that needs an AJAX
callback to perform search operations.
To use this mixin, the controllers processing the form must
include the following code::
if form.process_ajax():
return form.ajax_response
It is a good idea to run this code as early as possible to avoid
doing expensive operations like loading a big list of objects
which may be never used when returning early due to the AJAX
request.
"""
def process_ajax(self):
raise NotImplementedError
def get_ajax_url(self, **url_args):
kwargs = dict(request.view_args, **request.args.to_dict(False))
kwargs.update(url_args)
kwargs['__wtf_ajax'] = self.id
return url_for(request.endpoint, **kwargs)
```
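`FormDefaults` can be exercised on its own; the sketch below uses a made-up stand-in object and shows the lookup order described in its docstring (a real form would then be built with `SomeIndicoForm(obj=defaults)` inside a request context):
```python
class _Model(object):  # stand-in for a real model object
    id = 7
    name = 'Example'
defaults = FormDefaults(_Model(), skip_attrs={'id'}, language='en')
defaults.extra_flag = True      # attribute assignment acts like an extra kwarg
print(defaults.name)            # 'Example' (taken from the object)
print(defaults.language)        # 'en' (kwarg default, not on the object)
print('id' in defaults)         # False (explicitly skipped)
```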
#### File: forms/fields/protection.py
```python
from __future__ import absolute_import, unicode_literals
from flask import render_template
from markupsafe import Markup
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.util.i18n import _
from indico.web.forms.fields import IndicoEnumRadioField
from indico.web.forms.widgets import JinjaWidget
class IndicoProtectionField(IndicoEnumRadioField):
widget = JinjaWidget('forms/protection_widget.html', single_kwargs=True)
radio_widget = JinjaWidget('forms/radio_buttons_widget.html', orientation='horizontal', single_kwargs=True)
def __init__(self, *args, **kwargs):
self.protected_object = kwargs.pop('protected_object')(kwargs['_form'])
get_acl_message_url = kwargs.pop('acl_message_url', None)
self.acl_message_url = get_acl_message_url(kwargs['_form']) if get_acl_message_url else None
self.can_inherit_protection = self.protected_object.protection_parent is not None
if not self.can_inherit_protection:
kwargs['skip'] = {ProtectionMode.inheriting}
super(IndicoProtectionField, self).__init__(*args, enum=ProtectionMode, **kwargs)
def render_protection_message(self):
protected_object = self.get_form().protected_object
if hasattr(protected_object, 'get_non_inheriting_objects'):
non_inheriting_objects = protected_object.get_non_inheriting_objects()
else:
non_inheriting_objects = []
if isinstance(protected_object.protection_parent, db.m.Event):
parent_type = _('Event')
elif isinstance(protected_object.protection_parent, db.m.Category):
parent_type = _('Category')
else:
parent_type = _('Session')
rv = render_template('_protection_info.html', field=self, protected_object=protected_object,
parent_type=parent_type, non_inheriting_objects=non_inheriting_objects)
return Markup(rv)
```
#### File: http_api/metadata/xml.py
```python
import re
from datetime import datetime
import dateutil.parser
from lxml import etree
from pytz import timezone, utc
from indico.core.logger import Logger
from indico.util.string import to_unicode
from indico.web.http_api.metadata.serializer import Serializer
def _deserialize_date(date_dict):
dt = datetime.combine(dateutil.parser.parse(date_dict['date']).date(),
dateutil.parser.parse(date_dict['time']).time())
return timezone(date_dict['tz']).localize(dt).astimezone(utc)
class XMLSerializer(Serializer):
"""
Receives a fossil (or a collection of them) and converts them to XML
"""
_mime = 'text/xml'
def __init__(self, query_params, pretty=False, **kwargs):
self._typeMap = kwargs.pop('typeMap', {})
super(XMLSerializer, self).__init__(query_params, pretty, **kwargs)
def _convert(self, value, _control_char_re=re.compile(r'[\x00-\x08\x0b\x0c\x0e-\x1f]')):
if isinstance(value, datetime):
return value.isoformat()
elif isinstance(value, (int, long, float, bool)):
return str(value)
else:
value = to_unicode(value) if isinstance(value, str) else value
if isinstance(value, basestring):
# Get rid of control chars breaking XML conversion
value = _control_char_re.sub(u'', value)
return value
def _xmlForFossil(self, fossil, doc=None):
attribs = {}
id = None
if '_fossil' in fossil:
attribs['fossil'] = fossil['_fossil']
if 'id' in fossil:
id = attribs['id'] = str(fossil['id'])
if '_type' in fossil:
typeName = self._typeMap.get(fossil['_type'], fossil['_type'])
else:
typeName = 'collection'
felement = etree.Element(typeName.lower(),
attrib=attribs)
if doc:
doc.getroot().append(felement)
for k, v in fossil.iteritems():
if k in ['_fossil', '_type', 'id']:
continue
if isinstance(k, (int, float)) or (isinstance(k, basestring) and k.isdigit()):
elem = etree.SubElement(felement, 'entry', {'key': unicode(k)})
else:
elem = etree.SubElement(felement, k)
if isinstance(v, dict) and set(v.viewkeys()) == {'date', 'time', 'tz'}:
v = _deserialize_date(v)
if isinstance(v, (list, tuple)):
onlyDicts = all(isinstance(subv, dict) for subv in v)
if onlyDicts:
for subv in v:
elem.append(self._xmlForFossil(subv))
else:
for subv in v:
if isinstance(subv, dict):
elem.append(self._xmlForFossil(subv))
else:
subelem = etree.SubElement(elem, 'item')
subelem.text = self._convert(subv)
elif isinstance(v, dict):
elem.append(self._xmlForFossil(v))
else:
txt = self._convert(v)
try:
elem.text = txt
except Exception:
Logger.get('xmlSerializer').exception('Setting XML text value failed (id: %s, value %r)', id, txt)
return felement
def _execute(self, fossil, xml_declaration=True):
if isinstance(fossil, list):
# collection of fossils
doc = etree.ElementTree(etree.Element("collection"))
for elem in fossil:
self._xmlForFossil(elem, doc)
result = doc
else:
result = self._xmlForFossil(fossil)
return etree.tostring(result, pretty_print=self.pretty,
xml_declaration=xml_declaration, encoding='utf-8')
Serializer.register('xml', XMLSerializer)
```
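A rough usage sketch (Python 2, like the module): serializing a fossil-like dict by calling `_execute` directly for illustration; in real use the HTTP API machinery drives this through the registered serializer, and the field names below are made up. It also assumes the base `Serializer` stores `pretty`, as `_execute` expects.
```python
serializer = XMLSerializer(query_params={}, pretty=True)
fossil = {'_type': 'conference', 'id': 1, 'title': u'Example meeting',
          'startDate': {'date': '2015-11-12', 'time': '17:30:00', 'tz': 'Europe/Zurich'}}
print(serializer._execute(fossil))
```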
|
{
"source": "jgrigonis/arcade",
"score": 3
}
|
#### File: arcade/arcade/sound.py
```python
import pyglet
import typing
# pyglet.lib.load_library('C:/Program Files/Python36/Lib/site-packages/arcade/Win32/avbin')
# pyglet.have_avbin=True
# pyglet.debug_lib=True
# pyglet.debug_trace=True
def load_sound_library():
"""
Special code for Windows so we grab the proper avbin from our directory.
Otherwise hope the correct package is installed.
"""
# lazy loading
if not load_sound_library._sound_library_loaded:
load_sound_library._sound_library_loaded = True
else:
return
import os
    appveyor = os.environ.get('APPVEYOR') is not None
import platform
system = platform.system()
if system == 'Windows':
import sys
is64bit = sys.maxsize > 2**32
import site
packages = site.getsitepackages()
if appveyor:
if is64bit:
path = "Win64/avbin"
else:
path = "Win32/avbin"
else:
if is64bit:
path = packages[0] + "/lib/site-packages/arcade/Win64/avbin"
else:
path = packages[0] + "/lib/site-packages/arcade/Win32/avbin"
elif system == 'Darwin':
from distutils.sysconfig import get_python_lib
path = get_python_lib() + '/lib/site-packages/arcade/lib/libavbin.10.dylib'
pyglet.options['audio'] = ('openal', 'pulse', 'silent')
else:
path = "avbin"
pyglet.options['audio'] = ('openal', 'pulse', 'silent')
pyglet.lib.load_library(path)
pyglet.have_avbin = True
# Initialize static function variable
load_sound_library._sound_library_loaded = False
def load_sound(filename: str) -> typing.Any:
"""
Load a sound and get it ready to play.
"""
load_sound_library()
source = pyglet.media.load(filename, streaming=False)
return source
def play_sound(sound: typing.Any):
"""
Play a previously loaded sound.
"""
load_sound_library()
sound.play()
```
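Typical usage of the two helpers above, assuming a `laser.wav` file exists next to the script and a supported audio backend (avbin) is available:
```python
laser_sound = load_sound("laser.wav")
play_sound(laser_sound)
```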
#### File: arcade/examples/pinball.py
```python
import arcade
import timeit
BALL_DRAG = 0.001
NO_FLIPPER = 0
FLIPPER_UP = 1
class MyApplication(arcade.Window):
""" Main application class. """
def __init__(self, width, height, resizable):
super().__init__(width, height, resizable=resizable)
self.sprite_list = arcade.SpriteList()
self.left_flipper_list = arcade.SpriteList()
self.right_flipper_list = arcade.SpriteList()
self.left_flipper_state = NO_FLIPPER
self.right_flipper_state = NO_FLIPPER
self.time = 0
arcade.set_background_color(arcade.color.DARK_SLATE_GRAY)
# Top wall
for x in range(20, 800, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, 980], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left wall
for y in range(260, 980, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [20, y], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Right wall
for y in range(260, 980, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [780, y], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left bottom slope
y = 260
for x in range(40, 280, 10):
y -= 5
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Right bottom slope
y = 260
for x in range(760, 520, -10):
y -= 5
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left flipper
y = 135
for x in range(280, 350, 10):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
self.left_flipper_list.append(wall)
y -= 5
# Right flipper
y = 135
for x in range(520, 440, -10):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
self.right_flipper_list.append(wall)
y -= 5
# Bumpers
for row in range(2):
for column in range(2):
bumper = arcade.PhysicsCircle("images/bumper.png", [250 + 300 * column, 450 + 300 * row], 35, [0, 0], 1.5, 100, BALL_DRAG)
bumper.static = True
self.sprite_list.append(bumper)
wall = arcade.PhysicsAABB("images/python_logo.png", [400, 600], [150, 150], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
self.sprite_list.draw()
start_x = 20
start_y = 10
arcade.draw_text("Processing time: {:.3f}".format(self.time), start_x, start_y, arcade.color.BLACK, 12)
def update(self, x):
""" Move everything """
start_time = timeit.default_timer()
arcade.process_2d_physics_movement(self.sprite_list, gravity=0.08)
arcade.process_2d_physics_collisions(self.sprite_list)
# -- Left flipper control
if self.left_flipper_state == FLIPPER_UP and self.left_flipper_list[0].center_y < 145:
y = 2
y_change = 2
for sprite in self.left_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
elif self.left_flipper_state == NO_FLIPPER and self.left_flipper_list[0].center_y > 135:
y = -2
y_change = -2
for sprite in self.left_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
else:
for sprite in self.left_flipper_list:
sprite.change_y = 0
sprite.frozen = True
# -- Right flipper control
if self.right_flipper_state == FLIPPER_UP and self.right_flipper_list[0].center_y < 145:
y = 2
y_change = 2
for sprite in self.right_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
elif self.right_flipper_state == NO_FLIPPER and self.right_flipper_list[0].center_y > 135:
y = -2
y_change = -2
for sprite in self.right_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
else:
for sprite in self.right_flipper_list:
sprite.change_y = 0
sprite.frozen = True
for sprite in self.sprite_list:
if sprite.center_y < -20:
sprite.kill()
self.time = timeit.default_timer() - start_time
def on_key_press(self, key, modifiers):
"""
        Called whenever a key is pressed.
"""
if key == arcade.key.LEFT:
self.left_flipper_state = FLIPPER_UP
elif key == arcade.key.RIGHT:
self.right_flipper_state = FLIPPER_UP
elif key == arcade.key.SPACE:
x = 720
y = 300
ball = arcade.PhysicsCircle("images/pool_cue_ball.png", [x, y], 15, [0, +20], 1, .25, BALL_DRAG)
self.sprite_list.append(ball)
def on_key_release(self, key, modifiers):
"""
        Called whenever a key is released.
"""
if key == arcade.key.LEFT:
self.left_flipper_state = NO_FLIPPER
elif key == arcade.key.RIGHT:
self.right_flipper_state = NO_FLIPPER
window = MyApplication(800, 1000, resizable=False)
window.set_size(700, 700)
arcade.run()
```
#### File: arcade/examples/snow.py
```python
import random
import math
import arcade
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
class Snowflake:
"""
Each instance of this class represents a single snowflake.
Based on drawing filled-circles.
"""
def __init__(self):
self.x = 0
self.y = 0
def reset_pos(self):
# Reset flake to random position above screen
self.y = random.randrange(SCREEN_HEIGHT, SCREEN_HEIGHT + 100)
self.x = random.randrange(SCREEN_WIDTH)
class MyAppWindow(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
:param width:
:param height:
"""
# Calls "__init__" of parent class (arcade.Window) to setup screen
super().__init__(width, height)
# Sprite lists
self.snowflake_list = None
def start_snowfall(self):
""" Set up snowfall and initialize variables. """
self.snowflake_list = []
for i in range(50):
# Create snowflake instance
snowflake = Snowflake()
# Randomly position snowflake
snowflake.x = random.randrange(SCREEN_WIDTH)
snowflake.y = random.randrange(SCREEN_HEIGHT + 200)
# Set other variables for the snowflake
snowflake.size = random.randrange(4)
snowflake.speed = random.randrange(20, 40)
snowflake.angle = random.uniform(math.pi, math.pi * 2)
# Add snowflake to snowflake list
self.snowflake_list.append(snowflake)
# Don't show the mouse pointer
self.set_mouse_visible(False)
# Set the background color
arcade.set_background_color(arcade.color.BLACK)
def on_draw(self):
"""
Render the screen.
"""
# This command is necessary before drawing
arcade.start_render()
# Draw the current position of each snowflake
for snowflake in self.snowflake_list:
arcade.draw_circle_filled(snowflake.x, snowflake.y,
snowflake.size, arcade.color.WHITE)
def update(self, delta_time):
"""
        All the movement and game logic goes here.
"""
# Animate all the snowflakes falling
for snowflake in self.snowflake_list:
snowflake.y -= snowflake.speed * delta_time
# Check if snowflake has fallen below screen
if snowflake.y < 0:
snowflake.reset_pos()
# Some math to make the snowflakes move side to side
snowflake.x += snowflake.speed * math.cos(snowflake.angle) * delta_time
snowflake.angle += 1 * delta_time
def main():
window = MyAppWindow(SCREEN_WIDTH, SCREEN_HEIGHT)
window.start_snowfall()
arcade.run()
if __name__ == "__main__":
main()
```
|
{
"source": "jgrindal/JGUtils",
"score": 3
}
|
#### File: JGUtils/jgutils/batchrename.py
```python
import glob, os, argparse
def parse_args():
parser = argparse.ArgumentParser(
description='Batch Renamer')
parser.add_argument('--dir', type=str, required=True, default='./',
help='Directory for things to be renamed')
parser.add_argument('--pattern', type=str, required=False, default='*',
help='pattern to choose')
parser.add_argument('--prefix', type=str, required=False, default='',
help='prefix to add to the filename')
parser.add_argument('--postfix', type=str, required=False, default='',
help='postfix to add to the filename')
return parser.parse_args()
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title + ext))
if __name__ == '__main__':
args = parse_args()
tp = str(args.prefix) + "%s" + str(args.postfix)
rename(args.dir, args.pattern, tp)
```
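The script is meant to be run from the command line; the call below is the programmatic equivalent of `python batchrename.py --dir ./photos --pattern "*.jpg" --prefix trip_ --postfix _2020` (the directory and names are made up):
```python
rename('./photos', '*.jpg', 'trip_%s_2020')  # e.g. IMG001.jpg -> trip_IMG001_2020.jpg
```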
|
{
"source": "jgrip/dotfiles",
"score": 3
}
|
#### File: dotfiles/bin/dotty.py
```python
from __future__ import print_function
# Copyright (C) 2015 <NAME> <<EMAIL>>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import json
import os
import shutil
from sys import stderr
import argparse
# Fix Python 2.x.
try: input = raw_input
except NameError: pass
def ask_user(prompt):
valid = {"yes":True, 'y':True, '':True, "no":False, 'n':False}
while True:
print("{0} ".format(prompt),end="")
choice = input().lower()
if choice in valid:
return valid[choice]
else:
print("Enter a correct choice.", file=stderr)
def create_directory(path):
exp = os.path.expanduser(path)
if (not os.path.isdir(exp)):
print("{0} doesnt exist, creating.".format(exp))
os.makedirs(exp)
def create_symlink(src, dest, replace):
dest = os.path.expanduser(dest)
src = os.path.abspath(src)
broken_symlink = os.path.lexists(dest) and not os.path.exists(dest)
if os.path.lexists(dest):
if os.path.islink(dest) and os.readlink(dest) == src:
print("Skipping existing {0} -> {1}".format(dest, src))
return
elif replace or ask_user("{0} exists, delete it? [Y/n]".format(dest)):
if os.path.isfile(dest) or broken_symlink or os.path.islink(dest):
os.remove(dest)
else:
shutil.rmtree(dest)
else:
return
print("Linking {0} -> {1}".format(dest, src))
try:
os.symlink(src, dest)
except AttributeError:
import ctypes
symlink = ctypes.windll.kernel32.CreateSymbolicLinkW
symlink.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
symlink.restype = ctypes.c_ubyte
flags = 1 if os.path.isdir(src) else 0
symlink(dest, src, flags)
def copy_path(src, dest):
dest = os.path.expanduser(dest)
src = os.path.abspath(src)
if os.path.exists(dest):
if ask_user("{0} exists, delete it? [Y/n]".format(dest)):
if os.path.isfile(dest) or os.path.islink(dest):
os.remove(dest)
else:
shutil.rmtree(dest)
else:
return
print("Copying {0} -> {1}".format(src, dest))
if os.path.isfile(src):
shutil.copy(src, dest)
else:
shutil.copytree(src, dest)
def run_command(command):
print("Running {0}".format(command))
os.system(command)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="the JSON file you want to use")
parser.add_argument("-r", "--replace", action="store_true",
help="replace files/folders if they already exist")
args = parser.parse_args()
js = json.load(open(args.config))
os.chdir(os.path.expanduser(os.path.abspath(os.path.dirname(args.config))))
if 'directories' in js: [create_directory(path) for path in js['directories']]
if 'link' in js: [create_symlink(src, dst, args.replace) for src, dst in js['link'].items()]
if 'copy' in js: [copy_path(src, dst) for src, dst in js['copy'].items()]
if 'install' in js and 'install_cmd' in js:
packages = ' '.join(js['install'])
run_command("{0} {1}".format(js['install_cmd'], packages))
if 'commands' in js: [run_command(command) for command in js['commands']]
print("Done!")
if __name__ == "__main__":
main()
```
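For reference, a hypothetical minimal config covering the keys `main()` looks for; it would be saved as e.g. `dotfiles.json` and run with `python dotty.py dotfiles.json -r` (all paths and package names are made up):
```python
import json
example_config = {
    "directories": ["~/.config"],
    "link": {"vimrc": "~/.vimrc"},          # src (relative to the config file) -> dest
    "copy": {"gitconfig": "~/.gitconfig"},
    "install_cmd": "sudo apt-get install",
    "install": ["git", "vim"],
    "commands": ["echo done"]
}
with open("dotfiles.json", "w") as f:
    json.dump(example_config, f, indent=2)
```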
|
{
"source": "jgritans/torrent_finder",
"score": 3
}
|
#### File: jgritans/torrent_finder/torrents.py
```python
import pandas as pd
from web import soup
# Default the message so these can also be raised without arguments (e.g. ``raise EmptyTable`` below).
class EmptyTable(Exception):
    def __init__(self, message=''):
        self.message = message
class FailedDfCreation(Exception):
    def __init__(self, message=''):
        self.message = message
class HardToFixBetterSkip(Exception):
    def __init__(self, message=''):
        self.message = message
def get_torrent_table(movie, res_kwds, criteria):
try:
sizes = {
'M': 1,
'G': 1000
}
# get HTML table from 1337x.to
link = f"https://1337x.to/category-search/{movie.title} {movie.year}/Movies/1/".replace(" ", "+")
table_rows = soup(link).table.find_all('tr')
if len(table_rows) < 2:
raise Exception
table_rows.pop(0)
l = []
for tr in table_rows:
try:
td = tr.find_all('td')
row = [cell.text for cell in td] + [f"https://1337x.to{a['href']}" for a in tr.find_all(href=True)] +["","",""]
row.pop(6)
l.append(row)
except:
continue
# Create, clean up and sort downloads_df
downloads_df = pd.DataFrame(l, columns=['Title', 'Se', 'Le', 'Time', 'Size', 'Uploader', 'Item_link',
'User_link','Language','Id','Status']).dropna()
downloads_df.Size = [val.replace(',', '').split('B')[0].split(' ') for val in downloads_df.Size]
downloads_df[['Size', 'Unit']] = downloads_df.Size.tolist()
downloads_df.Unit = downloads_df.Unit.map(sizes)
downloads_df.Size = downloads_df.Size.map(float) * downloads_df.Unit
downloads_df.Se = downloads_df.Se.map(int)
downloads_df.Le = downloads_df.Le.map(int)
downloads_df.Size = downloads_df.Size.map(int)
downloads_df = downloads_df[downloads_df['Se'] >= criteria['seeders']].sort_values(['Size', 'Se'], ascending=False)
downloads_df['Year'] = movie.year
# Infer and add resolution to df
downloads_df['Resolution'] = ''
for res in res_kwds: # add resolutions to df
for i in range(len(downloads_df)):
for keyword in res_kwds[res]:
if keyword in downloads_df.iloc[i, downloads_df.columns.get_loc('Title')]:
downloads_df.iloc[i, downloads_df.columns.get_loc('Resolution')] = res
return downloads_df
except:
raise EmptyTable
def get_mirror(soup, mirror_site):
info_hash = next(p.text.split(' ')[-1] for p in soup.find_all('p') if 'hash' in p.text.lower())
return mirror_site.format(info_hash)
```
#### File: jgritans/torrent_finder/web.py
```python
from requests import get as requests_get
from bs4 import BeautifulSoup
HEADERS={"Accept-Language": "en-US,en;q=0.5",'User-Agent': 'Mozilla/5.0 (Platform; Security; OS-or-CPU; Localization; rv:1.4) Gecko/20030624 Netscape/7.1 (ax)'}
class noTorrent(Exception):
def __init__(self, message='error'):
self.message = message
def soup(link):
link = link.replace("'",'+')
r = requests_get(link, headers=HEADERS)
return BeautifulSoup(r.text, features='lxml')
def download_torrent(url, filename, download_path='C:'):
print(f" Attempting to download")
response = requests_get(url, headers=HEADERS)
if not filename:
filename = url.split('/')[-1]
file_path = f"{download_path}/{filename}.torrent".replace(' ','_')
if file_path[0] == '/':
file_path = file_path[1:]
with open(file_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
# else:
# print(f'{url} does not contain {url.split("/")[-1]}')
#
# raise noTorrent
print(' successful torrent download')
```
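A sketch of how these helpers chain together with `get_mirror()` from `torrents.py` (requires network access; the URL, mirror format string and download path are placeholders):
```python
page = soup("https://1337x.to/torrent/0000000/Some-Movie-2019/")       # placeholder item page
# mirror_url = get_mirror(page, "https://example-mirror.org/torrent/{}.torrent")  # placeholder mirror
# download_torrent(mirror_url, "Some_Movie_2019", download_path="C:/torrents")
```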
|
{
"source": "jgritchie/AlienInvaders",
"score": 3
}
|
#### File: AlienInvaders/src/button.py
```python
import pygame.font
class Button:
def __init__(self, ai_game, message):
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (0, 255, 255)
self.font = pygame.font.SysFont(None, 48)
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
self.prep_message(message)
def prep_message(self, message):
self.message_image = self.font.render(message, True, self.text_color, self.button_color)
self.message_image_rect = self.message_image.get_rect()
self.message_image_rect.center = self.rect.center
def draw_button(self):
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.message_image, self.message_image_rect)
```
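The button only needs an object exposing a pygame `screen` surface (in the game that is the main `AlienInvasion` instance); a stand-alone sketch with a made-up stand-in object:
```python
import pygame
class _FakeGame:
    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((800, 600))
game = _FakeGame()
play_button = Button(game, "Play")
play_button.draw_button()
pygame.display.flip()
```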
|
{
"source": "jg-rivera/cert-ripper",
"score": 3
}
|
#### File: cert-ripper/src/ripper.py
```python
from dotenv import load_dotenv
from PyPDF2 import PdfFileReader, PdfFileWriter
import os
import json
class CertRipper:
def __init__(
self,
start_page_index=0,
master_pdf_path=None,
json_points_path=None,
ripped_certs_path=None,
ripped_cert_file_name=None,
):
self.start_page_index = start_page_index
self.master_pdf_path = master_pdf_path
self.pdf = PdfFileReader(master_pdf_path)
self.pdf_length = self.pdf.getNumPages()
self.json_points_path = json_points_path
self.ripped_certs_path = ripped_certs_path
self.ripped_cert_file_name = ripped_cert_file_name
def process(self):
recipient_groups = self.get_recipient_groups_from_points()
self.extract_pdf_from_master(recipient_groups)
def extract_pdf_from_master(self, recipient_groups):
current_page_index = self.start_page_index
process_index = 0
for recipient_group in recipient_groups:
recipient_group_name = recipient_group["name"]
recipient_group_tag = recipient_group["tag"]
recipient_slugs = recipient_group["recipient_slugs"]
print(
f"[*] Ripping \x1b[93m{recipient_group_name}\x1b[0m group ...")
for recipient_slug in recipient_slugs:
page = self.pdf.getPage(current_page_index)
file_name = self.ripped_cert_file_name.format(
index=current_page_index + 1,
tag=recipient_group_tag,
recipient=recipient_slug
)
pdf_writer = PdfFileWriter()
pdf_writer.addPage(page)
output_file_name = f"{self.ripped_certs_path}\\{file_name}.pdf"
with open(output_file_name, "wb") as out:
pdf_writer.write(out)
print(
f"\x1b[95m[{process_index}]\x1b[0m Ripped \x1b[92m[{file_name}]\x1b[0m from \x1b[94mpage {current_page_index + 1}\x1b[0m of master")
current_page_index += 1
process_index += 1
def get_recipient_groups_from_points(self):
recipient_groups = []
total_recipients = 0
with open(self.json_points_path, "r") as json_file:
points = json.load(json_file)
for point in points:
point_name = point["name"]
point_tag = point["tag"]
point_recipients = point["recipients"]
point_recipient_slugs = []
for point_recipient in point_recipients:
recipient_name = point_recipient["name"]
recipient_name_slug = "_".join(recipient_name.split())
point_recipient_slugs.append(recipient_name_slug)
total_recipients += 1
recipient_groups.append({
"name": point_name,
"tag": point_tag,
"recipient_slugs": point_recipient_slugs
})
total_groups = len(recipient_groups)
self.__check_pdf_length(total_recipients)
print(
f"Read \x1b[95m{total_groups} groups(s)\x1b[0m and \x1b[95m{total_recipients} recipient(s)\x1b[0m from JSON points")
return recipient_groups
def __check_pdf_length(self, recipients_length):
pdf_length = self.pdf_length - (self.start_page_index)
if pdf_length != recipients_length:
raise ValueError(
f"Number of recipients ({recipients_length}) does not match with PDF length ({pdf_length})"
)
if __name__ == "__main__":
load_dotenv()
ripper = CertRipper(
        start_page_index=int(os.getenv("START_PAGE_INDEX", 0)),  # env values are strings; cast for page arithmetic
master_pdf_path=os.getenv("MASTER_PDF_PATH"),
json_points_path=os.getenv("JSON_POINTS_PATH"),
ripped_certs_path=os.getenv("RIPPED_CERTS_PATH"),
ripped_cert_file_name=os.getenv("RIPPED_CERT_FILE_NAME"),
)
ripper.process()
```
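For reference, a hypothetical `points.json` matching what `get_recipient_groups_from_points()` expects: a list of groups, each with a `name`, a `tag` and a list of recipients carrying a `name` field (all names below are invented):
```python
import json
example_points = [
    {
        "name": "Speakers",
        "tag": "spk",
        "recipients": [
            {"name": "Juan Dela Cruz"},
            {"name": "Maria Clara"}
        ]
    }
]
with open("points.json", "w") as f:
    json.dump(example_points, f, indent=2)
```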
#### File: src/utils/template_reader.py
```python
class TemplateReader:
def __init__(self, template_path):
self.template_path = template_path
def read(self):
print(f"Using template for email: \x1b[94m{self.template_path}\x1b[0m")
with open(self.template_path) as file:
template_html = file.read()
return template_html
```
|
{
"source": "jgrnt/disco",
"score": 3
}
|
#### File: disco/schemes/scheme_http.py
```python
from disco import comm
def open(url, task=None):
return comm.open_url(url)
def input_stream(fd, sze, url, params):
"""Opens the specified url using an http client."""
import disco.worker
file = open(url, task=disco.worker.active_task)
return file, len(file), file.url
```
#### File: disco/worker/__init__.py
```python
import os, sys, time, traceback, random
from disco.compat import basestring, force_utf8
from disco.error import DataError
from disco.fileutils import DiscoOutput, NonBlockingInput, Wait, AtomicFile
from disco.comm import open_url
# Maximum amount of time a task might take to finish.
DISCO_WORKER_MAX_TIME = 24 * 60 * 60
# Use active_task as a global variable.
# It is set when a task is running, and then accessed from utilities that
# need task data like the ddfs directory, etc.
active_task = None
class MessageWriter(object):
def __init__(self, worker):
self.worker = worker
def write(self, string):
string = string.strip()
if string:
self.worker.send('MSG', force_utf8(string))
def isatty(self):
return False
def flush(self):
pass
class Worker(dict):
"""
A :class:`Worker` is a :class:`dict` subclass, with special
methods defined for serializing itself, and possibly
reinstantiating itself on the nodes where :term:`tasks <task>` are
run.
.. note:: The base worker tries to guess which modules are needed
automatically, for all of the :term:`job functions`
specified below, if the *required_modules* parameter is
not specified. It sends any local dependencies
(i.e. modules not included in the Python standard
library) to nodes by default.
If guessing fails, or you have other requirements, see
:mod:`disco.worker.modutil` for options.
The :class:`Worker` base class defines the following parameters:
:type save_results: bool
:param save_results: whether or not to save the output to :ref:`DDFS`.
:type save_info: string
:param save_info: the information about saving into a DFS.
:type profile: bool
:param profile: determines whether :meth:`run` will be profiled.
:type required_files: list of paths or dict
:param required_files: additional files that are required by the worker.
Either a list of paths to files to include,
or a dictionary which contains items of the form
``(filename, filecontents)``.
.. versionchanged:: 0.4
The worker includes *required_files* in :meth:`jobzip`,
so they are available relative to the working directory
of the worker.
:type required_modules: see :ref:`modspec`
:param required_modules: required modules to send with the worker.
"""
stderr = sys.stderr
def __init__(self, **kwargs):
super(Worker, self).__init__(self.defaults())
self.update(kwargs)
self.outputs = {}
@property
def bin(self):
"""
The path to the :term:`worker` binary, relative to the :term:`job home`.
Used to set :attr:`jobdict.worker` in :meth:`jobdict`.
"""
from inspect import getsourcefile, getmodule
return getsourcefile(getmodule(self)).strip('/')
def defaults(self):
"""
:return: dict of default values for the :class:`Worker`.
"""
return {'save_results': False,
'profile': False,
'required_files': {},
'required_modules': None}
def getitem(self, key, job, jobargs, default=None):
"""
Resolves ``key`` in the following order:
#. ``jobargs`` (parameters passed in during :meth:`disco.job.Job.run`)
#. ``job`` (attributes of the :class:`disco.job.Job`)
#. ``self`` (items in the :class:`Worker` dict itself)
#. ``default``
"""
if key in jobargs:
return jobargs[key]
elif hasattr(job, key):
return getattr(job, key)
return self.get(key, default)
def get_modules(self, job, **jobargs):
from disco.worker.modutil import find_modules
from disco.util import iterify
def get(key):
return self.getitem(key, job, jobargs)
from inspect import getsourcefile, getmodule
job_path = getsourcefile(getmodule(job))
return find_modules([obj
for key in self
for obj in iterify(get(key))
if callable(obj)],
job_path=job_path,
exclude=['Task'])
def jobdict(self, job, **jobargs):
"""
Creates a basic :ref:`jobdict` for the :class:`Worker`.
Makes use of the following parameters:
:type name: string
:param name: directly sets :attr:`jobdict.prefix`.
:type owner: string
:param owner: directly sets :attr:`jobdict.owner`.
If not specified, uses :envvar:`DISCO_JOB_OWNER`.
:return: :ref:`jobdict` dict.
"""
return {'prefix': self.getitem('name', job, jobargs),
'save_results': self.getitem('save_results', job, jobargs, False),
'save_info': self.getitem('save_info', job, jobargs, "ddfs"),
'scheduler': self.getitem('scheduler', job, jobargs, {}),
'owner': self.getitem('owner', job, jobargs,
job.settings['DISCO_JOB_OWNER'])}
def jobenvs(self, job, **jobargs):
"""
:return: :ref:`jobenvs` dict.
"""
envs = {'PYTHONPATH': ':'.join([path.strip('/') for path in sys.path])}
envs['LD_LIBRARY_PATH'] = 'lib'
envs['PYTHONPATH'] = ':'.join(('lib', envs.get('PYTHONPATH', '')))
return envs
def jobhome(self, job, **jobargs):
"""
:return: the :term:`job home` (serialized).
Calls :meth:`jobzip` to create the :class:`disco.fileutils.DiscoZipFile`.
"""
jobzip = self.jobzip(job, **jobargs)
jobzip.close()
return jobzip.dumps()
def jobzip(self, job, **jobargs):
"""
A hook provided by the :class:`Worker` for creating the
:term:`job home` zip. The base implementation creates a
minimal zip file containing the Disco standard library, and
any user-specified required files and modules.
:return: a :class:`disco.fileutils.DiscoZipFile`.
"""
# First, add the disco standard library.
from clx import __file__ as clxpath
from disco import __file__ as discopath
from disco.fileutils import DiscoZipFile
jobzip = DiscoZipFile()
jobzip.writepath(os.path.dirname(clxpath), exclude=('.pyc', '__pycache__'))
jobzip.writepath(os.path.dirname(discopath), exclude=('.pyc', '__pycache__'))
jobzip.writesource(job)
jobzip.writesource(self)
# Then, add any user-specified required files.
from disco.util import iskv
def get(key):
return self.getitem(key, job, jobargs)
if isinstance(get('required_files'), dict):
for path, bytes in get('required_files').items():
jobzip.writestr(path, bytes)
else:
for path in get('required_files'):
jobzip.write(path, os.path.join('lib', os.path.basename(path)))
if get('required_modules') is None:
self['required_modules'] = self.get_modules(job, **jobargs)
for mod in get('required_modules'):
if iskv(mod):
jobzip.writepath(mod[1])
# Done with basic minimal zip.
return jobzip
def input(self, task, merged=False, **kwds):
"""
:type task: :class:`disco.task.Task`
:param task: the task for which to retrieve input.
:type merged: bool
:param merged: if specified, returns a :class:`MergedInput`.
:type kwds: dict
:param kwds: additional keyword arguments for the :class:`Input`.
:return: an :class:`Input` to iterate over the inputs from the master.
"""
if merged:
return MergedInput(self.get_inputs(), task=task, **kwds)
return SerialInput(self.get_inputs(), task=task, **kwds)
def output(self, task, label=None, **kwds):
"""
:type task: :class:`disco.task.Task`
:param task: the task for which to create output.
:type label: int or None
:param label: the label of the output partition to get.
:type kwds: dict
:param kwds: additional keyword arguments for the :class:`Output`.
:return: the previously opened :class:`Output` for *label*,
or if necessary, a newly opened one.
"""
if label not in self.outputs:
self.outputs[label] = Output(task.output(label=label), **kwds)
return self.outputs[label]
def start(self, task, job, **jobargs):
from disco.sysutil import set_mem_limit
set_mem_limit(job.settings['DISCO_WORKER_MAX_MEM'])
task.makedirs()
if self.getitem('profile', job, jobargs):
from cProfile import runctx
name = 'profile-{0}'.format(task.uid)
path = task.path(name)
runctx('self.run(task, job, **jobargs)', globals(), locals(), path)
task.put(name, open(path, 'rb').read())
else:
self.run(task, job, **jobargs)
self.end(task, job, **jobargs)
def run(self, task, job, **jobargs):
"""
Called to do the actual work of processing the
:class:`disco.task.Task`. This method runs in the Disco
cluster, on a server that is executing one of the tasks in a
job submitted by a client.
"""
self.getitem(task.stage, job, jobargs)(task, job, **jobargs)
def end(self, task, job, **jobargs):
self.send_outputs()
self.send('MSG', "Results sent to master")
@classmethod
def main(cls):
"""
The main method used to bootstrap the :class:`Worker` when it is being executed.
It is enough for the module to define::
if __name__ == '__main__':
Worker.main()
.. note:: It is critical that subclasses check if they are executing
in the ``__main__`` module, before running :meth:`main`,
as the worker module is also generally imported on the client side.
"""
try:
sys.stdin = NonBlockingInput(sys.stdin,
timeout=3 * DISCO_WORKER_MAX_TIME)
sys.stdout = sys.stderr = MessageWriter(cls)
cls.send('WORKER', {'pid': os.getpid(), 'version': "1.1"})
task = cls.get_task()
job, jobargs = task.jobobjs
job.worker.start(task, job, **jobargs)
cls.send('DONE')
except (DataError, EnvironmentError, MemoryError) as e:
# check the number of open file descriptors (under proc), warn if close to max
# http://stackoverflow.com/questions/899038/getting-the-highest-allocated-file-descriptor
# also check for other known reasons for error, such as if disk is full
cls.send('ERROR', traceback.format_exc())
raise
except Exception as e:
cls.send('FATAL', force_utf8(traceback.format_exc()))
raise
@classmethod
def send(cls, type, payload=''):
from json import dumps, loads
body = dumps(payload)
cls.stderr.write('{0} {1} {2}\n'.format(type, len(body), body))
cls.stderr.flush()
spent, rtype = sys.stdin.t_read_until(' ')
spent, rsize = sys.stdin.t_read_until(' ', spent=spent)
spent, rbody = sys.stdin.t_read(int(rsize) + 1, spent=spent)
if type == 'ERROR':
raise ValueError(loads(rbody[:-1]))
return loads(rbody[:-1])
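# Illustrative sketch of the framing used by send() above (the payload is hypothetical):
# cls.send('MSG', 'hello') writes the line: MSG 7 "hello"
# where 7 is the length of the JSON body, then blocks until the master answers
# in the same '<type> <size> <json-body>' format and returns the decoded body.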
@classmethod
def get_input(cls, id):
done, inputs = cls.send('INPUT', ['include', [id]])
_id, status, _label, replicas = inputs[0]
if status == 'busy':
raise Wait
if status == 'failed':
raise DataError("Can't handle broken input", id)
return [(id, str(url)) for id, url in replicas]
@classmethod
def get_inputs(cls, done=False, exclude=[]):
while done != "done":
done, inputs = cls.send('INPUT', ['exclude', exclude])
for id, _status, label, _replicas in inputs:
if id not in exclude:
label = label if label == 'all' else int(label)
yield IDedInput((cls, id, label))
exclude.append(id)
@classmethod
def labelled_input_map(cls, task, inputs):
from disco.util import ispartitioned, read_index
from collections import defaultdict
def update_label_map(lm, i):
reps = [url for rid, url in i.replicas]
if ispartitioned(reps):
for l, url, size in read_index(reps[0]):
if i.label in ('all', l):
lm[l].append([url])
else:
lm[i.label].append(reps)
label_map = defaultdict(list)
for i in inputs:
update_label_map(label_map, i)
return label_map
@classmethod
def concat_input(cls, task, output_label, replicas):
output = AtomicFile(task.output_path(output_label))
BUFFER_SIZE = 1024*1024
for reps in replicas:
# Use only the first replica for now, since a set of one
# is the most common case.
# TODO: handle falling back to alternative replicas.
inp = open_url(reps[0])
buf = inp.read(BUFFER_SIZE)
while (len(buf) > 0):
output.write(buf)
buf = inp.read(BUFFER_SIZE)
inp.close()
output.close()
return output.path, output.size()
@classmethod
def get_task(cls):
from disco.task import Task
return Task(**dict((str(k), v) for k, v in cls.send('TASK').items()))
def send_outputs(self):
for output in self.outputs.values():
output.close()
self.send('OUTPUT', [output.label, output.path, output.size()])
class Params(object):
"""
Classic parameter container for tasks.
This object provides a way to contain custom parameters, or state,
in your tasks.
You can specify any number of ``key, value`` pairs to the
:class:`Params`. The pairs will be available to task functions
through the *params* argument. Each task receives its own copy of
the initial params object.
*key* must be a valid Python identifier. *value* can be any Python
object.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
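# Illustrative usage sketch for Params (the map function below is hypothetical):
# params = Params(multiplier=2, prefix='word')
# def my_map(entry, params):
#     yield params.prefix, int(entry) * params.multiplier
# Each task would receive its own copy of the initial params object, as described above.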
class IDedInput(tuple):
@property
def worker(self):
return self[0]
@property
def id(self):
return self[1]
@property
def label(self):
return self[2]
@property
def replicas(self):
return self.worker.get_input(self.id)
@property
def isindex(self):
from disco.util import ispartitioned
return ispartitioned(self.locations)
@property
def locations(self):
return [r for rid, r in self.replicas]
def unavailable(self, tried):
return self.worker.send('INPUT_ERR', [self.id, list(tried)])
def __str__(self):
return '{0}'.format([url for rid, url in self.replicas])
class ReplicaIter(object):
def __init__(self, input):
self.input, self.used = input, set()
self.checked_local = False
def __iter__(self):
return self
def next(self):
from disco.util import urlsplit
replicas = dict(self.input.replicas)
repl_ids = list(set(replicas) - self.used)
if not self.checked_local and active_task: # Try to favor opening a local file
self.checked_local = True
for repl_id in repl_ids:
replica = replicas[repl_id]
scheme, netloc, rest = urlsplit(replica,
localhost=active_task.host,
ddfs_data=active_task.ddfs_data,
disco_data=active_task.disco_data)
if scheme == 'file':
self.used.add(repl_id)
if os.path.exists(rest): # file might not exist due to replica rebalancing
return replica
repl_ids.remove(repl_id)
if repl_ids:
repl_id = random.choice(repl_ids)
self.used.add(repl_id)
return replicas[repl_id]
self.input.unavailable(self.used)
raise StopIteration
def __next__(self):
return self.next()
class InputIter(object):
def __init__(self, input, task=None, open=None, start=0):
self.input = input
if isinstance(input, IDedInput):
self.urls = ReplicaIter(input)
elif isinstance(input, basestring):
self.urls = iter([input])
else:
self.urls = iter(input)
self.last = start - 1
self.open = open if open else Input.default_opener(task=task)
self.swap()
def __iter__(self):
return self
def next(self):
try:
self.last, item = next(self.iter)
return item
except DataError:
self.swap(traceback.format_exc())
raise Wait(0)
def __next__(self):
return self.next()
def swap(self, error=None):
try:
def skip(iter, N):
from itertools import dropwhile
return dropwhile(lambda n_rec: n_rec[0] < N, enumerate(iter))
self.iter = skip(self.open(next(self.urls)), self.last + 1)
except DataError:
self.swap(traceback.format_exc())
except StopIteration:
if error:
raise DataError("Exhausted all available replicas, "
"last error was:\n\n{0}".format(error), self.input)
raise DataError("Exhausted all available replicas", self.input)
class Input(object):
"""
An iterable over one or more :class:`Worker` inputs,
which can gracefully handle corrupted replicas or otherwise failed inputs.
:type open: function
:param open: a function with the following signature::
def open(url):
...
return file
used to open input files.
"""
def __init__(self, input, task=None, **kwds):
self.input, self.task, self.kwds = input, task, kwds
def __iter__(self):
iter = self.input_iter(self.input)
while iter:
try:
for item in iter:
yield item
iter = None
except Wait as w:
time.sleep(w.retry_after)
def input_iter(self, input):
return InputIter(self.input, task=self.task, **self.kwds)
@classmethod
def default_opener(cls, task):
from disco import schemes
def open(url):
return schemes.open(url, task=task)
return open
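# Illustrative sketch of a custom opener (the function below is hypothetical); any
# callable matching the open(url) -> file signature from the Input docstring can be
# passed through the keyword arguments, e.g.:
# def my_open(url):
#     from disco import schemes
#     return schemes.open(url, task=None)  # or any object that yields records
# records = Input(urls, open=my_open)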
class BaseOutput(object):
def __init__(self, path_type_label):
self.path, self.type, label = path_type_label
self.label = 0 if label is None else int(label)
def size(self):
return os.path.getsize(self.path)
def close(self):
pass
class Output(BaseOutput):
"""
A container for outputs from :class:`workers <Worker>`.
:type open: function
:param open: a function with the following signature::
def open(url):
...
return file
used to open new output files.
.. attribute:: path
The path to the underlying output file.
.. attribute:: type
The type of output.
.. attribute:: label
The label for the output (or None).
.. attribute:: file
The underlying output file handle.
"""
def __init__(self, path_type_label, open=None):
super(Output, self).__init__(path_type_label)
self.open = open or DiscoOutput
self.file = self.open(self.path)
def close(self):
self.file.close()
class SerialInput(Input):
"""
Produces an iterator over the records in a list of sequential inputs.
"""
def __iter__(self):
for input in self.input:
for record in Input(input, task=self.task, **self.kwds):
yield record
class ParallelInput(Input):
"""
Produces an iterator over the unordered records in a set of inputs.
Usually requires the full set of inputs (i.e. it will block with streaming input).
"""
BUSY_TIMEOUT = 1
def __iter__(self):
iters = [self.input_iter(input) for input in self.input]
while iters:
iter = iters.pop()
try:
for item in iter:
yield item
except Wait as w:
if not iters:
time.sleep(w.retry_after)
iters.insert(0, iter)
def couple(self, iters, heads, n):
while True:
if heads[n] is Wait:
self.fill(iters, heads, n=n)
head = heads[n]
heads[n] = Wait
yield head
def fetch(self, iters, heads, stop=all):
busy = 0
for n, head in enumerate(heads):
if head is Wait:
try:
heads[n] = next(iters[n])
except Wait:
if stop in (all, n):
busy += 1
except StopIteration:
if stop in (all, n):
raise
return busy
def fill(self, iters, heads, n=all, busy=True):
while busy:
busy = self.fetch(iters, heads, stop=n)
if busy:
time.sleep(self.BUSY_TIMEOUT)
return heads
class MergedInput(ParallelInput):
"""
Produces an iterator over the minimal head elements of the inputs.
"""
def __iter__(self):
from heapq import merge
iters = [self.input_iter(input) for input in self.input]
heads = [Wait] * len(iters)
return merge(*(self.couple(iters, heads, n) for n in range(len(iters))))
if __name__ == '__main__':
Worker.main()
```
|
{
"source": "jgrob1/core",
"score": 2
}
|
#### File: components/homekit/type_humidifiers.py
```python
import logging
from pyhap.const import CATEGORY_HUMIDIFIER
from homeassistant.components.humidifier.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
DOMAIN,
SERVICE_SET_HUMIDITY,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
UNIT_PERCENTAGE,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_state_change_event
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_ACTIVE,
CHAR_CURRENT_HUMIDIFIER_DEHUMIDIFIER,
CHAR_CURRENT_HUMIDITY,
CHAR_DEHUMIDIFIER_THRESHOLD_HUMIDITY,
CHAR_HUMIDIFIER_THRESHOLD_HUMIDITY,
CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER,
CONF_LINKED_HUMIDITY_SENSOR,
PROP_MAX_VALUE,
PROP_MIN_STEP,
PROP_MIN_VALUE,
SERV_HUMIDIFIER_DEHUMIDIFIER,
)
_LOGGER = logging.getLogger(__name__)
HC_HUMIDIFIER = 1
HC_DEHUMIDIFIER = 2
HC_HASS_TO_HOMEKIT_DEVICE_CLASS = {
DEVICE_CLASS_HUMIDIFIER: HC_HUMIDIFIER,
DEVICE_CLASS_DEHUMIDIFIER: HC_DEHUMIDIFIER,
}
HC_HASS_TO_HOMEKIT_DEVICE_CLASS_NAME = {
DEVICE_CLASS_HUMIDIFIER: "Humidifier",
DEVICE_CLASS_DEHUMIDIFIER: "Dehumidifier",
}
HC_DEVICE_CLASS_TO_TARGET_CHAR = {
HC_HUMIDIFIER: CHAR_HUMIDIFIER_THRESHOLD_HUMIDITY,
HC_DEHUMIDIFIER: CHAR_DEHUMIDIFIER_THRESHOLD_HUMIDITY,
}
HC_STATE_INACTIVE = 0
HC_STATE_IDLE = 1
HC_STATE_HUMIDIFYING = 2
HC_STATE_DEHUMIDIFYING = 3
@TYPES.register("HumidifierDehumidifier")
class HumidifierDehumidifier(HomeAccessory):
"""Generate a HumidifierDehumidifier accessory for a humidifier."""
def __init__(self, *args):
"""Initialize a HumidifierDehumidifier accessory object."""
super().__init__(*args, category=CATEGORY_HUMIDIFIER)
self.chars = []
state = self.hass.states.get(self.entity_id)
device_class = state.attributes.get(ATTR_DEVICE_CLASS, DEVICE_CLASS_HUMIDIFIER)
self._hk_device_class = HC_HASS_TO_HOMEKIT_DEVICE_CLASS[device_class]
self._target_humidity_char_name = HC_DEVICE_CLASS_TO_TARGET_CHAR[
self._hk_device_class
]
self.chars.append(self._target_humidity_char_name)
serv_humidifier_dehumidifier = self.add_preload_service(
SERV_HUMIDIFIER_DEHUMIDIFIER, self.chars
)
# Current and target mode characteristics
self.char_current_humidifier_dehumidifier = (
serv_humidifier_dehumidifier.configure_char(
CHAR_CURRENT_HUMIDIFIER_DEHUMIDIFIER, value=0
)
)
self.char_target_humidifier_dehumidifier = (
serv_humidifier_dehumidifier.configure_char(
CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER,
value=self._hk_device_class,
valid_values={
HC_HASS_TO_HOMEKIT_DEVICE_CLASS_NAME[
device_class
]: self._hk_device_class
},
)
)
# Current and target humidity characteristics
self.char_current_humidity = serv_humidifier_dehumidifier.configure_char(
CHAR_CURRENT_HUMIDITY, value=0
)
max_humidity = state.attributes.get(ATTR_MAX_HUMIDITY, DEFAULT_MAX_HUMIDITY)
max_humidity = round(max_humidity)
max_humidity = min(max_humidity, 100)
min_humidity = state.attributes.get(ATTR_MIN_HUMIDITY, DEFAULT_MIN_HUMIDITY)
min_humidity = round(min_humidity)
min_humidity = max(min_humidity, 0)
self.char_target_humidity = serv_humidifier_dehumidifier.configure_char(
self._target_humidity_char_name,
value=45,
properties={
PROP_MIN_VALUE: min_humidity,
PROP_MAX_VALUE: max_humidity,
PROP_MIN_STEP: 1,
},
)
# Active/inactive characteristics
self.char_active = serv_humidifier_dehumidifier.configure_char(
CHAR_ACTIVE, value=False
)
self.async_update_state(state)
serv_humidifier_dehumidifier.setter_callback = self._set_chars
self.linked_humidity_sensor = self.config.get(CONF_LINKED_HUMIDITY_SENSOR)
if self.linked_humidity_sensor:
humidity_state = self.hass.states.get(self.linked_humidity_sensor)
if humidity_state:
self._async_update_current_humidity(humidity_state)
async def run_handler(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
if self.linked_humidity_sensor:
async_track_state_change_event(
self.hass,
[self.linked_humidity_sensor],
self.async_update_current_humidity_event,
)
await super().run_handler()
@callback
def async_update_current_humidity_event(self, event):
"""Handle state change event listener callback."""
self._async_update_current_humidity(event.data.get("new_state"))
@callback
def _async_update_current_humidity(self, new_state):
"""Handle linked humidity sensor state change to update HomeKit value."""
if new_state is None:
_LOGGER.error(
"%s: Unable to update from linked humidity sensor %s: the entity state is None",
self.entity_id,
self.linked_humidity_sensor,
)
return
try:
current_humidity = float(new_state.state)
if self.char_current_humidity.value != current_humidity:
_LOGGER.debug(
"%s: Linked humidity sensor %s changed to %d",
self.entity_id,
self.linked_humidity_sensor,
current_humidity,
)
self.char_current_humidity.set_value(current_humidity)
except ValueError as ex:
_LOGGER.error(
"%s: Unable to update from linked humidity sensor %s: %s",
self.entity_id,
self.linked_humidity_sensor,
ex,
)
def _set_chars(self, char_values):
_LOGGER.debug("HumidifierDehumidifier _set_chars: %s", char_values)
if CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER in char_values:
hk_value = char_values[CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER]
if self._hk_device_class != hk_value:
_LOGGER.error(
"%s is not supported", CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER
)
if CHAR_ACTIVE in char_values:
self.call_service(
DOMAIN,
SERVICE_TURN_ON if char_values[CHAR_ACTIVE] else SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: self.entity_id},
f"{CHAR_ACTIVE} to {char_values[CHAR_ACTIVE]}",
)
if self._target_humidity_char_name in char_values:
humidity = round(char_values[self._target_humidity_char_name])
self.call_service(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_ENTITY_ID: self.entity_id, ATTR_HUMIDITY: humidity},
f"{self._target_humidity_char_name} to "
f"{char_values[self._target_humidity_char_name]}{UNIT_PERCENTAGE}",
)
@callback
def async_update_state(self, new_state):
"""Update state without rechecking the device features."""
is_active = new_state.state == STATE_ON
# Update active state
if self.char_active.value != is_active:
self.char_active.set_value(is_active)
# Set current state
if is_active:
if self._hk_device_class == HC_HUMIDIFIER:
current_state = HC_STATE_HUMIDIFYING
else:
current_state = HC_STATE_DEHUMIDIFYING
else:
current_state = HC_STATE_INACTIVE
if self.char_current_humidifier_dehumidifier.value != current_state:
self.char_current_humidifier_dehumidifier.set_value(current_state)
# Update target humidity
target_humidity = new_state.attributes.get(ATTR_HUMIDITY)
if isinstance(target_humidity, (int, float)):
if self.char_target_humidity.value != target_humidity:
self.char_target_humidity.set_value(target_humidity)
```
#### File: components/image/__init__.py
```python
import asyncio
import logging
import pathlib
import secrets
import shutil
import typing
from PIL import Image, ImageOps, UnidentifiedImageError
from aiohttp import hdrs, web
from aiohttp.web_request import FileField
import voluptuous as vol
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import collection
from homeassistant.helpers.storage import Store
import homeassistant.util.dt as dt_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
VALID_SIZES = {256, 512}
MAX_SIZE = 1024 * 1024 * 10
CREATE_FIELDS = {
vol.Required("file"): FileField,
}
UPDATE_FIELDS = {
vol.Optional("name"): vol.All(str, vol.Length(min=1)),
}
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Image integration."""
image_dir = pathlib.Path(hass.config.path(DOMAIN))
hass.data[DOMAIN] = storage_collection = ImageStorageCollection(hass, image_dir)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection,
DOMAIN,
DOMAIN,
CREATE_FIELDS,
UPDATE_FIELDS,
).async_setup(hass, create_create=False)
hass.http.register_view(ImageUploadView)
hass.http.register_view(ImageServeView(image_dir, storage_collection))
return True
class ImageStorageCollection(collection.StorageCollection):
"""Image collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
def __init__(self, hass: HomeAssistant, image_dir: pathlib.Path) -> None:
"""Initialize media storage collection."""
super().__init__(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
)
self.async_add_listener(self._change_listener)
self.image_dir = image_dir
async def _process_create_data(self, data: typing.Dict) -> typing.Dict:
"""Validate the config is valid."""
data = self.CREATE_SCHEMA(dict(data))
uploaded_file: FileField = data["file"]
if not uploaded_file.content_type.startswith("image/"):
raise vol.Invalid("Only images are allowed")
data[CONF_ID] = secrets.token_hex(16)
data["filesize"] = await self.hass.async_add_executor_job(self._move_data, data)
data["content_type"] = uploaded_file.content_type
data["name"] = uploaded_file.filename
data["uploaded_at"] = dt_util.utcnow().isoformat()
return data
def _move_data(self, data):
"""Move data."""
uploaded_file: FileField = data.pop("file")
# Verify we can read the image
try:
image = Image.open(uploaded_file.file)
except UnidentifiedImageError:
raise vol.Invalid("Unable to identify image file")
# Reset content
uploaded_file.file.seek(0)
media_folder: pathlib.Path = self.image_dir / data[CONF_ID]
media_folder.mkdir(parents=True)
media_file = media_folder / "original"
# Raises if path is no longer relative to the media dir
media_file.relative_to(media_folder)
_LOGGER.debug("Storing file %s", media_file)
with media_file.open("wb") as target:
shutil.copyfileobj(uploaded_file.file, target)
image.close()
return media_file.stat().st_size
@callback
def _get_suggested_id(self, info: typing.Dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_ID]
async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict:
"""Return a new updated data object."""
return {**data, **self.UPDATE_SCHEMA(update_data)}
async def _change_listener(self, change_type, item_id, data):
"""Handle change."""
if change_type != collection.CHANGE_REMOVED:
return
await self.hass.async_add_executor_job(shutil.rmtree, self.image_dir / item_id)
class ImageUploadView(HomeAssistantView):
"""View to upload images."""
url = "/api/image/upload"
name = "api:image:upload"
async def post(self, request):
"""Handle upload."""
# Increase max payload
request._client_max_size = MAX_SIZE # pylint: disable=protected-access
data = await request.post()
item = await request.app["hass"].data[DOMAIN].async_create_item(data)
return self.json(item)
class ImageServeView(HomeAssistantView):
"""View to download images."""
url = "/api/image/serve/{image_id}/{filename}"
name = "api:image:serve"
requires_auth = False
def __init__(
self, image_folder: pathlib.Path, image_collection: ImageStorageCollection
):
"""Initialize image serve view."""
self.transform_lock = asyncio.Lock()
self.image_folder = image_folder
self.image_collection = image_collection
async def get(self, request: web.Request, image_id: str, filename: str):
"""Serve image."""
image_size = filename.split("-", 1)[0]
try:
parts = image_size.split("x", 1)
width = int(parts[0])
height = int(parts[1])
except (ValueError, IndexError):
raise web.HTTPBadRequest
if not width or width != height or width not in VALID_SIZES:
raise web.HTTPBadRequest
image_info = self.image_collection.data.get(image_id)
if image_info is None:
raise web.HTTPNotFound()
hass = request.app["hass"]
target_file = self.image_folder / image_id / f"{width}x{height}"
if not target_file.is_file():
async with self.transform_lock:
# Another check in case another request already finished it while waiting
if not target_file.is_file():
await hass.async_add_executor_job(
_generate_thumbnail,
self.image_folder / image_id / "original",
image_info["content_type"],
target_file,
(width, height),
)
return web.FileResponse(
target_file, headers={hdrs.CONTENT_TYPE: image_info["content_type"]}
)
def _generate_thumbnail(original_path, content_type, target_path, target_size):
"""Generate a size."""
image = ImageOps.exif_transpose(Image.open(original_path))
image.thumbnail(target_size)
image.save(target_path, format=content_type.split("/", 1)[1])
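# Illustrative request sketch (the image id and filename below are hypothetical):
# GET /api/image/serve/0123456789abcdef0123456789abcdef/512x512-avatar.png
# ImageServeView.get parses "512x512" from the filename, requires width == height
# with width in VALID_SIZES, and generates and caches the thumbnail on first access.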
```
#### File: components/knx/cover.py
```python
from xknx.devices import Cover as XknxCover
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DEVICE_CLASS_BLIND,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_utc_time_change
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up cover(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up covers for KNX platform configured via xknx.yaml."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXCover(device))
async_add_entities(entities)
class KNXCover(CoverEntity):
"""Representation of a KNX cover."""
def __init__(self, device: XknxCover):
"""Initialize the cover."""
self.device = device
self._unsubscribe_auto_updater = None
@callback
def async_register_callbacks(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
self.async_write_ha_state()
if self.device.is_traveling():
self.start_auto_updater()
self.device.register_device_updated_cb(after_update_callback)
async def async_added_to_hass(self):
"""Store register state change callback."""
self.async_register_callbacks()
async def async_update(self):
"""Request a state update from KNX bus."""
await self.device.sync()
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.device.supports_angle:
return DEVICE_CLASS_BLIND
return None
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
if self.device.supports_stop:
supported_features |= SUPPORT_STOP
if self.device.supports_angle:
supported_features |= SUPPORT_SET_TILT_POSITION
return supported_features
@property
def current_cover_position(self):
"""Return the current position of the cover.
None is unknown, 0 is closed, 100 is fully open.
"""
# In KNX 0 is open, 100 is closed.
try:
return 100 - self.device.current_position()
except TypeError:
return None
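# Illustrative example of the inversion above (values are hypothetical): a KNX
# position of 30 (0 = open in KNX) is reported to Home Assistant as 70
# (100 = fully open in Home Assistant).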
@property
def is_closed(self):
"""Return if the cover is closed."""
return self.device.is_closed()
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self.device.is_opening()
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self.device.is_closing()
async def async_close_cover(self, **kwargs):
"""Close the cover."""
await self.device.set_down()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self.device.set_up()
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
knx_position = 100 - kwargs[ATTR_POSITION]
await self.device.set_position(knx_position)
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
await self.device.stop()
self.stop_auto_updater()
@property
def current_cover_tilt_position(self):
"""Return current tilt position of cover."""
if not self.device.supports_angle:
return None
try:
return 100 - self.device.current_angle()
except TypeError:
return None
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
knx_tilt_position = 100 - kwargs[ATTR_TILT_POSITION]
await self.device.set_angle(knx_tilt_position)
def start_auto_updater(self):
"""Start the autoupdater to update Home Assistant while cover is moving."""
if self._unsubscribe_auto_updater is None:
self._unsubscribe_auto_updater = async_track_utc_time_change(
self.hass, self.auto_updater_hook
)
def stop_auto_updater(self):
"""Stop the autoupdater."""
if self._unsubscribe_auto_updater is not None:
self._unsubscribe_auto_updater()
self._unsubscribe_auto_updater = None
@callback
def auto_updater_hook(self, now):
"""Call for the autoupdater."""
self.async_write_ha_state()
if self.device.position_reached():
self.stop_auto_updater()
self.hass.add_job(self.device.auto_stop_if_necessary())
```
|
{
"source": "jgroehl/PokerGame",
"score": 3
}
|
#### File: PokerGame/pokerlib/Evalutaor.py
```python
import numpy as np
from pokerlib.EvaluationStateMachine import FlushStateDevice, \
StraightStateDevice, CardValueStateDevice
ROYAL_FLUSH = 9
STRAIGHT_FLUSH = 8
FOUR_OF_A_KIND = 7
FULL_HOUSE = 6
FLUSH = 5
STRAIGHT = 4
THREE_OF_A_KIND = 3
TWO_PAIR = 2
PAIR = 1
HIGH_CARD = 0
def evaluate_cards(cards):
cards = np.asarray(cards)
cards.sort()
cards_values = np.asarray([cards[i].value for i in range(len(cards))])
cards_value_diff = [cards[i+1].value-cards[i].value for i in range(len(cards)-1)]
suits = np.asarray([cards[i].suit for i in range(len(cards))])
suits_indexes = np.argsort(suits)
suits.sort()
cards_suit_diff = [suits[i + 1] - suits[i] for i in range(len(suits) - 1)]
flush_state_machine = FlushStateDevice()
straight_state_machine = StraightStateDevice()
value_state_machine = CardValueStateDevice()
if len(cards) != 7:
raise AssertionError("Need seven cards to evaluate")
for index in range(len(cards_suit_diff)):
flush_state_machine.on_event(cards_suit_diff[index], index)
flush_card_indexes = suits_indexes[flush_state_machine.used_card_indices]
is_flush = flush_state_machine.state.evaluate()
for index in range(len(cards_value_diff)):
straight_state_machine.on_event(cards_value_diff[index], index)
value_state_machine.on_event(cards_value_diff[index], index)
is_straight = straight_state_machine.state.evaluate()
straight_indexes = straight_state_machine.used_card_indices
is_ace_high_straight = False
if not is_straight and \
(12 in cards_values and
0 in cards_values and
1 in cards_values and
2 in cards_values and
3 in cards_values):
is_straight = True
is_ace_high_straight = True
ace_straight_values = [12, 0, 1, 2, 3]
straight_indexes = np.asarray([np.where(cards_values == ace_straight_values[i])[0][0]
for i in range(len(ace_straight_values))])
value_result = value_state_machine.state.evaluate()
value_indexes = value_state_machine.used_card_indices
sf_indexes = np.intersect1d(flush_card_indexes, straight_indexes)
index_diffs = np.abs([sf_indexes[i]-sf_indexes[i+1] for i in range(len(sf_indexes)-1)])
high_cards = list(np.copy(cards))
if is_flush \
and is_straight \
and (np.sum(index_diffs) == len(sf_indexes)-1 or is_ace_high_straight) \
and len(sf_indexes) > 4:
if (cards[sf_indexes][-1].value == 12 and
cards[sf_indexes][-2].value == 11):  # Also check for the king, so an ace-to-five straight flush is not mistaken for a royal flush.
return [ROYAL_FLUSH, cards[sf_indexes], []]
if cards[sf_indexes][-1].value == 12:
sf_indexes = list(sf_indexes)
sf_indexes.insert(0, sf_indexes.pop(-1))
sf_indexes = np.asarray(sf_indexes)
return [STRAIGHT_FLUSH, cards[sf_indexes], []]
if value_result == FOUR_OF_A_KIND:
return_cards = cards[value_indexes]
if len(return_cards) > 4:
card_values = np.asarray([cards[index].value for index in value_indexes])
return_cards = return_cards[np.where(card_values == np.median(card_values))]
for card in return_cards:
high_cards.remove(card)
return [FOUR_OF_A_KIND, return_cards, get_high_cards(high_cards, 1)]
if value_result == FULL_HOUSE:
return_cards = cards[value_indexes]
return [FULL_HOUSE, return_cards[:], []]
if is_flush:
return_cards = cards[flush_card_indexes]
return [FLUSH, return_cards[-5:], []]
if is_straight:
return_cards = cards[straight_indexes]
return [STRAIGHT, return_cards[-5:], []]
if value_result == THREE_OF_A_KIND:
for card in cards[value_indexes]:
high_cards.remove(card)
return [THREE_OF_A_KIND, cards[value_indexes], get_high_cards(high_cards, 2)]
if value_result == TWO_PAIR:
return_cards = cards[value_indexes][-4:]
for card in return_cards:
high_cards.remove(card)
return [TWO_PAIR, return_cards, get_high_cards(high_cards, 1)]
if value_result == PAIR:
for card in cards[value_indexes]:
high_cards.remove(card)
return [PAIR, cards[value_indexes], get_high_cards(high_cards, 3)]
return [HIGH_CARD, [], cards[2:]]
def get_high_cards(cards, number):
if len(cards) < number:
raise AssertionError("cannot get so many high cards")
cards.sort()
return cards[(-1*number):]
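# Illustrative usage sketch (the Card constructor below is hypothetical; the cards
# only need to be comparable and expose `value` and `suit` attributes, as used above):
# seven_cards = [Card(value=v, suit=s) for v, s in hole_and_board]
# rank, made_hand, kickers = evaluate_cards(seven_cards)
# # rank is one of the constants defined above, e.g. FULL_HOUSE == 6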
```
|
{
"source": "jgroehl/simpa",
"score": 2
}
|
#### File: device_digital_twins/detection_geometries/ithera_invision_array.py
```python
import numpy as np
from simpa.core.device_digital_twins import DetectionGeometryBase
from simpa.utils import Tags
class iTheraInvision256TFDetectionGeometry(DetectionGeometryBase):
"""
This class represents a digital twin of an ultrasound detection device
with a curved detection geometry. The origin for this device is the center (focus) of the curved array.
"""
def __init__(self,
device_position_mm=None,
field_of_view_extent_mm=None):
"""
:param device_position_mm: Center (focus) of the curved array.
:param field_of_view_extent_mm: Field of view extent as [xdim_start, xdim_end, ydim_start, ydim_end, zdim_start, zdim_end] in mm. If None, a default field of view spanning the probe width and a depth of 100 mm is used.
"""
super(iTheraInvision256TFDetectionGeometry, self).__init__(
number_detector_elements=256,
detector_element_width_mm=0.635,
detector_element_length_mm=15,
center_frequency_hz=5e6,
bandwidth_percent=55,
sampling_frequency_mhz=40,
device_position_mm=device_position_mm)
self.positions = (np.asarray([[0.02890019, -0.02837304, 0.],
[0.02941755, -0.02783627, 0.],
[0.02992494, -0.02729007, 0.],
[0.03042219, -0.02673463, 0.],
[0.03090913, -0.02617013, 0.],
[0.0313856, -0.02559676, 0.],
[0.03185144, -0.02501472, 0.],
[0.03230648, -0.0244242, 0.],
[0.03275058, -0.0238254, 0.],
[0.03318357, -0.02321854, 0.],
[0.03360533, -0.0226038, 0.],
[0.0340157, -0.02198141, 0.],
[0.03441454, -0.02135156, 0.],
[0.03480172, -0.02071449, 0.],
[0.03517711, -0.02007039, 0.],
[0.03554058, -0.0194195, 0.],
[0.03589201, -0.01876202, 0.],
[0.03623128, -0.01809819, 0.],
[0.03655827, -0.01742822, 0.],
[0.03687287, -0.01675235, 0.],
[0.03717498, -0.0160708, 0.],
[0.03746449, -0.01538381, 0.],
[0.03774131, -0.01469161, 0.],
[0.03800534, -0.01399442, 0.],
[0.0382565, -0.0132925, 0.],
[0.03849469, -0.01258607, 0.],
[0.03871983, -0.01187538, 0.],
[0.03893186, -0.01116066, 0.],
[0.0391307, -0.01044216, 0.],
[0.03931627, -0.00972012, 0.],
[0.03948853, -0.00899479, 0.],
[0.0396474, -0.00826641, 0.],
[0.03979284, -0.00753523, 0.],
[0.0399248, -0.0068015, 0.],
[0.04004323, -0.00606546, 0.],
[0.04014809, -0.00532737, 0.],
[0.04023935, -0.00458747, 0.],
[0.04031697, -0.00384602, 0.],
[0.04038093, -0.00310327, 0.],
[0.04043121, -0.00235946, 0.],
[0.04046779, -0.00161485, 0.],
[0.04049066, -0.0008697, 0.],
[0.04049981, -0.00012425, 0.],
[0.04049524, 0.00062124, 0.],
[0.04047694, 0.00136652, 0.],
[0.04044493, 0.00211133, 0.],
[0.04039921, 0.00285544, 0.],
[0.04033981, 0.00359857, 0.],
[0.04026674, 0.00434048, 0.],
[0.04018002, 0.00508093, 0.],
[0.04007969, 0.00581965, 0.],
[0.03996578, 0.0065564, 0.],
[0.03983833, 0.00729093, 0.],
[0.03969738, 0.00802299, 0.],
[0.03954297, 0.00875233, 0.],
[0.03937517, 0.0094787, 0.],
[0.03919403, 0.01020186, 0.],
[0.03899961, 0.01092157, 0.],
[0.03879197, 0.01163757, 0.],
[0.03857119, 0.01234963, 0.],
[0.03833734, 0.01305751, 0.],
[0.0380905, 0.01376096, 0.],
[0.03783075, 0.01445975, 0.],
[0.03755818, 0.01515364, 0.],
[0.03727289, 0.0158424, 0.],
[0.03697497, 0.01652579, 0.],
[0.03666452, 0.01720358, 0.],
[0.03634165, 0.01787554, 0.],
[0.03600646, 0.01854144, 0.],
[0.03565907, 0.01920106, 0.],
[0.0352996, 0.01985417, 0.],
[0.03492817, 0.02050056, 0.],
[0.0345449, 0.02114, 0.],
[0.03414993, 0.02177228, 0.],
[0.03374339, 0.02239718, 0.],
[0.03332542, 0.02301449, 0.],
[0.03289615, 0.023624, 0.],
[0.03245573, 0.02422551, 0.],
[0.03200432, 0.02481881, 0.],
[0.03154207, 0.0254037, 0.],
[0.03106913, 0.02597998, 0.],
[0.03058566, 0.02654746, 0.],
[0.03009182, 0.02710594, 0.],
[0.02958779, 0.02765524, 0.],
[0.02907374, 0.02819517, 0.],
[0.02854983, 0.02872555, 0.],
[0.02801625, 0.02924619, 0.],
[0.02747318, 0.02975692, 0.],
[0.02692079, 0.03025757, 0.],
[0.02635929, 0.03074797, 0.],
[0.02578886, 0.03122795, 0.],
[0.02520968, 0.03169735, 0.],
[0.02462197, 0.03215601, 0.],
[0.02402591, 0.03260377, 0.],
[0.02342171, 0.03304048, 0.],
[0.02280957, 0.033466, 0.],
[0.02218971, 0.03388018, 0.],
[0.02156233, 0.03428288, 0.],
[0.02092764, 0.03467397, 0.],
[0.02028586, 0.0350533, 0.],
[0.0196372, 0.03542076, 0.],
[0.0189819, 0.03577622, 0.],
[0.01832016, 0.03611955, 0.],
[0.01765221, 0.03645064, 0.],
[0.01697828, 0.03676939, 0.],
[0.0162986, 0.03707567, 0.],
[0.0156134, 0.0373694, 0.],
[0.01492291, 0.03765046, 0.],
[0.01422736, 0.03791876, 0.],
[0.01352699, 0.03817421, 0.],
[0.01282203, 0.03841673, 0.],
[0.01211273, 0.03864624, 0.],
[0.01139933, 0.03886265, 0.],
[0.01068206, 0.03906589, 0.],
[0.00996118, 0.03925589, 0.],
[0.00923692, 0.03943259, 0.],
[0.00850953, 0.03959593, 0.],
[0.00777926, 0.03974586, 0.],
[0.00704635, 0.03988231, 0.],
[0.00631105, 0.04000526, 0.],
[0.00557361, 0.04011465, 0.],
[0.00483429, 0.04021044, 0.],
[0.00409333, 0.04029261, 0.],
[0.00335098, 0.04036113, 0.],
[0.0026075, 0.04041597, 0.],
[0.00186313, 0.04045712, 0.],
[0.00111813, 0.04048456, 0.],
[0.00037275, 0.04049828, 0.],
[-0.00037275, 0.04049828, 0.],
[-0.00111813, 0.04048456, 0.],
[-0.00186313, 0.04045712, 0.],
[-0.0026075, 0.04041597, 0.],
[-0.00335098, 0.04036113, 0.],
[-0.00409333, 0.04029261, 0.],
[-0.00483429, 0.04021044, 0.],
[-0.00557361, 0.04011465, 0.],
[-0.00631105, 0.04000526, 0.],
[-0.00704635, 0.03988231, 0.],
[-0.00777926, 0.03974586, 0.],
[-0.00850953, 0.03959593, 0.],
[-0.00923692, 0.03943259, 0.],
[-0.00996118, 0.03925589, 0.],
[-0.01068206, 0.03906589, 0.],
[-0.01139933, 0.03886265, 0.],
[-0.01211273, 0.03864624, 0.],
[-0.01282203, 0.03841673, 0.],
[-0.01352699, 0.03817421, 0.],
[-0.01422736, 0.03791876, 0.],
[-0.01492291, 0.03765046, 0.],
[-0.0156134, 0.0373694, 0.],
[-0.0162986, 0.03707567, 0.],
[-0.01697828, 0.03676939, 0.],
[-0.01765221, 0.03645064, 0.],
[-0.01832016, 0.03611955, 0.],
[-0.0189819, 0.03577622, 0.],
[-0.0196372, 0.03542076, 0.],
[-0.02028586, 0.0350533, 0.],
[-0.02092764, 0.03467397, 0.],
[-0.02156233, 0.03428288, 0.],
[-0.02218971, 0.03388018, 0.],
[-0.02280957, 0.033466, 0.],
[-0.02342171, 0.03304048, 0.],
[-0.02402591, 0.03260377, 0.],
[-0.02462197, 0.03215601, 0.],
[-0.02520968, 0.03169735, 0.],
[-0.02578886, 0.03122795, 0.],
[-0.02635929, 0.03074797, 0.],
[-0.02692079, 0.03025757, 0.],
[-0.02747318, 0.02975692, 0.],
[-0.02801625, 0.02924619, 0.],
[-0.02854983, 0.02872555, 0.],
[-0.02907374, 0.02819517, 0.],
[-0.02958779, 0.02765524, 0.],
[-0.03009182, 0.02710594, 0.],
[-0.03058566, 0.02654746, 0.],
[-0.03106913, 0.02597998, 0.],
[-0.03154207, 0.0254037, 0.],
[-0.03200432, 0.02481881, 0.],
[-0.03245573, 0.02422551, 0.],
[-0.03289615, 0.023624, 0.],
[-0.03332542, 0.02301449, 0.],
[-0.03374339, 0.02239718, 0.],
[-0.03414993, 0.02177228, 0.],
[-0.0345449, 0.02114, 0.],
[-0.03492817, 0.02050056, 0.],
[-0.0352996, 0.01985417, 0.],
[-0.03565907, 0.01920106, 0.],
[-0.03600646, 0.01854144, 0.],
[-0.03634165, 0.01787554, 0.],
[-0.03666452, 0.01720358, 0.],
[-0.03697497, 0.01652579, 0.],
[-0.03727289, 0.0158424, 0.],
[-0.03755818, 0.01515364, 0.],
[-0.03783075, 0.01445975, 0.],
[-0.0380905, 0.01376096, 0.],
[-0.03833734, 0.01305751, 0.],
[-0.03857119, 0.01234963, 0.],
[-0.03879197, 0.01163757, 0.],
[-0.03899961, 0.01092157, 0.],
[-0.03919403, 0.01020186, 0.],
[-0.03937517, 0.0094787, 0.],
[-0.03954297, 0.00875233, 0.],
[-0.03969738, 0.00802299, 0.],
[-0.03983833, 0.00729093, 0.],
[-0.03996578, 0.0065564, 0.],
[-0.04007969, 0.00581965, 0.],
[-0.04018002, 0.00508093, 0.],
[-0.04026674, 0.00434048, 0.],
[-0.04033981, 0.00359857, 0.],
[-0.04039921, 0.00285544, 0.],
[-0.04044493, 0.00211133, 0.],
[-0.04047694, 0.00136652, 0.],
[-0.04049524, 0.00062124, 0.],
[-0.04049981, -0.00012425, 0.],
[-0.04049066, -0.0008697, 0.],
[-0.04046779, -0.00161485, 0.],
[-0.04043121, -0.00235946, 0.],
[-0.04038093, -0.00310327, 0.],
[-0.04031697, -0.00384602, 0.],
[-0.04023935, -0.00458747, 0.],
[-0.04014809, -0.00532737, 0.],
[-0.04004323, -0.00606546, 0.],
[-0.0399248, -0.0068015, 0.],
[-0.03979284, -0.00753523, 0.],
[-0.0396474, -0.00826641, 0.],
[-0.03948853, -0.00899479, 0.],
[-0.03931627, -0.00972012, 0.],
[-0.0391307, -0.01044216, 0.],
[-0.03893186, -0.01116066, 0.],
[-0.03871983, -0.01187538, 0.],
[-0.03849469, -0.01258607, 0.],
[-0.0382565, -0.0132925, 0.],
[-0.03800534, -0.01399442, 0.],
[-0.03774131, -0.01469161, 0.],
[-0.03746449, -0.01538381, 0.],
[-0.03717498, -0.0160708, 0.],
[-0.03687287, -0.01675235, 0.],
[-0.03655827, -0.01742822, 0.],
[-0.03623128, -0.01809819, 0.],
[-0.03589201, -0.01876202, 0.],
[-0.03554058, -0.0194195, 0.],
[-0.03517711, -0.02007039, 0.],
[-0.03480172, -0.02071449, 0.],
[-0.03441454, -0.02135156, 0.],
[-0.0340157, -0.02198141, 0.],
[-0.03360533, -0.0226038, 0.],
[-0.03318357, -0.02321854, 0.],
[-0.03275058, -0.0238254, 0.],
[-0.03230648, -0.0244242, 0.],
[-0.03185144, -0.02501472, 0.],
[-0.0313856, -0.02559676, 0.],
[-0.03090913, -0.02617013, 0.],
[-0.03042219, -0.02673463, 0.],
[-0.02992494, -0.02729007, 0.],
[-0.02941755, -0.02783627, 0.],
[-0.02890019, -0.02837304, 0.]]) * 1000)[:, [0, 2, 1]]
detector_positions = self.get_detector_element_positions_base_mm()
min_x_coordinate = np.min(detector_positions[:, 0])
max_x_coordinate = np.max(detector_positions[:, 0])
self.probe_width_mm = max_x_coordinate - min_x_coordinate
min_z_coordinate = np.min(detector_positions[:, 2])
max_z_coordinate = np.max(detector_positions[:, 2])
self.probe_height_mm = max_z_coordinate - min_z_coordinate
if field_of_view_extent_mm is None:
self.field_of_view_extent_mm = np.asarray([-self.probe_width_mm/2,
self.probe_width_mm/2,
0, 0, 0, 100])
else:
self.field_of_view_extent_mm = field_of_view_extent_mm
def check_settings_prerequisites(self, global_settings) -> bool:
if global_settings[Tags.DIM_VOLUME_Z_MM] < (self.probe_height_mm + 1):
self.logger.error("Volume z dimension is too small to encompass the device in simulation!"
"Must be at least {} mm but was {} mm"
.format((self.probe_height_mm + 1),
global_settings[Tags.DIM_VOLUME_Z_MM]))
return False
if global_settings[Tags.DIM_VOLUME_X_MM] < (self.probe_width_mm + 1):
self.logger.error("Volume x dimension is too small to encompass MSOT device in simulation!"
"Must be at least {} mm but was {} mm"
.format(self.probe_width_mm, global_settings[Tags.DIM_VOLUME_X_MM]))
return False
return True
def update_settings_for_use_of_model_based_volume_creator(self, global_settings):
pass
def get_detector_element_positions_base_mm(self) -> np.ndarray:
return self.positions
def get_detector_element_orientations(self) -> np.ndarray:
detector_positions = self.get_detector_element_positions_base_mm()
detector_orientations = np.subtract(0, detector_positions)
norm = np.linalg.norm(detector_orientations, axis=-1)
for dim in range(3):
detector_orientations[:, dim] = detector_orientations[:, dim] / norm
return detector_orientations
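# Illustrative example of the normalisation above: a detector element located at
# (30, 0, -40) mm gets the unit orientation (-0.6, 0.0, 0.8), i.e. a vector pointing
# from the element back towards the focus of the curved array at the origin.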
def serialize(self) -> dict:
serialized_device = self.__dict__
return {"iTheraInvision256TFDetectionGeometry": serialized_device}
@staticmethod
def deserialize(dictionary_to_deserialize):
deserialized_device = iTheraInvision256TFDetectionGeometry()
for key, value in dictionary_to_deserialize.items():
deserialized_device.__dict__[key] = value
return deserialized_device
```
#### File: simulation_modules/optical_simulation_module/__init__.py
```python
import numpy as np
from typing import Union, Dict
from abc import abstractmethod
import gc
from simpa.utils import Tags, Settings
from simpa.core import SimulationModule
from simpa.utils.dict_path_manager import generate_dict_path
from simpa.io_handling.io_hdf5 import save_hdf5, load_hdf5
from simpa.core.device_digital_twins import IlluminationGeometryBase, PhotoacousticDevice
from simpa.utils.quality_assurance.data_sanity_testing import assert_array_well_defined
class OpticalForwardModuleBase(SimulationModule):
"""
Use this class as a base for implementations of optical forward models.
This class has the attributes `self.temporary_output_files` which stores file paths that are temporarily created as
input to the optical simulator, e.g. MCX. The class attributes `nx, ny & nz` represent the volume dimensions
"""
def __init__(self, global_settings: Settings):
super(OpticalForwardModuleBase, self).__init__(global_settings=global_settings)
self.component_settings = self.global_settings.get_optical_settings()
self.nx = None
self.ny = None
self.nz = None
self.temporary_output_files = []
@abstractmethod
def forward_model(self,
absorption_cm: np.ndarray,
scattering_cm: np.ndarray,
anisotropy: np.ndarray,
illumination_geometry: IlluminationGeometryBase):
"""
A deriving class needs to implement this method according to its model.
:param absorption_cm: Absorption in units of per centimeter
:param scattering_cm: Scattering in units of per centimeter
:param anisotropy: Dimensionless scattering anisotropy
:param illumination_geometry: A device that defines the illumination geometry
:return: Fluence in units of J/cm^2
"""
pass
def run(self, device: Union[IlluminationGeometryBase, PhotoacousticDevice]) -> None:
"""
Runs the optical simulations. Volumes are first loaded from the HDF5 file and passed to `self.forward_model`; the output
is aggregated in case multiple illuminations are defined by `device` and stored in the same HDF5 file.
:param device: Illumination or Photoacoustic device that defines the illumination geometry
:return: None
"""
self.logger.info("Simulating the optical forward process...")
properties_path = generate_dict_path(Tags.SIMULATION_PROPERTIES,
wavelength=self.global_settings[Tags.WAVELENGTH])
optical_properties = load_hdf5(self.global_settings[Tags.SIMPA_OUTPUT_PATH], properties_path)
absorption = optical_properties[Tags.DATA_FIELD_ABSORPTION_PER_CM][str(self.global_settings[Tags.WAVELENGTH])]
scattering = optical_properties[Tags.DATA_FIELD_SCATTERING_PER_CM][str(self.global_settings[Tags.WAVELENGTH])]
anisotropy = optical_properties[Tags.DATA_FIELD_ANISOTROPY][str(self.global_settings[Tags.WAVELENGTH])]
gruneisen_parameter = optical_properties[Tags.DATA_FIELD_GRUNEISEN_PARAMETER]
del optical_properties
gc.collect()
_device = None
if isinstance(device, IlluminationGeometryBase):
_device = device
elif isinstance(device, PhotoacousticDevice):
_device = device.get_illumination_geometry()
else:
raise TypeError(f"The optical forward modelling does not support devices of type {type(device)}")
results = self.run_forward_model(_device=_device,
device=device,
absorption=absorption,
scattering=scattering,
anisotropy=anisotropy)
fluence = results[Tags.DATA_FIELD_FLUENCE]
if not (Tags.IGNORE_QA_ASSERTIONS in self.global_settings and self.global_settings[Tags.IGNORE_QA_ASSERTIONS]):
assert_array_well_defined(fluence, assume_non_negativity=True, array_name="fluence")
if Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE in self.component_settings:
units = Tags.UNITS_PRESSURE
# Initial pressure should be given in units of Pascal
conversion_factor = 1e6 # 1 J/cm^3 = 10^6 N/m^2 = 10^6 Pa
initial_pressure = (absorption * fluence * gruneisen_parameter *
(self.component_settings[Tags.LASER_PULSE_ENERGY_IN_MILLIJOULE] / 1000)
* conversion_factor)
else:
units = Tags.UNITS_ARBITRARY
initial_pressure = absorption * fluence
if not (Tags.IGNORE_QA_ASSERTIONS in self.global_settings and self.global_settings[Tags.IGNORE_QA_ASSERTIONS]):
assert_array_well_defined(initial_pressure, assume_non_negativity=True, array_name="initial_pressure")
results[Tags.DATA_FIELD_FLUENCE] = fluence
results[Tags.OPTICAL_MODEL_UNITS] = units
results[Tags.DATA_FIELD_INITIAL_PRESSURE] = initial_pressure
optical_output = {}
for k, item in results.items():
optical_output[k] = {self.global_settings[Tags.WAVELENGTH]: item}
optical_output_path = generate_dict_path(Tags.OPTICAL_MODEL_OUTPUT_NAME)
save_hdf5(optical_output, self.global_settings[Tags.SIMPA_OUTPUT_PATH], optical_output_path)
self.logger.info("Simulating the optical forward process...[Done]")
def run_forward_model(self,
_device,
device: Union[IlluminationGeometryBase, PhotoacousticDevice],
absorption: np.ndarray,
scattering: np.ndarray,
anisotropy: np.ndarray) -> Dict:
"""
runs `self.forward_model` as many times as defined by `device` and aggregates the results.
:param _device: device illumination geometry
:param device: class defining illumination
:param absorption: Absorption volume
:param scattering: Scattering volume
:param anisotropy: Dimensionless scattering anisotropy
:return: a dict containing the computed fluence under `Tags.DATA_FIELD_FLUENCE`
"""
results = self.forward_model(absorption_cm=absorption,
scattering_cm=scattering,
anisotropy=anisotropy,
illumination_geometry=_device)
fluence = results[Tags.DATA_FIELD_FLUENCE]
return {Tags.DATA_FIELD_FLUENCE: fluence}
```
|
{
"source": "j-groeneveld/covid",
"score": 2
}
|
#### File: sim/lib/plot.py
```python
import time
import bisect
import numpy as np
import pandas as pd
import networkx as nx
import scipy
import scipy.optimize
from scipy.interpolate import interp1d
import scipy as sp
import random as rd
import os, math
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date
from lib.measures import (MeasureList, BetaMultiplierMeasureBySite,
SocialDistancingForAllMeasure, BetaMultiplierMeasureByType,
SocialDistancingForPositiveMeasure, SocialDistancingByAgeMeasure, SocialDistancingForSmartTracing, ComplianceForAllMeasure)
from lib.rt import compute_daily_rts, R_T_RANGE
import numpy as np
import seaborn as sns
from matplotlib.colors import ListedColormap
TO_HOURS = 24.0
DPI = 200
NO_PLOT = False
TEST_LAG = 48.0 # hours
matplotlib.rcParams.update({
"figure.autolayout": False,
"figure.figsize": (6, 4),
"figure.dpi": 150,
"axes.linewidth": 0.8,
"xtick.major.width": 0.8,
"xtick.minor.width": 0.8,
"ytick.major.width": 0.8,
"ytick.minor.width": 0.8,
"text.usetex": True,
"font.family": "serif", # use serif rather than sans-serif
"font.serif": "Times New Roman", # use "Times New Roman" as the standard font
"font.size": 16,
"axes.titlesize": 16,
"axes.labelsize": 16,
"legend.fontsize": 14,
"legend.frameon": True,
"xtick.labelsize": 14,
"ytick.labelsize": 14,
"lines.linewidth": 2.0,
"lines.markersize": 4,
"grid.linewidth": 0.4,
})
def days_to_datetime(arr, start_date):
# timestamps
ts = arr * 24 * 60 * 60 + pd.Timestamp(start_date).timestamp()
return pd.to_datetime(ts, unit='s')
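# Illustrative example (dates are hypothetical):
# days_to_datetime(np.array([0.0, 1.5]), '2020-03-01') returns pandas timestamps
# for 2020-03-01 00:00 and 2020-03-02 12:00.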
def lockdown_widget(lockdown_at, start_date, lockdown_label_y, ymax,
lockdown_label, ax, ls='--', xshift=0.0, zorder=None):
# Convert x-axis into posix timestamps and use pandas to plot as dates
lckdn_x = days_to_datetime(lockdown_at, start_date=start_date)
ax.plot([lckdn_x, lckdn_x], [0, ymax], linewidth=2.5, linestyle=ls,
color='black', label='_nolegend_', zorder=zorder)
lockdown_label_y = lockdown_label_y or ymax*0.4
ax.text(x=lckdn_x - pd.Timedelta(2.1 + xshift, unit='d'),
y=lockdown_label_y, s=lockdown_label, rotation=90)
def target_widget(show_target, start_date, ax, zorder=None):
txx = np.linspace(0, show_target.shape[0] - 1, num=show_target.shape[0])
txx = days_to_datetime(txx, start_date=start_date)
ax.plot(txx, show_target, linewidth=4, linestyle='', marker='X', ms=6,
color='red', label='COVID-19 case data', zorder=zorder)
class Plotter(object):
"""
Plotting class
"""
def __init__(self):
# plot constants
# check out https://colorhunt.co/
self.color_expo = '#ffcc00'
self.color_iasy = '#00a8cc'
self.color_ipre = '#005082'
self.color_isym = '#000839'
self.color_testing = '#ffa41b'
self.color_posi = '#21bf73'
self.color_nega = '#fd5e53'
self.color_all = '#ffa41b'
self.color_positive = '#00a8cc'
self.color_age = '#005082'
self.color_tracing = '#000839'
self.color_infected = '#000839'
self.filling_alpha = 0.5
self.color_different_scenarios = [
'#dc2ade',
'#21ff53',
'#323edd',
'#ff9021',
'#4d089a',
'#cc0066',
'#ff6666',
'#216353',
'#66cccc',
'#ff2222'
]
self.color_different_scenarios_alt = [
'#a1dab4',
'#41b6c4',
'#2c7fb8',
'#253494',
]
# sequential
# self.color_different_scenarios = [
# # '#ffffcc',
# '#c7e9b4',
# '#7fcdbb',
# '#41b6c4',
# '#2c7fb8',
# '#253494',
# '#000000'
# ]
# 2D visualization
self.density_alpha = 0.7
self.marker_home = "^"
self.marker_site = "o"
self.color_home = '#000839'
self.color_site = '#000000'
self.size_home = 80
self.size_site = 300
def __is_state_at(self, sim, r, state, t):
if state == 'posi' or state == 'nega':
return (sim.state_started_at[state][r] - TEST_LAG <= t) & (sim.state_ended_at[state][r] - TEST_LAG > t)
else:
return (sim.state_started_at[state][r] <= t) & (sim.state_ended_at[state][r] > t)
def __state_started_before(self, sim, r, state, t):
if state == 'posi' or state == 'nega':
return (sim.state_started_at[state][r] - TEST_LAG <= t)
else:
return (sim.state_started_at[state][r] <= t)
def __is_contained_at(self, sim, r, measure, t):
contained = np.zeros(sim.n_people, dtype='bool')
for i in range(sim.n_people):
if measure == 'SocialDistancingForAllMeasure':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingForAllMeasure, t=t, j=i)
elif measure == 'SocialDistancingForSmartTracing':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingForSmartTracing, t=t, j=i)
elif measure == 'SocialDistancingByAgeMeasure':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingByAgeMeasure, t=t, age=sim.people_age[r, i])
elif measure == 'SocialDistancingForPositiveMeasure':
contained[i] = sim.measure_list[r].is_contained_prob(SocialDistancingForPositiveMeasure,
t=t, j=i,
state_posi_started_at=sim.state_started_at['posi'][r, :],
state_posi_ended_at=sim.state_ended_at['posi'][r, :],
state_resi_started_at=sim.state_started_at['resi'][r, :],
state_dead_started_at=sim.state_started_at['dead'][r, :])
else:
raise ValueError('Social distancing measure unknown.')
return contained
def __comp_state_cumulative(self, sim, state, acc):
'''
Computes the cumulative count of `state` over time [0, self.max_time] with given accuracy `acc`.
'''
ts, means, stds = [], [], []
for t in np.linspace(0.0, sim.max_time, num=acc, endpoint=True):
restarts = [np.sum(self.__state_started_before(sim, r, state, t))
for r in range(sim.random_repeats)]
ts.append(t/TO_HOURS)
means.append(np.mean(restarts))
stds.append(np.std(restarts))
return np.array(ts), np.array(means), np.array(stds)
def __comp_state_over_time(self, sim, state, acc):
'''
Computes the number of people in `state` over time [0, self.max_time] with given accuracy `acc`.
'''
ts, means, stds = [], [], []
for t in np.linspace(0.0, sim.max_time, num=acc, endpoint=True):
restarts = [np.sum(self.__is_state_at(sim, r, state, t))
for r in range(sim.random_repeats)]
ts.append(t/TO_HOURS)
means.append(np.mean(restarts))
stds.append(np.std(restarts))
return np.array(ts), np.array(means), np.array(stds)
def __comp_contained_over_time(self, sim, measure, acc):
'''
Computes the number of people contained by `measure` over time [0, self.max_time] with given accuracy `acc`.
'''
ts, means, stds = [], [], []
for t in np.linspace(0.0, sim.max_time, num=acc, endpoint=True):
restarts = [np.sum(self.__is_contained_at(sim, r, measure, t))
for r in range(sim.random_repeats)]
ts.append(t/TO_HOURS)
means.append(np.mean(restarts))
stds.append(np.std(restarts))
return np.array(ts), np.array(means), np.array(stds)
def plot_cumulative_infected(self, sim, title='Example', filename='daily_inf_0',
figsize=(6, 5), errorevery=20, acc=1000, ymax=None,
lockdown_label='Lockdown', lockdown_at=None,
lockdown_label_y=None, show_target=None,
start_date='1970-01-01',
subplot_adjust=None, legend_loc='upper right'):
'''
Plots cumulative infected split by group,
averaged over random restarts, using error bars for std-dev
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ts, iasy_mu, iasy_sig = self.__comp_state_cumulative(sim, 'iasy', acc)
# _, ipre_mu, ipre_sig = self.__comp_state_cumulative(sim, 'ipre', acc)
_, isym_mu, isym_sig = self.__comp_state_cumulative(sim, 'isym', acc)
# _, expo_mu, iexpo_sig = self.__comp_state_cumulative(sim, 'expo', acc)
# _, posi_mu, posi_sig = self.__comp_state_cumulative(sim, 'posi', acc)
line_xaxis = np.zeros(ts.shape)
line_iasy = iasy_mu
line_isym = iasy_mu + isym_mu
error_isym = np.sqrt(iasy_sig**2 + isym_sig**2)
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
# lines
ax.plot(ts, line_iasy, c='black', linestyle='-')
ax.errorbar(ts, line_isym, yerr=error_isym, c='black', linestyle='-',
elinewidth=0.8, errorevery=errorevery, capsize=3.0)
# filling
ax.fill_between(ts, line_xaxis, line_iasy, alpha=self.filling_alpha, label='Asymptomatic',
edgecolor=self.color_iasy, facecolor=self.color_iasy, linewidth=0, zorder=0)
ax.fill_between(ts, line_iasy, line_isym, alpha=self.filling_alpha, label='Symptomatic',
edgecolor=self.color_isym, facecolor=self.color_isym, linewidth=0, zorder=0)
# limits
if ymax is None:
ymax = 1.5 * np.max(iasy_mu + isym_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('People')
# extra
if lockdown_at is not None:
lockdown_widget(lockdown_at, start_date,
lockdown_label_y, ymax,
lockdown_label, ax)
if show_target is not None:
target_widget(show_target, start_date, ax)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
#set ticks every week
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
# legend
ax.legend(loc=legend_loc, borderaxespad=0.5)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_daily_infected(self, sim, title='Example', filename='daily_inf_0',
figsize=(6, 5), errorevery=20, acc=1000, ymax=None,
lockdown_label='Lockdown', lockdown_at=None,
lockdown_label_y=None, show_target=None,
lockdown_end=None,
start_date='1970-01-01',
subplot_adjust=None, legend_loc='upper right'):
        '''
        Plots daily infected split by infection state,
        averaged over random restarts, using error bars for std-dev
        '''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ts, iasy_mu, iasy_sig = self.__comp_state_over_time(sim, 'iasy', acc)
_, ipre_mu, ipre_sig = self.__comp_state_over_time(sim, 'ipre', acc)
_, isym_mu, isym_sig = self.__comp_state_over_time(sim, 'isym', acc)
# _, expo_mu, iexpo_sig = self.__comp_state_over_time(sim, 'expo', acc)
# _, posi_mu, posi_sig = self.__comp_state_over_time(sim, 'posi', acc)
line_xaxis = np.zeros(ts.shape)
line_iasy = iasy_mu
line_ipre = iasy_mu + ipre_mu
line_isym = iasy_mu + ipre_mu + isym_mu
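        # std-dev of the stacked top line: variances add, assuming the three state counts are independent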
error_isym = np.sqrt(iasy_sig**2 + ipre_sig**2 + isym_sig**2)
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
# lines
ax.plot(ts, line_iasy,
c='black', linestyle='-')
ax.plot(ts, line_ipre,
c='black', linestyle='-')
ax.errorbar(ts, line_isym, yerr=error_isym, c='black', linestyle='-',
elinewidth=0.8, errorevery=errorevery, capsize=3.0)
# filling
ax.fill_between(ts, line_xaxis, line_iasy, alpha=self.filling_alpha, label='Asymptomatic',
edgecolor=self.color_iasy, facecolor=self.color_iasy, linewidth=0, zorder=0)
ax.fill_between(ts, line_iasy, line_ipre, alpha=self.filling_alpha, label='Pre-symptomatic',
edgecolor=self.color_ipre, facecolor=self.color_ipre, linewidth=0, zorder=0)
ax.fill_between(ts, line_ipre, line_isym, alpha=self.filling_alpha, label='Symptomatic',
edgecolor=self.color_isym, facecolor=self.color_isym, linewidth=0, zorder=0)
# limits
if ymax is None:
ymax = 1.5 * np.max(iasy_mu + ipre_mu + isym_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('People')
# extra
if lockdown_at is not None:
lockdown_widget(lockdown_at, start_date,
lockdown_label_y, ymax,
lockdown_label, ax)
if lockdown_end is not None:
lockdown_widget(lockdown_at=lockdown_end, start_date=start_date,
lockdown_label_y=lockdown_label_y, ymax=ymax,
lockdown_label='End of lockdown', ax=ax, ls='dotted')
if show_target is not None:
target_widget(show_target, start_date, ax)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
#set ticks every week
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
# legend
ax.legend(loc=legend_loc, borderaxespad=0.5)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_daily_tested(self, sim, title='Example', filename='daily_tested_0', figsize=(10, 10), errorevery=20,
acc=1000, ymax=None):
        '''
Plots daily tested, positive daily tested, negative daily tested
averaged over random restarts, using error bars for std-dev
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# automatically shifted by `test_lag` in the function
ts, posi_mu, posi_sig = self.__comp_state_over_time(sim, 'posi', acc)
_, nega_mu, nega_sig = self.__comp_state_over_time(sim, 'nega', acc)
line_xaxis = np.zeros(ts.shape)
line_posi = posi_mu
line_nega = posi_mu + nega_mu
error_posi = posi_sig
error_nega = nega_sig + posi_sig
T = posi_mu.shape[0]
# lines
ax.errorbar(ts, posi_mu, yerr=posi_sig, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
ax.errorbar(ts, nega_mu, yerr=nega_sig, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
# filling
ax.fill_between(ts, line_xaxis, posi_mu, alpha=self.filling_alpha, label=r'Positive tests',
edgecolor=self.color_posi, facecolor=self.color_posi, linewidth=0, zorder=0)
ax.fill_between(ts, posi_mu, nega_mu, alpha=self.filling_alpha, label=r'Negative tests',
edgecolor=self.color_nega, facecolor=self.color_nega, linewidth=0, zorder=0)
# axis
ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max(posi_mu + nega_mu)
ax.set_ylim((0, ymax))
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel(r'[people]')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.70)
ax.set_title(title, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_daily_at_home(self, sim, title='Example', filename='daily_at_home_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None):
        '''
        Plots the daily number of people staying at home under each social distancing measure,
        averaged over random restarts, using error bars for std-dev
        '''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ts, all_mu, all_sig = self.__comp_contained_over_time(sim, 'SocialDistancingForAllMeasure', acc)
_, positive_mu, positive_sig = self.__comp_contained_over_time(sim, 'SocialDistancingForPositiveMeasure', acc)
_, age_mu, age_sig = self.__comp_contained_over_time(sim, 'SocialDistancingByAgeMeasure', acc)
_, tracing_mu, tracing_sig = self.__comp_contained_over_time(sim, 'SocialDistancingForSmartTracing', acc)
_, iasy_mu, iasy_sig = self.__comp_state_over_time(sim, 'iasy', acc)
_, ipre_mu, ipre_sig = self.__comp_state_over_time(sim, 'ipre', acc)
_, isym_mu, isym_sig = self.__comp_state_over_time(sim, 'isym', acc)
line_xaxis = np.zeros(ts.shape)
line_all = all_mu
line_positive = positive_mu
line_age = age_mu
line_tracing = tracing_mu
line_infected = iasy_mu + ipre_mu + isym_mu
error_all = all_sig
error_positive = positive_sig
error_age = age_sig
error_tracing = tracing_sig
error_infected = np.sqrt(np.square(iasy_sig) + np.square(ipre_sig) + np.square(isym_sig))
# lines
ax.errorbar(ts, line_infected, label=r'Total infected', errorevery=errorevery, c=self.color_infected, linestyle='--', yerr=error_infected)
ax.errorbar(ts, line_all, yerr=error_all, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
ax.errorbar(ts, line_positive, yerr=error_positive, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
ax.errorbar(ts, line_age, yerr=error_age, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
ax.errorbar(ts, line_tracing, yerr=error_tracing, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
# filling
ax.fill_between(ts, line_xaxis, line_all, alpha=self.filling_alpha, label=r'SD for all',
edgecolor=self.color_all, facecolor=self.color_all, linewidth=0, zorder=0)
ax.fill_between(ts, line_xaxis, line_positive, alpha=self.filling_alpha, label=r'SD for positively tested',
edgecolor=self.color_positive, facecolor=self.color_positive, linewidth=0, zorder=0)
ax.fill_between(ts, line_xaxis, line_age, alpha=self.filling_alpha, label=r'SD for age group',
edgecolor=self.color_age, facecolor=self.color_age, linewidth=0, zorder=0)
ax.fill_between(ts, line_xaxis, line_tracing, alpha=self.filling_alpha, label=r'SD for traced contacts',
edgecolor=self.color_tracing, facecolor=self.color_tracing, linewidth=0, zorder=0)
# axis
ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max([all_mu, positive_mu, age_mu, tracing_mu])
ax.set_ylim((0, ymax))
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel(r'[people]')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.70)
ax.set_title(title, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def compare_total_infections(self, sims, titles, figtitle='Title',
filename='compare_inf_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None,
lockdown_label='Lockdown', lockdown_at=None, lockdown_label_y=None,
show_positives=False, show_legend=True, legendYoffset=0.0, legend_is_left=False,
subplot_adjust=None, start_date='1970-01-01', first_one_dashed=False):
        '''
Plots total infections for each simulation, named as provided by `titles`
to compare different measures/interventions taken. Colors taken as defined in __init__, and
averaged over random restarts, using error bars for std-dev
'''
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
for i in range(len(sims)):
if acc > sims[i].max_time:
acc = sims[i].max_time
ts, iasy_mu, iasy_sig = self.__comp_state_over_time(sims[i], 'iasy', acc)
_, ipre_mu, ipre_sig = self.__comp_state_over_time(sims[i], 'ipre', acc)
_, isym_mu, isym_sig = self.__comp_state_over_time(sims[i], 'isym', acc)
_, posi_mu, posi_sig = self.__comp_state_over_time(sims[i], 'posi', acc)
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
line_xaxis = np.zeros(ts.shape)
line_infected = iasy_mu + ipre_mu + isym_mu
error_infected = np.sqrt(np.square(iasy_sig) + np.square(ipre_sig) + np.square(isym_sig))
# lines
if show_positives:
ax.errorbar(ts, line_infected, yerr=error_infected, label='[Infected] ' + titles[i], errorevery=errorevery,
c=self.color_different_scenarios[i], linestyle='-')
T = posi_mu.shape[0]
ax.errorbar(ts, posi_mu, yerr=posi_sig, label='[Tested positive]', errorevery=errorevery,
c=self.color_different_scenarios[i], linestyle='--', elinewidth=0.8)
else:
ax.errorbar(ts, line_infected, yerr=error_infected, label=titles[i], errorevery=errorevery, elinewidth=0.8,
capsize=3.0, c=self.color_different_scenarios[i], linestyle='--' if i == 0 and first_one_dashed else '-')
# axis
# ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max(iasy_mu + ipre_mu + isym_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('People')
if lockdown_at is not None:
lockdown_widget(lockdown_at, start_date,
lockdown_label_y, ymax,
lockdown_label, ax, xshift=0.5)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
#set ticks every week
# ax.xaxis.set_major_locator(mdates.WeekdayLocator())
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2))
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
if show_legend:
# legend
if legend_is_left:
leg = ax.legend(loc='upper left', borderaxespad=0.5)
else:
leg = ax.legend(loc='upper right', borderaxespad=0.5)
if legendYoffset != 0.0:
# Get the bounding box of the original legend
                bb = leg.get_bbox_to_anchor().transformed(ax.transAxes.inverted())
# Change to location of the legend.
bb.y0 += legendYoffset
bb.y1 += legendYoffset
leg.set_bbox_to_anchor(bb, transform = ax.transAxes)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def compare_total_fatalities_and_hospitalizations(self, sims, titles, figtitle=r'Hospitalizations and Fatalities',
filename='compare_inf_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None, lockdown_at=None,
subplot_adjust=None, start_date='1970-01-01', first_one_dashed=False):
        '''
Plots total fatalities and hospitalizations for each simulation, named as provided by `titles`
to compare different measures/interventions taken. Colors taken as defined in __init__, and
averaged over random restarts, using error bars for std-dev
'''
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# hospitalizations
for i in range(len(sims)):
if acc > sims[i].max_time:
acc = sims[i].max_time
ts, hosp_mu, hosp_sig = self.__comp_state_over_time(
sims[i], 'hosp', acc)
ts, dead_mu, dead_sig = self.__comp_state_over_time(
sims[i], 'dead', acc)
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
# lines
ax.errorbar(ts, hosp_mu, yerr=hosp_sig, label=titles[i], errorevery=errorevery,
c=self.color_different_scenarios[i], linestyle='-', elinewidth=0.8, capsize=3.0)
ax.errorbar(ts, dead_mu, yerr=dead_sig, errorevery=errorevery,
c=self.color_different_scenarios[i], linestyle='--', elinewidth=0.8, capsize=3.0)
# axis
if ymax is None:
            ymax = 1.5 * np.max(hosp_mu + dead_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('People')
if lockdown_at is not None:
ax.plot(lockdown_at * np.ones(acc), np.linspace(0, ymax, num=acc),
linewidth=1, linestyle='--', color='black', zorder=10)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
#set ticks every week
# ax.xaxis.set_major_locator(mdates.WeekdayLocator())
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2))
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
# legend
# ax.legend(loc='upper right', borderaxespad=0.5)
ax.legend(loc='upper left', borderaxespad=0.5)
subplot_adjust = subplot_adjust or {
'bottom': 0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_2d_infections_at_time(self, sim, at_time, density_bandwidth=1.0, restart=0,
title='Example', filename='2d_inf_0', figsize=(10, 10), acc=1000, ymax=None):
'''
Plots 2d visualization using mobility object. The bandwidth set by `density_bandwidth`
determines the bandwidth of the RBF kernel in KDE used to generate the plot.
Smaller means more affected by local changes. Set the colors and markers in the __init__ function
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# infections
r = restart
is_expo = self.__is_state_at(sim, r, 'expo', at_time)
is_iasy = self.__is_state_at(sim, r, 'iasy', at_time)
is_ipre = self.__is_state_at(sim, r, 'ipre', at_time)
is_isym = self.__is_state_at(sim, r, 'isym', at_time)
is_infected = is_iasy | is_ipre | is_isym
no_state = (1 - is_infected) & (1 - is_expo)
idx_expo = np.where(is_expo)[0]
idx_infected = np.where(is_infected)[0]
idx_none = np.where(no_state)[0]
# self.color_isym = 'red'
# self.color_expo= 'yellow'
### sites
site_loc = sim.site_loc
ax.scatter(site_loc[:, 0], site_loc[:, 1], alpha=self.filling_alpha, label='public sites',
marker=self.marker_site, color=self.color_site, facecolors=self.color_site, s=self.size_site)
### home locations and their states
home_loc = sim.home_loc
# no state
ax.scatter(home_loc[idx_none, 0], home_loc[idx_none, 1],
marker=self.marker_home, color=self.color_home,
facecolors='none', s=self.size_home)
try:
# expo
ax.scatter(home_loc[idx_expo, 0], home_loc[idx_expo, 1],
marker=self.marker_home, color=self.color_home,
facecolors=self.color_expo, s=self.size_home, label='exposed households')
sns.kdeplot(home_loc[idx_expo, 0], home_loc[idx_expo, 1], shade=True, alpha=self.density_alpha,
shade_lowest=False, cbar=False, ax=ax, color=self.color_expo, bw=density_bandwidth, zorder=0)
# infected
ax.scatter(home_loc[idx_infected, 0], home_loc[idx_infected, 1],
marker=self.marker_home, color=self.color_home,
facecolors=self.color_isym, s=self.size_home, label='infected households')
sns.kdeplot(home_loc[idx_infected, 0], home_loc[idx_infected, 1], shade=True, alpha=self.density_alpha,
shade_lowest=False, cbar=False, ax=ax, color=self.color_isym, bw=density_bandwidth, zorder=0)
        except Exception:
print('KDE failed, likely no exposed and infected at this time. Try different timing.')
plt.close()
return
# axis
ax.set_xlim((-0.1, 1.1))
ax.set_ylim((-0.1, 1.1))
plt.axis('off')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.85)
ax.set_title(title, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def compare_hospitalizations_over_time(self, sims, titles, figtitle='Hospitalizations', filename='compare_hosp_0',
capacity_line_at=20, figsize=(10, 10), errorevery=20, acc=1000, ymax=None):
        '''
Plots total hospitalizations for each simulation, named as provided by `titles`
to compare different measures/interventions taken. Colors taken as defined in __init__, and
averaged over random restarts, using error bars for std-dev.
The value of `capacity_line_at` defines the y-intercept of the hospitalization capacity line
'''
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
for i in range(len(sims)):
if acc > sims[i].max_time:
acc = sims[i].max_time
ts, line_hosp, error_sig = self.__comp_state_over_time(
sims[i], 'hosp', acc)
line_xaxis = np.zeros(ts.shape)
# lines
ax.errorbar(ts, line_hosp, yerr=error_sig, errorevery=errorevery,
c='black', linestyle='-', elinewidth=0.8)
# filling
ax.fill_between(ts, line_xaxis, line_hosp, alpha=self.filling_alpha, zorder=0,
label=r'Hospitalized under: ' + titles[i], edgecolor=self.color_different_scenarios[i],
facecolor=self.color_different_scenarios[i], linewidth=0)
# capacity line
ax.plot(ts, capacity_line_at * np.ones(ts.shape[0]), label=r'Max. hospitalization capacity',
c='red', linestyle='--', linewidth=4.0)
# axis
ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max(line_hosp + error_sig)
ax.set_ylim((0, ymax))
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel(r'[people]')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.70)
ax.set_title(figtitle, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_positives_vs_target(self, sim, targets, title='Example',
filename='inference_0', figsize=(6, 5), errorevery=1, acc=17, ymax=None,
start_date='1970-01-01', lockdown_label='Lockdown', lockdown_at=None,
lockdown_label_y=None, subplot_adjust=None):
        '''
Plots daily tested averaged over random restarts, using error bars for std-dev
together with targets from inference
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig, ax = plt.subplots(figsize=figsize)
# inference
# automatically shifted by `test_lag` in the function
ts, posi_mu, posi_sig = self.__comp_state_over_time(sim, 'posi', acc)
T = posi_mu.shape[0]
xx = days_to_datetime(ts, start_date=start_date)
ax.plot(xx, posi_mu, c='k', linestyle='-',
label='COVID-19 simulated case data')
ax.fill_between(xx, posi_mu - posi_sig, posi_mu + posi_sig,
color='grey', alpha=0.1, linewidth=0.0)
# target
target_widget(targets, start_date, ax)
# axis
#ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max(posi_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('Positive cases')
if lockdown_at is not None:
lockdown_widget(lockdown_at, start_date,
lockdown_label_y, ymax,
lockdown_label, ax)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
#set ticks every week
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
# legend
ax.legend(loc='upper left', borderaxespad=0.5)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI)#, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_daily_rts(self, sims, filename, start_date, titles=None, sigma=None,
r_t_range=R_T_RANGE, window=3, figsize=(6, 5),
subplot_adjust=None, lockdown_label='Lockdown',
lockdown_at=None, lockdown_label_y=None, ymax=None,
colors=['grey'], fill_between=True, draw_dots=True,
errorevery=1, show_legend=False, xtick_interval=1, ci=0.9):
# If a single summary is provided
if not isinstance(sims, list):
sims = [sims]
sigma = [sigma]
results = list()
for i, sim in enumerate(sims):
res = compute_daily_rts(sim, start_date, sigma[i], r_t_range, window, ci)
results.append(res)
# Colors
ABOVE = [1,0,0]
MIDDLE = [1,1,1]
BELOW = [0,0,0]
cmap = ListedColormap(np.r_[
np.linspace(BELOW,MIDDLE,25),
np.linspace(MIDDLE,ABOVE,25)
])
color_mapped = lambda y: np.clip(y, .5, 1.5)-.5
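        # map R_t onto [0, 1] for the colormap: clip to [0.5, 1.5] and shift so R_t = 1 lands on the white midpoint (black below 1, red above 1)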
ymax_computed = 0.0 # Keep track of max y to set limit
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
for i, result in enumerate(results):
index = result['ML'].index
values = result['ML'].values
# Plot dots and line
ax.plot(index, values, c=colors[i], zorder=1, alpha=1.0)
if draw_dots:
ax.scatter(index, values, s=40, lw=0.0,
c=cmap(color_mapped(values)),
edgecolors='k', zorder=2)
# Aesthetically, extrapolate credible interval by 1 day either side
lowfn = interp1d(date2num(index), result[f'Low_{ci*100:.0f}'].values,
bounds_error=False, fill_value='extrapolate')
highfn = interp1d(date2num(index), result[f'High_{ci*100:.0f}'].values,
bounds_error=False, fill_value='extrapolate')
extended = pd.date_range(start=index[0], end=index[-1])
error_low = lowfn(date2num(extended))
error_high = highfn(date2num(extended))
if fill_between:
ax.fill_between(extended, error_low, error_high,
color=colors[i], alpha=0.1, linewidth=0.0)
else:
# Ignore first value which is just prior, not informed by data
ax.errorbar(x=index[1:], y=values[1:], label=titles[i],
yerr=np.vstack((result[f'Low_{ci*100:.0f}'], result[f'High_{ci*100:.0f}']))[:,1:],
color=colors[i], linewidth=1.0,
elinewidth=0.8, capsize=3.0,
errorevery=errorevery)
ymax_computed = max(ymax_computed, np.max(error_high))
# Plot horizontal line at R_t = 1
        ax.axhline(1.0, c='k', lw=1, alpha=.25)
# limits
ymax = ymax or 1.2 * ymax_computed
        ax.set_ylim((0, ymax))
if show_legend:
ax.legend(loc='upper left', borderaxespad=0.5)
# extra
if lockdown_at is not None:
lockdown_widget(lockdown_at, start_date,
lockdown_label_y, ymax,
lockdown_label, ax, zorder=-200)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# Set label
ax.set_ylabel(r'$R_t$')
#set ticks every week
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=xtick_interval))
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI)#, bbox_inches='tight')
if NO_PLOT:
plt.close()
```
#### File: sim/lib/utils.py
```python
from functools import wraps
def enforce_init_run(fn):
@wraps(fn)
def wrapped(self, *args, **kwargs):
if self._is_init:
return fn(self, *args, **kwargs)
else:
raise Exception(('Model is not properly set. '
'`set_data` must be called first.'))
return wrapped
```
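A minimal, self-contained usage sketch of `enforce_init_run` (the decorator is restated here so the snippet runs standalone; the `Model` class below is hypothetical, for illustration only):
```python
from functools import wraps

def enforce_init_run(fn):
    @wraps(fn)
    def wrapped(self, *args, **kwargs):
        if self._is_init:
            return fn(self, *args, **kwargs)
        else:
            raise Exception('Model is not properly set. `set_data` must be called first.')
    return wrapped

class Model:
    def __init__(self):
        self._is_init = False  # flag inspected by the decorator

    def set_data(self, data):
        self.data = data
        self._is_init = True

    @enforce_init_run
    def fit(self):
        # only reachable once set_data() has flipped _is_init to True
        return len(self.data)

m = Model()
m.set_data([1, 2, 3])
print(m.fit())  # -> 3; calling fit() before set_data() raises instead
```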
|
{
"source": "j-groeneveld/delivery-kitchen",
"score": 4
}
|
#### File: delivery-kitchen/src/order.py
```python
from enum import Enum
from time import time
class OrderStatus(Enum):
waiting = 1
delivered = 2
discarded = 3
class Order:
""" Represents each order and in charge of computing current value.
Attributes:
name:
temp:
decay_rate:
shelf_life:
created_at:
status:
overflow_in:
overflow_out:
_discarded_at:
_delivered_at:
### ~ @property[ies] ~ ###
terminated_at:
delivered_at:
discarded_at:
status:
total_decay:
order_age:
value:
infant_value:
regular_decay:
overflow_decay:
Public methods:
has_value: Checks to see whether :attr: value is > 0
"""
def __init__(self, data: dict, overflow: bool = False):
try:
self.name = data["name"]
self.temp = data["temp"]
self.decay_rate = float(data["decayRate"])
self.shelf_life = int(data["shelfLife"])
except KeyError as e:
# TODO meh bug report https://bugs.python.org/issue2651
raise KeyError(
"Inadequate data to create Order.\n"
f"Missing key: {e}\n"
f"data: {data}\n"
)
self.created_at = time()
self.status = OrderStatus.waiting
self.overflow_in = time() if overflow else None
self.overflow_out = None
self._discarded_at = None
self._delivered_at = None
def __repr__(self) -> str:
return self.__str__()
def __str__(self) -> str:
return (
f"~~ {self.name} ~~\n"
# f"status: {self.status.name}\n"
# f"temp: {self.temp}\n"
# f"shelf_life: {self.shelf_life}\n"
f"value: {self.value}\n"
# f"\tinfant_value: {self.infant_value}\n"
# f"total_decay: {self.total_decay}\n"
# f"\ttemp: {self.regular_decay}\n"
# f"\toverflow_decay: {self.overflow_decay}\n"
# f"\tdecay_rate: {self.decay_rate}\n"
# f"created_at: {self.created_at}\n"
# f"\toverflow_in: {self.overflow_in}\n"
# f"\toverflow_out: {self.overflow_out}\n"
)
@property
def terminated_at(self) -> float:
"""Basically serves to freeze the aging process of an order if terminated. """
return self.delivered_at or self.discarded_at
@property
def delivered_at(self) -> float:
return self._delivered_at
@property
def discarded_at(self) -> float:
return self._discarded_at
@property
    def status(self) -> OrderStatus:
return self._status
@property
def total_decay(self) -> float:
return self.regular_decay + self.overflow_decay
@property
def order_age(self) -> float:
return self._compute_order_age()
@property
def value(self) -> float:
return self._get_value()
@property
def infant_value(self) -> float:
return self.shelf_life
@property
def regular_decay(self) -> float:
return self._compute_regular_decay()
@property
def overflow_decay(self) -> float:
return self._compute_overflow_decay()
@delivered_at.setter
def delivered_at(self, delivered_at: float) -> None:
self._delivered_at = delivered_at
self.status = OrderStatus.delivered
@discarded_at.setter
def discarded_at(self, discarded_at: float) -> None:
self._discarded_at = discarded_at
self.status = OrderStatus.discarded
@status.setter
def status(self, status: OrderStatus) -> None:
if type(status) != OrderStatus:
raise ValueError("status must be of type OrderStatus")
self._status = status
###########################
### ~ Public methods ~ ###
###########################
def has_value(self) -> bool:
return self.value > 0
###########################
### ~ Private methods ~ ###
###########################
def _compute_overflow_decay(self) -> float:
""" Computes decay attributed to being on overflow shelf. """
return 2 * self.decay_rate * self._compute_overflow_shelf_age()
def _compute_regular_decay(self) -> float:
""" Computes decay attributed to being on a regular shelf. """
return self.decay_rate * self._compute_regular_shelf_age()
def _compute_order_age(self) -> float:
start = self.created_at
end = self.terminated_at if self._is_terminated() else time()
return end - start
def _compute_overflow_shelf_age(self) -> float:
if not self._has_overflow():
return 0.0
overflow_out = time() if self.overflow_out is None else self.overflow_out
return overflow_out - self.overflow_in
def _compute_regular_shelf_age(self) -> float:
return self.order_age - self._compute_overflow_shelf_age()
def _currently_on_overflow_shelf(self) -> bool:
return self._has_overflow() and self.overflow_out is None
    def _get_value(self) -> float:
        """ Compute value based on the following formula:
value = ([shelf life] - [order age]) - ([overflow shelf decay]) - ([regular shelf decay])
"""
return self.infant_value - self.order_age - self.total_decay
def _has_overflow(self) -> bool:
return self.overflow_in is not None
def _is_terminated(self) -> bool:
return self.terminated_at is not None
```
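A short usage sketch of the `Order` API above (the field values are made up; the required keys mirror `__init__`, and the import path is an assumption):
```python
from time import sleep, time
from order import Order, OrderStatus  # assumed import path for src/order.py

data = {"name": "Cheese Pizza", "temp": "hot", "decayRate": 0.5, "shelfLife": 20}
order = Order(data)           # starts in OrderStatus.waiting with value == shelfLife
sleep(1)                      # let the order age for about a second
print(round(order.value, 1))  # ~18.5: 20 (shelf life) - 1 (age) - 0.5 (regular decay)
print(order.has_value())      # True while value > 0
order.delivered_at = time()   # setter freezes aging and flips the status
print(order.status is OrderStatus.delivered)  # True
```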
|
{
"source": "j-groeneveld/streaming-serverless",
"score": 2
}
|
#### File: py/lib/validator_helpers.py
```python
import os
import re
import json
import yaml
EVENTS = yaml.safe_load(open("./events.yml"))
def create_py_compliant_event(event):
original_variables = event.get("variables")
old_values = ["$"]
old_values.extend(original_variables)
new_values = [""]
new_values.extend([_convert_camel_to_underscore(x) for x in original_variables])
py_compliant_event_str = _multi_replace(str(event), old_values, new_values)
return eval(py_compliant_event_str)
def create_sns_params(event):
notifications = {} if not event else _create_notification_params(event)
return (
{}
if not notifications
else dict(Message=json.dumps(notifications), MessageStructure="json")
)
def handle_validator_results(sns, sns_params, topic_name):
return (
"Not a valid event"
if not sns_params
else _publish_to_topic(sns, sns_params, topic_name)
)
def interpolate_vars(event, **resource_data):
variable_map = _handle_variables(event, **resource_data)
event["variable_map"] = variable_map
notifications = event.get("notifications")
for medium, config in notifications.items():
assert isinstance(config, dict)
notifications[medium] = {
attr_key: attr_val.format(**variable_map)
for attr_key, attr_val in config.items()
}
return event
def parse_validator_method(event_name):
return f"validate_{event_name.lower()}"
""" Methods to be used internally within this module"""
def _convert_camel_to_underscore(key):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", key)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
def _create_notification_params(event):
notifications = event.get("notifications")
variable_map = event.get("variable_map")
assert isinstance(notifications, dict)
assert isinstance(variable_map, dict)
for key in notifications.keys():
notifications[key]["to"] = variable_map.get(f"{key}_to")
notifications[key][
"body"
] += f". Sent from my {os.environ['AWS_EXECUTION_ENV']}!"
return {"default": dict(notifications=notifications)}
def _get_topic_arn(sns, topic_name):
topics = sns.list_topics().get("Topics")
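    # next() raises StopIteration if no topic with the given name exists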
return next(
topic["TopicArn"]
for topic in topics
if _get_topic_name_from_ARN(topic.get("TopicArn")) == topic_name
)
def _get_topic_name_from_ARN(arn):
"""
    Returns the text after the final colon; the result is not meaningful if arn is invalid
Example
arn: "arn:aws:sns:us-east-1:123456789012:notifications"
"""
return arn.split(":")[-1]
def _handle_variables(event, **resource_data):
def get_subject_task(**kwargs):
return kwargs.get("new_task") or kwargs.get("old_task") or dict()
mappings = dict(
sms_to=lambda **kwargs: get_subject_task(**kwargs).get("assignee_phone"),
email_to=lambda **kwargs: get_subject_task(**kwargs).get("assignee_email"),
task_title=lambda **kwargs: get_subject_task(**kwargs).get("title"),
)
return {key: mappings[key](**resource_data) for key in event.get("variables")}
def _multi_replace(s, old_values, new_values):
"""
Given a string and replace_me with_me lists, it returns the replaced string.
    :param str s: string to execute replacements on
    :param old_values: old values to be replaced
    :param new_values: values to be substituted in place of old values
:rtype: str
"""
old_values = [re.escape(v) for v in old_values]
replacements = dict(zip(old_values, new_values))
pattern = re.compile("|".join(replacements.keys()))
s = pattern.sub(lambda match: replacements[re.escape(match.group())], s)
return s
def _publish_to_topic(sns, sns_params, topic_name):
topic_arn = _get_topic_arn(sns, topic_name)
return sns.publish(TopicArn=topic_arn, **sns_params)
```
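A small standalone sketch of the two naming helpers (restated here so the snippet runs without the module; the inputs are made up for illustration):
```python
import re

def _convert_camel_to_underscore(key):
    s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", key)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()

def parse_validator_method(event_name):
    return f"validate_{event_name.lower()}"

print(_convert_camel_to_underscore("newTask"))        # -> new_task
print(_convert_camel_to_underscore("assigneePhone"))  # -> assignee_phone
print(parse_validator_method("TASK_CREATED"))         # -> validate_task_created
```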
|
{
"source": "jgromero/football",
"score": 3
}
|
#### File: gfootball/env/football_env.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import importlib
import logging
from gfootball.env import config as cfg
from gfootball.env import constants
from gfootball.env import football_action_set
from gfootball.env import football_env_wrapper
import gym
import gym.spaces # @jgromero: added to support spaces in Google Colaboratory
import numpy as np
class FootballEnv(gym.Env):
"""Allows multiple players to play in the same environment."""
def __init__(self, config):
self._config = config
player_config = {'index': 0}
# There can be at most one agent at a time. We need to remember its
# team and the index on the team to generate observations appropriately.
self._agent = None
self._agent_left_team = True
self._agent_index = -1
self._left_players = self._construct_players(config['left_players'],
player_config, True)
self._right_players = self._construct_players(config['right_players'],
player_config, False)
self._env = football_env_wrapper.FootballEnvWrapper(self._config)
self._num_actions = len(football_action_set.get_action_set(self._config))
self.last_observation = None
self._last_swapped_sides = False
@property
def action_space(self):
if self._config.number_of_players_agent_controls() > 1:
return gym.spaces.MultiDiscrete(
[self._num_actions] * self._config.number_of_players_agent_controls())
return gym.spaces.Discrete(self._num_actions)
def _construct_players(self, definitions, config, left_team):
result = []
position = 0
for definition in definitions:
(name, d) = cfg.parse_player_definition(definition)
config_name = 'player_{}'.format(name)
if config_name in config:
config[config_name] += 1
else:
config[config_name] = 0
try:
player_factory = importlib.import_module(
'gfootball.env.players.{}'.format(name))
except ImportError as e:
logging.warning('Failed loading player "%s"', name)
logging.warning(e)
exit(1)
player_config = copy.deepcopy(config)
player_config.update(d)
player = player_factory.Player(player_config, self._config)
if name == 'agent':
assert not self._agent, 'Only one \'agent\' player allowed'
self._agent = player
self._agent_left_team = left_team
self._agent_index = len(result)
self._agent_position = position
result.append(player)
position += player.num_controlled_players()
config['index'] += 1
return result
def _convert_observations(self, original, left_team, player, player_position):
"""Converts generic observations returned by the environment to
the player specific observations.
Args:
original: original observations from the environment.
left_team: is the player on the left team or not.
player: player for which to generate observations.
player_position: index into observation corresponding to the player.
"""
observations = {}
for v in constants.EXPOSED_OBSERVATIONS:
# Active and sticky_actions are added below.
if v != 'active' and v != 'sticky_actions':
observations[v] = copy.deepcopy(original[v])
length = player.num_controlled_players()
if left_team:
observations['active'] = copy.deepcopy(
original['left_agent_controlled_player'][
player_position:player_position + length])
observations['sticky_actions'] = copy.deepcopy(
original['left_agent_sticky_actions'][
player_position:player_position + length])
observations['is_active_left'] = True
else:
observations['active'] = copy.deepcopy(
original['right_agent_controlled_player'][
player_position:player_position + length])
observations['sticky_actions'] = copy.deepcopy(
original['right_agent_sticky_actions'][
player_position:player_position + length])
observations['is_active_left'] = False
diff = constants.EXPOSED_OBSERVATIONS.difference(observations.keys())
assert not diff or (len(diff) == 1 and 'frame' in observations)
if 'frame' in original:
observations['frame'] = original['frame']
return observations
def _get_actions(self):
obs = self._env.observation()
actions = []
player_position = 0
for player in self._left_players:
adopted_obs = self._convert_observations(obs, True, player,
player_position)
player_position += player.num_controlled_players()
a = player.take_action(adopted_obs)
if isinstance(a, np.ndarray):
a = a.tolist()
elif not isinstance(a, list):
a = [a]
assert len(adopted_obs['active']) == len(
a), 'Player returned {} actions instead of {}.'.format(
len(a), len(adopted_obs['active']))
actions.extend(a)
player_position = 0
for player in self._right_players:
adopted_obs = self._convert_observations(obs, False, player,
player_position)
player_position += player.num_controlled_players()
a = player.take_action(adopted_obs)
if isinstance(a, np.ndarray):
a = a.tolist()
elif not isinstance(a, list):
a = [a]
assert len(adopted_obs['active']) == len(
a), 'Player returned {} actions instead of {}.'.format(
len(a), len(adopted_obs['active']))
actions.extend(a)
return actions
def step(self, action):
if self._agent:
self._agent.set_action(action)
observation, reward, done = self._env.step(self._get_actions())
if self._agent:
observation = self._convert_observations(observation,
self._agent_left_team,
self._agent,
self._agent_position)
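      # env rewards are given from the left team's perspective; flip the sign for a right-team agent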
if not self._agent_left_team:
reward = -reward
self.last_observation = observation
return observation, np.array(reward, dtype=np.float32), done, {}
def reset(self):
self._env.reset()
if self._config['swap_sides'] != self._last_swapped_sides:
self._left_players, self._right_players = (
self._right_players, self._left_players)
self._agent_left_team = not self._agent_left_team
self._last_swapped_sides = self._config['swap_sides']
for player in self._left_players + self._right_players:
player.reset()
observation = self._env.observation()
if self._agent:
observation = self._convert_observations(observation,
self._agent_left_team,
self._agent,
self._agent_position)
self.last_observation = observation
return observation
def write_dump(self, name):
return self._env.write_dump(name)
def close(self):
self._env.close()
```
|
{
"source": "JGroselle/dispatch",
"score": 2
}
|
#### File: dispatch/auth/service.py
```python
import logging
from typing import Optional
from fastapi import HTTPException, Depends
from fastapi.encoders import jsonable_encoder
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED
from sqlalchemy.exc import IntegrityError
from dispatch.plugins.base import plugins
from dispatch.config import (
DISPATCH_AUTHENTICATION_PROVIDER_SLUG,
DISPATCH_AUTHENTICATION_DEFAULT_USER,
)
from dispatch.organization import service as organization_service
from dispatch.project import service as project_service
from dispatch.enums import UserRoles
from .models import (
DispatchUser,
DispatchUserOrganization,
DispatchUserProject,
UserOrganization,
UserProject,
UserRegister,
UserUpdate,
)
log = logging.getLogger(__name__)
InvalidCredentialException = HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Could not validate credentials"
)
def get(*, db_session, user_id: int) -> Optional[DispatchUser]:
"""Returns a user based on the given user id."""
return db_session.query(DispatchUser).filter(DispatchUser.id == user_id).one_or_none()
def get_by_email(*, db_session, email: str) -> Optional[DispatchUser]:
"""Returns a user object based on user email."""
return db_session.query(DispatchUser).filter(DispatchUser.email == email).one_or_none()
def create_or_update_project_role(*, db_session, user: DispatchUser, role_in: UserProject):
"""Creates a new project role or updates an existing role."""
if not role_in.project.id:
project = project_service.get_by_name(db_session=db_session, name=role_in.project.name)
project_id = project.id
else:
project_id = role_in.project.id
project_role = (
db_session.query(DispatchUserProject)
.filter(
DispatchUserProject.dispatch_user_id == user.id,
)
.filter(DispatchUserProject.project_id == project_id)
.one_or_none()
)
if not project_role:
return DispatchUserProject(
project_id=project_id,
role=role_in.role,
)
project_role.role = role_in.role
return project_role
def create_or_update_organization_role(
*, db_session, user: DispatchUser, role_in: UserOrganization
):
"""Creates a new organization role or updates an existing role."""
if not role_in.organization.id:
organization = organization_service.get_by_name(
db_session=db_session, name=role_in.organization.name
)
organization_id = organization.id
else:
organization_id = role_in.organization.id
organization_role = (
db_session.query(DispatchUserOrganization)
.filter(
DispatchUserOrganization.dispatch_user_id == user.id,
)
.filter(DispatchUserOrganization.organization_id == organization_id)
.one_or_none()
)
if not organization_role:
return DispatchUserOrganization(
            organization_id=organization_id,
role=role_in.role,
)
organization_role.role = role_in.role
return organization_role
def create(*, db_session, organization: str, user_in: UserRegister) -> DispatchUser:
"""Creates a new dispatch user."""
# pydantic forces a string password, but we really want bytes
password = bytes(user_in.password, "utf-8")
# create the user
user = DispatchUser(
**user_in.dict(exclude={"password", "organizations", "projects"}), password=password
)
org = organization_service.get_by_slug(db_session=db_session, slug=organization)
# add the user to the default organization
user.organizations.append(
DispatchUserOrganization(organization=org, role=UserRoles.member.value)
)
# get the default project
default_project = project_service.get_default(db_session=db_session)
# add the user to the default project
user.projects.append(DispatchUserProject(project=default_project, role=UserRoles.member.value))
db_session.add(user)
db_session.commit()
return user
def get_or_create(*, db_session, organization: str, user_in: UserRegister) -> DispatchUser:
"""Gets an existing user or creates a new one."""
try:
return create(db_session=db_session, organization=organization, user_in=user_in)
except IntegrityError:
db_session.rollback()
return get_by_email(db_session=db_session, email=user_in.email)
def update(*, db_session, user: DispatchUser, user_in: UserUpdate) -> DispatchUser:
"""Updates a user."""
user_data = jsonable_encoder(user)
update_data = user_in.dict(exclude={"password"}, skip_defaults=True)
for field in user_data:
if field in update_data:
setattr(user, field, update_data[field])
if user_in.password:
        password = bytes(user_in.password, "utf-8")
user.password = password
if user_in.organizations:
roles = []
for role in user_in.organizations:
roles.append(
create_or_update_organization_role(db_session=db_session, user=user, role_in=role)
)
db_session.add(user)
db_session.commit()
return user
def get_current_user(request: Request) -> DispatchUser:
"""Attempts to get the current user depending on the configured authentication provider."""
if DISPATCH_AUTHENTICATION_PROVIDER_SLUG:
auth_plugin = plugins.get(DISPATCH_AUTHENTICATION_PROVIDER_SLUG)
user_email = auth_plugin.get_current_user(request)
else:
log.debug("No authentication provider. Default user will be used")
user_email = DISPATCH_AUTHENTICATION_DEFAULT_USER
if not user_email:
log.exception(
f"Unable to determine user email based on configured auth provider or no default auth user email defined. Provider: {DISPATCH_AUTHENTICATION_PROVIDER_SLUG}"
)
raise InvalidCredentialException
return get_or_create(
db_session=request.state.db,
organization=request.state.organization,
user_in=UserRegister(email=user_email),
)
def get_current_role(
request: Request, current_user: DispatchUser = Depends(get_current_user)
) -> UserRoles:
"""Attempts to get the current user depending on the configured authentication provider."""
return current_user.get_organization_role(organization_name=request.state.organization)
```
#### File: dispatch/tag/views.py
```python
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from dispatch.database.core import get_db, get_class_by_tablename
from dispatch.database.service import common_parameters, search_filter_sort_paginate
from dispatch.tag.recommender import get_recommendations
from .models import (
TagCreate,
TagPagination,
TagRead,
TagUpdate,
)
from .service import create, delete, get, update
router = APIRouter()
@router.get("", response_model=TagPagination)
def get_tags(*, common: dict = Depends(common_parameters)):
"""
Get all tags, or only those matching a given search term.
"""
return search_filter_sort_paginate(model="Tag", **common)
@router.get("/{tag_id}", response_model=TagRead)
def get_tag(*, db_session: Session = Depends(get_db), tag_id: str):
"""
Given its unique ID, retrieve details about a single tag.
"""
tag = get(db_session=db_session, tag_id=tag_id)
if not tag:
raise HTTPException(status_code=404, detail="The requested tag does not exist.")
return tag
@router.post("", response_model=TagRead)
def create_tag(*, db_session: Session = Depends(get_db), tag_in: TagCreate):
"""
Create a new tag.
"""
tag = create(db_session=db_session, tag_in=tag_in)
return tag
@router.put("/{tag_id}", response_model=TagRead)
def update_tag(*, db_session: Session = Depends(get_db), tag_id: int, tag_in: TagUpdate):
"""
    Given its unique ID, update details of a tag.
"""
tag = get(db_session=db_session, tag_id=tag_id)
if not tag:
        raise HTTPException(status_code=404, detail="A tag with this ID does not exist.")
tag = update(db_session=db_session, tag=tag, tag_in=tag_in)
return tag
@router.delete("/{tag_id}")
def delete_tag(*, db_session: Session = Depends(get_db), tag_id: int):
"""
Delete an tag, returning only an HTTP 200 OK if successful.
"""
tag = get(db_session=db_session, tag_id=tag_id)
if not tag:
        raise HTTPException(status_code=404, detail="A tag with this ID does not exist.")
delete(db_session=db_session, tag_id=tag_id)
@router.get("/recommendations/{model_name}/{id}", response_model=TagPagination)
def get_tag_recommendations(*, db_session: Session = Depends(get_db), model_name: str, id: int):
"""
Retrieves a tag recommendation based on the model and model id.
"""
model_object = get_class_by_tablename(model_name)
model = db_session.query(model_object).filter(model_object.id == id).one_or_none()
    if not model:
        raise HTTPException(
            status_code=404, detail=f"No model found. ModelName: {model_name} Id: {id}"
        )
    project_slug = model.project.slug
    organization_slug = model.project.organization.slug
tags = get_recommendations(
db_session, [t.id for t in model.tags], organization_slug, project_slug, model_name
)
return {"items": tags, "total": len(tags)}
```
|
{
"source": "jgross11/generic-group-project",
"score": 2
}
|
#### File: routes/errors/routes.py
```python
from flask import session
from flask.templating import render_template
from bless_this_chess.Utils import Map, create_blueprint
error_bp = create_blueprint('error_bp', __name__)
@error_bp.app_errorhandler(404)
@error_bp.app_errorhandler(405)
@error_bp.app_errorhandler(500)
def pageNotFound(error):
map = Map()
map.put("error", error)
return render_template('error.jinja2', map=map)
```
|
{
"source": "jgrossmac/dss",
"score": 2
}
|
#### File: dss/build/make-certs.py
```python
import argparse
import itertools
import glob
import os
import shutil
import subprocess
class CockroachCluster(object):
def __init__(self, cluster, ca_cert_to_join=None):
self._ca_cert_to_join = ca_cert_to_join
self._cluster = cluster
@property
def ca_cert_to_join(self):
return self._ca_cert_to_join
@property
def namespace(self):
return 'dss-main'
@property
def directory(self):
return os.path.join('workspace', self._cluster)
@property
def ca_certs_file(self):
return os.path.join(self.ca_certs_dir, 'ca.crt')
@property
def ca_key_dir(self):
return os.path.join(self.directory, 'ca_key_dir')
@property
def ca_key_file(self):
return os.path.join(self.ca_key_dir, 'ca.key')
@property
def ca_certs_dir(self):
return os.path.join(self.directory, 'ca_certs_dir')
@property
def client_certs_dir(self):
return os.path.join(self.directory, 'client_certs_dir')
@property
def node_certs_dir(self):
return os.path.join(self.directory, 'node_certs_dir')
def parse_args():
parser = argparse.ArgumentParser(
description='Creates certificates for a new Cockroachdb cluster')
parser.add_argument('--cluster', metavar='CLUSTER',
help='kubernetes cluster context name')
parser.add_argument('--node-address', metavar='ADDRESS', nargs='*',
default=[], help='extra addresses to add to the node certificate')
parser.add_argument('--ca-cert-to-join', metavar='FILENAME',
help='file containing an existing CA cert of a cluster to join.')
return parser.parse_args()
def main():
args = parse_args()
cr = CockroachCluster(args.cluster, args.ca_cert_to_join)
# Create the generated directories.
try:
os.mkdir('workspace')
except OSError:
pass
try:
os.mkdir(cr.directory)
except OSError:
pass
# Create a new CA.
# Delete and recreate the ca_certs_dir.
shutil.rmtree(cr.ca_certs_dir, ignore_errors=True)
shutil.rmtree(cr.ca_key_dir, ignore_errors=True)
os.mkdir(cr.ca_certs_dir)
os.mkdir(cr.ca_key_dir)
# Build node and client certs.
# Delete and recreate the directories.
shutil.rmtree(cr.node_certs_dir, ignore_errors=True)
shutil.rmtree(cr.client_certs_dir, ignore_errors=True)
os.mkdir(cr.client_certs_dir)
os.mkdir(cr.node_certs_dir)
# Create the CA.
subprocess.check_call([
'cockroach', 'cert', 'create-ca',
'--certs-dir', cr.ca_certs_dir,
'--ca-key', cr.ca_key_file])
    # Copy out the CA cert for generation; we delete these copies later.
shutil.copy(cr.ca_certs_file, cr.client_certs_dir)
shutil.copy(cr.ca_certs_file, cr.node_certs_dir)
# We slightly abuse the rotate certs feature:
# https://www.cockroachlabs.com/docs/stable/rotate-certificates.html
if cr.ca_cert_to_join:
with open(cr.ca_certs_file, 'a') as new_certs_fh:
with open(cr.ca_cert_to_join) as join_ca_cert_fh:
new_certs_fh.write(join_ca_cert_fh.read())
new_certs_fh.write('\n')
print('Created new CA certificate in {}'.format(cr.ca_certs_dir))
subprocess.check_call([
'cockroach', 'cert', 'create-client', 'root',
'--certs-dir', cr.client_certs_dir,
'--ca-key', cr.ca_key_file])
print('Created new client certificate in {}'.format(cr.client_certs_dir))
node_addresses = ['localhost']
node_addresses.extend(args.node_address)
node_addresses.extend([
'cockroachdb-balanced.%s' % cr.namespace,
'cockroachdb-balanced.%s.svc.cluster.local' % cr.namespace,
'*.cockroachdb',
'*.cockroachdb.%s' % cr.namespace,
'cockroachdb.%s' % cr.namespace,
'*.cockroachdb.%s.svc.cluster.local' % cr.namespace
])
subprocess.check_call([
'cockroach', 'cert', 'create-node',
'--certs-dir', cr.node_certs_dir,
'--ca-key', cr.ca_key_file]
+ node_addresses)
os.remove(os.path.join(cr.node_certs_dir, 'ca.crt'))
os.remove(os.path.join(cr.client_certs_dir, 'ca.crt'))
print('Created new node certificate in {}'.format(cr.node_certs_dir))
if __name__ == '__main__':
main()
```
#### File: monitoring/prober/test_subscription_isa_interactions.py
```python
import datetime
import common
def test_create_isa(session, isa1_uuid):
time_start = datetime.datetime.utcnow()
time_end = time_start + datetime.timedelta(minutes=60)
resp = session.put(
'/identification_service_areas/{}'.format(isa1_uuid),
json={
'extents': {
'spatial_volume': {
'footprint': {
'vertices': common.VERTICES,
},
'altitude_lo': 20,
'altitude_hi': 400,
},
'time_start': time_start.strftime(common.DATE_FORMAT),
'time_end': time_end.strftime(common.DATE_FORMAT),
},
'flights_url': 'https://example.com/dss',
})
assert resp.status_code == 200
def test_create_subscription(session, isa1_uuid, sub1_uuid):
time_start = datetime.datetime.utcnow()
time_end = time_start + datetime.timedelta(minutes=60)
resp = session.put(
'/subscriptions/{}'.format(sub1_uuid),
json={
'extents': {
'spatial_volume': {
'footprint': {
'vertices': common.VERTICES,
},
'altitude_lo': 20,
'altitude_hi': 400,
},
'time_start': time_start.strftime(common.DATE_FORMAT),
'time_end': time_end.strftime(common.DATE_FORMAT),
},
'callbacks': {
'identification_service_area_url': 'https://example.com/foo'
},
})
assert resp.status_code == 200
# The response should include our ISA.
data = resp.json()
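  # a fresh subscription starts at notification_index 0; later tests expect it to increment once per ISA mutation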
assert data['subscription']['notification_index'] == 0
assert isa1_uuid in [x['id'] for x in data['service_areas']]
def test_modify_isa(session, isa1_uuid, sub1_uuid):
# GET the ISA first to find its version.
resp = session.get('/identification_service_areas/{}'.format(isa1_uuid))
assert resp.status_code == 200
version = resp.json()['service_area']['version']
# Then modify it.
resp = session.put(
'/identification_service_areas/{}/{}'.format(isa1_uuid, version),
json={
'extents': {
'spatial_volume': {
'footprint': {
'vertices': common.VERTICES,
},
'altitude_lo': 12345,
'altitude_hi': 67890,
},
},
'flights_url': 'https://example.com/dss',
})
assert resp.status_code == 200
# The response should include our subscription.
data = resp.json()
assert {
'url':
'https://example.com/foo',
'subscriptions': [{
'notification_index': 1,
'subscription_id': sub1_uuid,
},],
} in data['subscribers']
def test_delete_isa(session, isa1_uuid, sub1_uuid):
# GET the ISA first to find its version.
resp = session.get('/identification_service_areas/{}'.format(isa1_uuid))
assert resp.status_code == 200
version = resp.json()['service_area']['version']
# Then delete it.
resp = session.delete('/identification_service_areas/{}/{}'.format(
isa1_uuid, version))
assert resp.status_code == 200
# The response should include our subscription.
data = resp.json()
assert {
'url':
'https://example.com/foo',
'subscriptions': [{
'notification_index': 2,
'subscription_id': sub1_uuid,
},],
} in data['subscribers']
def test_delete_subscription(session, sub1_uuid):
# GET the sub first to find its version.
resp = session.get('/subscriptions/{}'.format(sub1_uuid))
assert resp.status_code == 200
data = resp.json()
version = data['subscription']['version']
assert data['subscription']['notification_index'] == 2
# Then delete it.
resp = session.delete('/subscriptions/{}/{}'.format(sub1_uuid, version))
assert resp.status_code == 200
```
#### File: test/interoperability/interop.py
```python
import os
import sys
import argparse
import clients
import datetime
import uuid
from interop_test_suite import InterOpTestSuite
from typing import Dict
def parseArgs() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Test Interoperability of DSSs")
parser.add_argument("OAuth", help="URI to the OAuth Server.")
# When using Password OAuth flow, Username, Password, and Clients-id are
# necessary for authentication
parser.add_argument("--username", help="Username used to get OAuth Token")
parser.add_argument("--password", help="Password used to get OAuth Token")
parser.add_argument(
"--client-id",
help="Client ID used to get OAuth Token, used with Username and Password",
)
# When using Service Account OAuth flow, only the Service Account JSON File
# is required to request Token.
parser.add_argument(
"--service-account",
"--svc",
help="Path to Service Account Credentials file used to get OAuth Token",
)
parser.add_argument(
"DSS", help="List of URIs to DSS Servers. At least 2 DSSs", nargs="+"
)
return parser.parse_args()
def main() -> int:
args = parseArgs()
if args.service_account:
oauth_client = clients.OAuthClient(
args.OAuth,
clients.AuthType.SERVICE_ACCOUNT,
service_account_json=args.service_account,
)
elif args.username:
assert args.password, "Password is required when using Username"
assert args.client_id, "Client ID is required when authenticating with Password"
oauth_client = clients.OAuthClient(
args.OAuth,
clients.AuthType.PASSWORD,
username=args.username,
password=args.password,
client_id=args.client_id,
)
else:
oauth_client = clients.OAuthClient(args.OAuth, clients.AuthType.NONE)
oauth_client.parameterized_url = True
dss_clients: Dict[str, clients.DSSClient] = {}
for dss in args.DSS:
dss_clients[dss] = clients.DSSClient(host=dss, oauth_client=oauth_client)
# Begin Tests
tests = InterOpTestSuite(dss_clients)
tests.startTest()
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
```
|
{
"source": "jgrovegeo/dea-notebooks",
"score": 3
}
|
#### File: dea-notebooks/Scripts/notebookapp_crophealth.py
```python
from ipyleaflet import (
Map,
GeoJSON,
DrawControl,
basemaps
)
import datetime as dt
import datacube
import ogr
import matplotlib as mpl
import matplotlib.pyplot as plt
import rasterio
from rasterio.features import geometry_mask
import xarray as xr
from IPython.display import display
import warnings
import ipywidgets as widgets
# Load utility functions
from dea_datahandling import load_ard
from dea_spatialtools import transform_geojson_wgs_to_epsg
from dea_bandindices import calculate_indices
def load_crophealth_data():
"""
Loads Sentinel-2 analysis-ready data (ARD) product for the crop health
case-study area. The ARD product is provided for the last year.
Last modified: January 2020
outputs
ds - data set containing combined, masked data from Sentinel-2a and -2b.
Masked values are set to 'nan'
"""
# Suppress warnings
warnings.filterwarnings('ignore')
# Initialise the data cube. 'app' argument is used to identify this app
dc = datacube.Datacube(app='Crophealth-app')
# Specify latitude and longitude ranges
latitude = (-24.974997, -24.995971)
longitude = (152.429994, 152.395805)
# Specify the date range
    # Calculated as one year back from today's date
# Dates are converted to strings as required by loading function below
end_date = dt.date.today()
start_date = end_date - dt.timedelta(days=365)
time = (start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d"))
# Construct the data cube query
products = ["s2a_ard_granule", "s2b_ard_granule"]
query = {
'x': longitude,
'y': latitude,
'time': time,
'measurements': [
'nbart_red',
'nbart_green',
'nbart_blue',
'nbart_nir_1',
'nbart_swir_2',
'nbart_swir_3'
],
'output_crs': 'EPSG:3577',
'resolution': (-10, 10)
}
# Load the data and mask out bad quality pixels
ds_s2 = load_ard(dc, products=products, min_gooddata=0.5, **query)
# Calculate the normalised difference vegetation index (NDVI) across
# all pixels for each image.
# This is stored as an attribute of the data
ds_s2 = calculate_indices(ds_s2, index='NDVI', collection='ga_s2_1')
# Return the data
return(ds_s2)
def run_crophealth_app(ds):
"""
Plots an interactive map of the crop health case-study area and allows
the user to draw polygons. This returns a plot of the average NDVI value
in the polygon area.
Last modified: January 2020
inputs
ds - data set containing combined, masked data from Sentinel-2a and -2b.
Must also have an attribute containing the NDVI value for each pixel
"""
# Suppress warnings
warnings.filterwarnings('ignore')
# Update plotting functionality through rcParams
mpl.rcParams.update({'figure.autolayout': True})
# Define the bounding box that will be overlayed on the interactive map
# The bounds are hard-coded to match those from the loaded data
geom_obj = {
"type": "Feature",
"properties": {
"style": {
"stroke": True,
"color": 'red',
"weight": 4,
"opacity": 0.8,
"fill": True,
"fillColor": False,
"fillOpacity": 0,
"showArea": True,
"clickable": True
}
},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[152.395805, -24.995971],
[152.395805, -24.974997],
[152.429994, -24.974997],
[152.429994, -24.995971],
[152.395805, -24.995971]
]
]
}
}
# Create a map geometry from the geom_obj dictionary
# center specifies where the background map view should focus on
# zoom specifies how zoomed in the background map should be
loadeddata_geometry = ogr.CreateGeometryFromJson(str(geom_obj['geometry']))
loadeddata_center = [
loadeddata_geometry.Centroid().GetY(),
loadeddata_geometry.Centroid().GetX()
]
loadeddata_zoom = 14
# define the study area map
studyarea_map = Map(
center=loadeddata_center,
zoom=loadeddata_zoom,
basemap=basemaps.Esri.WorldImagery
)
# define the drawing controls
studyarea_drawctrl = DrawControl(
polygon={"shapeOptions": {"fillOpacity": 0}},
marker={},
circle={},
circlemarker={},
polyline={},
)
# add drawing controls and data bound geometry to the map
studyarea_map.add_control(studyarea_drawctrl)
studyarea_map.add_layer(GeoJSON(data=geom_obj))
# Index to count drawn polygons
polygon_number = 0
# Define widgets to interact with
instruction = widgets.Output(layout={'border': '1px solid black'})
with instruction:
print("Draw a polygon within the red box to view a plot of "
"average NDVI over time in that area.")
info = widgets.Output(layout={'border': '1px solid black'})
with info:
print("Plot status:")
fig_display = widgets.Output(layout=widgets.Layout(
width="50%", # proportion of horizontal space taken by plot
))
with fig_display:
plt.ioff()
fig, ax = plt.subplots(figsize=(8, 6))
ax.set_ylim([-1, 1])
colour_list = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Function to execute each time something is drawn on the map
def handle_draw(self, action, geo_json):
nonlocal polygon_number
# Execute behaviour based on what the user draws
if geo_json['geometry']['type'] == 'Polygon':
info.clear_output(wait=True) # wait=True reduces flicker effect
with info:
print("Plot status: polygon sucessfully added to plot.")
# Convert the drawn geometry to pixel coordinates
geom_selectedarea = transform_geojson_wgs_to_epsg(
geo_json,
EPSG=3577 # hard-coded to be same as case-study data
)
# Construct a mask to only select pixels within the drawn polygon
mask = geometry_mask(
[geom_selectedarea for geoms in [geom_selectedarea]],
out_shape=ds.geobox.shape,
transform=ds.geobox.affine,
all_touched=False,
invert=True
)
masked_ds = ds.NDVI.where(mask)
masked_ds_mean = masked_ds.mean(dim=['x', 'y'], skipna=True)
colour = colour_list[polygon_number % len(colour_list)]
# Add a layer to the map to make the most recently drawn polygon
# the same colour as the line on the plot
studyarea_map.add_layer(
GeoJSON(
data=geo_json,
style={
'color': colour,
'opacity': 1,
'weight': 4.5,
'fillOpacity': 0.0
}
)
)
# add new data to the plot
xr.plot.plot(
masked_ds_mean,
marker='*',
color=colour,
ax=ax
)
# reset titles back to custom
ax.set_title("Average NDVI from Sentinel-2")
ax.set_xlabel("Date")
ax.set_ylabel("NDVI")
# refresh display
fig_display.clear_output(wait=True) # wait=True reduces flicker effect
with fig_display:
display(fig)
            # Increment the polygon number before the next polygon is drawn
polygon_number = polygon_number + 1
else:
info.clear_output(wait=True)
with info:
print("Plot status: this drawing tool is not currently "
"supported. Please use the polygon tool.")
    # Register handle_draw to be called whenever something is drawn on the map
studyarea_drawctrl.on_draw(handle_draw)
with fig_display:
# TODO: update with user friendly something
display(widgets.HTML(""))
# Construct UI:
# +-----------------------+
# | instruction |
# +-----------+-----------+
# | map | plot |
# | | |
# +-----------+-----------+
# | info |
# +-----------------------+
ui = widgets.VBox([instruction,
widgets.HBox([studyarea_map, fig_display]),
info])
display(ui)
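# Minimal usage sketch for a notebook cell (illustrative only; assumes a
# configured Open Data Cube environment with the Sentinel-2 ARD products
# indexed and the utility scripts above importable).
def example_crophealth_session():
    """Load the case-study data, then launch the interactive app."""
    ds = load_crophealth_data()
    run_crophealth_app(ds)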
```
|
{
"source": "j-grover/autonormalize",
"score": 4
}
|
#### File: autonormalize/autonormalize/normalize.py
```python
import pandas as pd
from .classes import Dependencies
def normalize(dependencies, df):
"""
Normalizes the dependency relationships in dependencies into new
groups by breaking up all partial and transitive dependencies.
Arguments:
dependencies (Dependencies) : the dependencies to be split up
Returns:
new_groups (list[Dependencies]) : list of new dependencies objects
representing the new groups
"""
dependencies.remove_implied_extroneous()
no_part_deps = remove_part_deps(dependencies, df)
no_trans_deps = []
for grp in no_part_deps:
no_trans_deps += remove_trans_deps(grp, df)
return no_trans_deps
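# Illustrative sketch only (hypothetical data). It assumes the Dependencies
# constructor accepts a {rhs: [lhs, ...]} mapping plus a primary key, matching
# its use in split_on_dep further below.
def _example_normalize():
    df = pd.DataFrame({
        'order_id': [1, 2, 3],
        'customer_id': [10, 10, 20],
        'customer_name': ['Ann', 'Ann', 'Bob'],
    })
    deps = Dependencies({
        'order_id': [],
        'customer_id': [['order_id']],
        'customer_name': [['order_id'], ['customer_id']],
    }, ['order_id'])
    # 'customer_name' depends transitively on 'order_id' through 'customer_id',
    # so normalize() should split the dependencies into two groups.
    return normalize(deps, df)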
class DepDF(object):
"""
Represents dataframe and functional dependencies between columns in it.
Used in the normalization process.
Attributes:
deps
df
parent
children
index
"""
def __init__(self, deps, df, index, parent=None):
"""
Creates a DepDF.
Arguments:
            deps (Dependencies) : dependencies among the df
df (pd.DataFrame) : dataframe for the object
index (list[str]) : index columns for dataframe
parent (DepDF, optional) : parent DepDF object
"""
self.deps = deps
self.df = df
self.parent = parent
self.children = []
self.index = index
def return_dfs(self):
"""
        Returns the dataframes stored in self and all its descendants.
Returns:
dfs (list[pd.DataFrame]) : dataframes
"""
if self.children == []:
return [self.df]
result = [self.df]
for child in self.children:
result += child.return_dfs()
return result
def make_indexes(depdf):
"""
    Goes through depdf, and all of its descendants, and if any have primary keys
of more than one attribute, creates a new index column, and replaces the
old primary key columns with the new column in the parent df.
Arguments:
depdf (DepDF) : depDF to make indexes for
"""
prim_key = depdf.deps.get_prim_key()
if len(prim_key) > 1:
depdf.df.insert(0, '_'.join(prim_key), range(0, len(depdf.df)))
depdf.index = ['_'.join(prim_key)]
# now need to replace it in the parent df...
if depdf.parent is not None:
add = [None] * len(depdf.parent.df)
indices = depdf.parent.df.groupby(prim_key).indices
for name in indices:
mask = None
for i in range(len(prim_key)):
m = depdf.df[prim_key[i]] == name[i]
if mask is None:
mask = m
else:
mask = mask & m
new_val = depdf.df[mask]['_'.join(prim_key)].item()
for index in indices[name]:
add[index] = new_val
depdf.parent.df.drop(columns=prim_key, inplace=True)
depdf.parent.df.insert(len(depdf.parent.df.columns), '_'.join(prim_key), add)
for child in depdf.children:
make_indexes(child)
def normalize_dataframe(depdf):
"""
    Normalizes the dataframe represented by depdf, creating descendants
    as needed.
Arguments:
depdf (DepDF) : depdf to normalize
"""
part_deps = depdf.deps.find_partial_deps()
filter(part_deps, depdf.df)
if part_deps != []:
split_on = find_most_comm(part_deps, depdf.deps, depdf.df)
split_up(split_on, depdf)
return
trans_deps = depdf.deps.find_trans_deps()
filter(trans_deps, depdf.df)
if trans_deps != []:
split_on = find_most_comm(trans_deps, depdf.deps, depdf.df)
split_up(split_on, depdf)
return
def split_up(split_on, depdf):
"""
Breaks off a depdf and forms its child. Recursively calls normalize on
the original depdf, and its newly formed child.
Arguments:
split_on (list[str]) : attributes to split the dataframe on
        depdf (DepDF) : the depdf to split
"""
parent_deps, child_deps = split_on_dep(split_on, depdf.deps)
child = DepDF(child_deps, form_child(depdf.df, child_deps), split_on, depdf)
depdf.deps = parent_deps
depdf.df = depdf.df.drop(columns=list(set(depdf.df.columns).difference(parent_deps.all_attrs())))
depdf.children.append(child)
normalize_dataframe(depdf)
normalize_dataframe(child)
def form_child(df, deps):
"""
Returns a new dataframe based off of the dependencies in deps.
Arguments:
df (pd.DataFrame) : dataframe to create new dataframe from
deps (Dependencies) : dependencies to base new dataframe off of
"""
attrs = deps.all_attrs()
drops = set(df.columns).difference(attrs)
new_df = df.drop(columns=list(drops))
new_df = drop_primary_dups(new_df, deps.get_prim_key())
return new_df
def remove_part_deps(dependencies, df):
"""
Breaks up the dependency relations in dependencies into new groups of
relations so that there are no more partial dependencies.
Arguments:
        dependencies (Dependencies) : the dependencies to be split up
Returns:
new_groups (list[Dependencies]) : list of new dependencies objects
        representing the new groups with no partial dependencies
"""
part_deps = dependencies.find_partial_deps()
filter(part_deps, df)
if part_deps == []:
return [dependencies]
new_deps = split_on_dep(find_most_comm(part_deps, dependencies), dependencies)
return remove_part_deps(new_deps[0], df) + remove_part_deps(new_deps[1], df)
def remove_trans_deps(dependencies, df):
"""
Breaks up the dependency relations in dependencies into new groups of
relations so that there are no more transitive dependencies.
Arguments:
dependencies (Dependencies) : the dependencies to be split up
Returns:
new_groups (list[Dependencies]): list of new dependencies objects
        representing the new groups with no transitive dependencies
"""
trans_deps = dependencies.find_trans_deps()
filter(trans_deps, df)
if trans_deps == []:
return [dependencies]
new_deps = split_on_dep(find_most_comm(trans_deps, dependencies), dependencies)
return remove_trans_deps(new_deps[0], df) + remove_trans_deps(new_deps[1], df)
def find_most_comm(deps, dependencies, df=None):
"""
Given a list of dependency relations, finds the most common set of
    LHS attributes. If more than one LHS set occurs the same number of
    times, chooses the set with the fewest attributes.
Arguments:
deps (list[(set[str], str)]) : list of tuples representing relations
where the lhs is a set of attribute names, and the rhs is an attribute.
Returns:
most_comm (set[str]) : the most common lhs set of attributes
"""
positions = {}
priority_lst = []
for lhs, rhs in deps:
if frozenset(lhs) in positions:
ind = positions[frozenset(lhs)]
score = priority_lst[ind][0] + 1
while ind != 0 and priority_lst[ind - 1][0] < score:
priority_lst[ind] = priority_lst[ind - 1]
positions[frozenset(priority_lst[ind - 1][1])] = ind
ind -= 1
priority_lst[ind] = (score, lhs)
positions[frozenset(lhs)] = ind
else:
priority_lst.append((1, lhs))
positions[frozenset(lhs)] = len(priority_lst) - 1
    # IF THEY ARE THE SAME, CHOOSE ONE WITH SHORTEST LENGTH
options = [item[1] for item in priority_lst if item[0] == priority_lst[0][0]]
max_lhs = choose_index(options, df)
# max_lhs = priority_lst[0][1]
# scr = priority_lst[0][0]
# i = 1
# while i < len(priority_lst) and priority_lst[i][0] == scr:
# if len(priority_lst[i][1]) < len(max_lhs):
# max_lhs = priority_lst[i][1]
# i += 1
for i in range(len(max_lhs)):
for key in dependencies.get_prim_key():
if dependencies.equiv_attrs(max_lhs[i], key):
max_lhs[i] = key
return max_lhs
def split_on_dep(lhs_dep, dependencies):
"""
Given the LHS attributes of a dependency, breaks up the dependency
relations in dependencies into two groups so that the LHS given is
the primary key of the new group. The old group keeps the same
primary key.
Arguments:
lhs_dep (list[str]) : set of attributes to be the new group's
primary key
dependencies (Dependencies) : dependency relations to be split up
Returns:
new_groups ((Dependencies, Dependencies)) : the new groups
"""
new_deps = {}
old_deps = dependencies.serialize()
new_rhs = set()
# new primary key
for attr in lhs_dep:
new_deps[attr] = old_deps[attr][:]
for rhs in list(old_deps.keys()):
for lhs in old_deps[rhs]:
if set(lhs).issubset(lhs_dep):
# if lhs_dep in old_deps[rhs]:
new_deps[rhs] = old_deps[rhs]
old_deps.pop(rhs)
new_rhs.add(rhs)
break
for rhs in old_deps:
for lhs in old_deps[rhs][:]:
if len(new_rhs.intersection(lhs)) != 0:
old_deps[rhs].remove(lhs)
old_rhs = set(list(old_deps.keys()))
for attr in lhs_dep:
old_rhs.remove(attr)
for rhs in new_deps:
for lhs in new_deps[rhs][:]:
if len(old_rhs.intersection(lhs)) != 0:
new_deps[rhs].remove(lhs)
return (Dependencies(old_deps, dependencies.get_prim_key()), Dependencies(new_deps, lhs_dep))
def drop_primary_dups(df, prim_key):
"""
    Drops all duplicates based off of the columns in prim_key. If the other
    columns are not unique for a given combination of prim_key values, keeps
    the most common ("mode") combination of values for that key.
Arguments:
df (pd.DataFrame) : dataframe to drop duplicates of
prim_key (list[str]) : columns that form the primary key of the dataframe
Returns:
new_df (pd.DataFrame) : dataframe with duplicates dropped
"""
df_lst = []
if df.drop_duplicates(prim_key).shape[0] == df.shape[0]:
return df
groups = df.groupby(prim_key)
for name, group in groups:
df_lst.append(group.mode().iloc[0])
# new_df = new_df.append(group.mode().iloc[0], ignore_index=True)
result = (pd.DataFrame(df_lst, columns=df.columns)).reset_index(drop=True)
return result.astype(dict(df.dtypes))
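# Illustrative sketch (hypothetical data): the three rows with id=1 disagree on
# 'colour', so the most common value ('red') is kept for that key.
def _example_drop_primary_dups():
    df = pd.DataFrame({
        'id': [1, 1, 1, 2],
        'colour': ['red', 'red', 'blue', 'green'],
    })
    return drop_primary_dups(df, ['id'])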
def choose_index(keys, df):
"""
Chooses key from a list of keys. Order of priority:
1) shortest length
2) has "id" in some form in name of an attribute
3) has attribute furthest to the left in table
Arguments:
keys (list[set[str]]) : list of keys to choose from
df (pd.DataFrame) : pandas dataframe keys are for
Returns:
index (list[str]) : chosen key
"""
sort_key = sorted(keys, key=len)
m = len(sort_key[0])
options = [key for key in sort_key if len(key) == m]
for key in options:
for attr in key:
if "_id" in attr.lower() or " id" in attr.lower() or "id _" in attr.lower() or "id " in attr.lower():
return list(key)
if df is None:
return list(options[0])
for col in df.columns:
includes = [option for option in options if col in option]
if len(includes) == 1:
return list(includes[0])
if len(includes) > 1:
options = includes
return list(options[0])
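# Illustrative sketch (hypothetical data): the shorter candidate key wins, and
# an attribute containing "id" would break any remaining tie.
def _example_choose_index():
    df = pd.DataFrame(columns=['customer_id', 'name', 'email'])
    return choose_index([{'name', 'email'}, {'customer_id'}], df)  # ['customer_id']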
def filter(keys, df):
"""
Filters out any keys that contain attributes that are not strings, ints, or
categories from a list of relations.
Arguments:
keys (list[(list[str], str)]) : relationships to filter out
df (pd.DataFrame) : dataframe attributes in keys are from
"""
for key, rhs in keys[:]:
for attr in key:
if df[attr].dtypes.name not in set(['category', 'int64', 'object']):
keys.remove((key, rhs))
break
```
|
{
"source": "jg-rp/Flask-Liquid",
"score": 2
}
|
#### File: Flask-Liquid/flask_liquid/flask_liquid.py
```python
from __future__ import annotations
from contextlib import contextmanager
from itertools import chain
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import Mapping
from typing import Optional
from typing import Type
from flask import Flask
from flask import current_app
from flask import signals_available
from flask import template_rendered
from flask import before_render_template
from flask import _request_ctx_stack
from liquid import Environment
from liquid import Mode
from liquid import Undefined
from liquid.template import BoundTemplate
from liquid.loaders import BaseLoader
from liquid.loaders import FileSystemLoader
TemplateContextProcessorCallable = Callable[[], Dict[str, Any]]
class Liquid:
"""The Liquid template extension for Flask.
:param tag_start_string: The sequence of characters indicating the start of a
liquid tag. Defaults to ``{%``.
:type tag_start_string: str
:param tag_end_string: The sequence of characters indicating the end of a liquid
tag. Defaults to ``%}``.
:type tag_end_string: str
:param statement_start_string: The sequence of characters indicating the start of
an output statement. Defaults to ``{{``.
:type statement_start_string: str
:param statement_end_string: The sequence of characters indicating the end of an
output statement. Defaults to ``}}``
:type statement_end_string: str
:param template_comments: If ``True``, enable template comments. Where, by default,
anything between ``{#`` and ``#}`` is considered a comment. Defaults to
``False``.
:type template_comments: bool
:param comment_start_string: The sequence of characters indicating the start of a
comment. Defaults to ``{#``. ``template_comments`` must be ``True`` for
``comment_start_string`` to have any effect.
:type comment_start_string: str
:param comment_end_string: The sequence of characters indicating the end of a
comment. Defaults to ``#}``. ``template_comments`` must be ``True`` for
``comment_end_string`` to have any effect.
:type comment_end_string: str
:param tolerance: Indicates how tolerant to be of errors. Must be one of
``Mode.LAX``, ``Mode.WARN`` or ``Mode.STRICT``. Defaults to ``Mode.STRICT``.
:type tolerance: Mode
:param loader: A template loader. If you want to use the builtin "render" or
"include" tags, a loader must be configured. Defaults to an empty
:class:`liquid.loaders.DictLoader`.
:type loader: liquid.loaders.BaseLoader
:param undefined: A subclass of :class:`Undefined` that represents undefined values.
Could be one of the built-in undefined types, :class:`Undefined`,
:class:`DebugUndefined` or :class:`StrictUndefined`. Defaults to
:class:`Undefined`, an undefined type that silently ignores undefined values.
:type undefined: liquid.Undefined
:param strict_filters: If ``True``, will raise an exception upon finding an
undefined filter. Otherwise undefined filters are silently ignored. Defaults to
``True``.
:type strict_filters: bool
:param autoescape: If `True`, all context values will be HTML-escaped before output
unless they've been explicitly marked as "safe". Requires the package
        Markupsafe. Defaults to ``True``.
:type autoescape: bool
:param auto_reload: If `True`, loaders that have an ``uptodate`` callable will
reload template source data automatically. For deployments where template
sources don't change between service reloads, setting auto_reload to `False` can
yield an increase in performance by avoiding calls to ``uptodate``. Defaults to
``True``.
:type auto_reload: bool
:param cache_size: The capacity of the template cache in number of templates.
Defaults to 300. If ``cache_size`` is ``None`` or less than ``1``, it has the
effect of setting ``auto_reload`` to ``False``.
:type cache_size: int
:param expression_cache_size: The capacity of each of the common expression caches.
Defaults to ``0``, disabling expression caching.
:type expression_cache_size: int
:param globals: An optional mapping that will be added to the context of any
template loaded from this environment. Defaults to ``None``.
:type globals: dict
    :param flask_context_processors: If set to `True`, Flask context processors
        will be applied to every Liquid render context. Defaults to `False`.
    :param flask_signals: If set to `True`, the `template_rendered` and
        `before_render_template` signals will be emitted for Liquid templates.
"""
    # pylint: disable=redefined-builtin, too-many-arguments, too-many-locals
def __init__(
self,
app: Optional[Flask] = None,
*,
tag_start_string: str = r"{%",
tag_end_string: str = r"%}",
statement_start_string: str = r"{{",
statement_end_string: str = r"}}",
template_comments: bool = False,
comment_start_string: str = "{#",
comment_end_string: str = "#}",
tolerance: Mode = Mode.STRICT,
loader: Optional[BaseLoader] = None,
undefined: Type[Undefined] = Undefined,
strict_filters: bool = True,
autoescape: bool = True,
auto_reload: bool = True,
globals: Optional[Mapping[str, object]] = None,
flask_context_processors: bool = False,
flask_signals: bool = True,
cache_size: int = 300,
expression_cache_size: int = 0,
):
self.app = app
self.env = Environment(
tag_start_string=tag_start_string,
tag_end_string=tag_end_string,
statement_start_string=statement_start_string,
statement_end_string=statement_end_string,
tolerance=tolerance,
loader=loader,
undefined=undefined,
strict_filters=strict_filters,
autoescape=autoescape,
auto_reload=auto_reload,
globals=globals,
template_comments=template_comments,
comment_start_string=comment_start_string,
comment_end_string=comment_end_string,
cache_size=cache_size,
expression_cache_size=expression_cache_size,
)
# init_app will default to a file system loader if one was not provided.
# This differs from the default behavior of `liquid.Environment`, where the
# default loader is an empty DictLoader.
self._loader = loader
# Indicates if Flask context processors should be used to update the liquid
# context on each request.
self.flask_context_processors = flask_context_processors
# Indicates if the extension should trigger Flask's `template_rendered` and
        # `before_render_template` signals.
self.flask_signals = flask_signals
if app is not None:
self.init_app(app)
def init_app(self, app: Flask) -> None:
"""Initialise a Flask app with a Liquid environment."""
app.config.setdefault(
"LIQUID_TAG_START_STRING",
self.env.tag_start_string,
)
app.config.setdefault(
"LIQUID_TAG_END_STRING",
self.env.tag_end_string,
)
app.config.setdefault(
"LIQUID_STATEMENT_START_STRING",
self.env.statement_start_string,
)
app.config.setdefault(
"LIQUID_STATEMENT_END_STRING",
self.env.statement_end_string,
)
app.config.setdefault(
"LIQUID_TEMPLATE_COMMENTS",
self.env.template_comments,
)
app.config.setdefault(
"LIQUID_COMMENT_START_STRING",
self.env.comment_start_string or "{#",
)
app.config.setdefault(
"LIQUID_COMMENT_END_STRING",
self.env.comment_end_string or "#}",
)
app.config.setdefault("LIQUID_TOLERANCE", self.env.mode)
app.config.setdefault("LIQUID_UNDEFINED", self.env.undefined)
app.config.setdefault("LIQUID_STRICT_FILTERS", self.env.strict_filters)
app.config.setdefault("LIQUID_TEMPLATE_FOLDER", app.template_folder)
app.config.setdefault("LIQUID_AUTOESCAPE", self.env.autoescape)
app.config.setdefault("LIQUID_AUTO_RELOAD", self.env.auto_reload)
app.config.setdefault(
"LIQUID_EXPRESSION_CACHE_SIZE",
self.env.expression_cache_size,
)
app.config.setdefault(
"LIQUID_FLASK_CONTEXT_PROCESSORS",
self.flask_context_processors,
)
app.config.setdefault(
"LIQUID_FLASK_SIGNALS",
self.flask_signals,
)
self.flask_signals = app.config["LIQUID_FLASK_SIGNALS"]
if not self._loader:
self._loader = FileSystemLoader(
search_path=app.config["LIQUID_TEMPLATE_FOLDER"]
)
self.flask_context_processors = app.config["LIQUID_FLASK_CONTEXT_PROCESSORS"]
self.env.tag_start_string = app.config["LIQUID_TAG_START_STRING"]
self.env.tag_end_string = app.config["LIQUID_TAG_END_STRING"]
self.env.statement_start_string = app.config["LIQUID_STATEMENT_START_STRING"]
self.env.statement_end_string = app.config["LIQUID_STATEMENT_END_STRING"]
self.env.mode = app.config["LIQUID_TOLERANCE"]
self.env.undefined = app.config["LIQUID_UNDEFINED"]
self.env.strict_filters = app.config["LIQUID_STRICT_FILTERS"]
self.env.autoescape = app.config["LIQUID_AUTOESCAPE"]
self.env.auto_reload = app.config["LIQUID_AUTO_RELOAD"]
self.env.mode = app.config["LIQUID_TOLERANCE"]
self.env.loader = self._loader
self.env.comment_start_string = app.config["LIQUID_COMMENT_START_STRING"]
self.env.comment_end_string = app.config["LIQUID_COMMENT_END_STRING"]
self.env.template_comments = app.config["LIQUID_TEMPLATE_COMMENTS"]
# Working around a bad decision in the Environment constructor.
if not self.env.template_comments:
self.env.comment_start_string = ""
self.env.comment_end_string = ""
self.env.set_expression_cache_size(app.config["LIQUID_EXPRESSION_CACHE_SIZE"])
# Just in case init_app is called late and templates have already been loaded.
self.env.cache.clear()
app.extensions["flask_liquid"] = self
self.app = app
def _make_context(self, context: Dict[str, object]) -> Dict[str, object]:
"""Add the result of Flask context processors to the given context."""
# NOTE: We're not using `app.update_template_context` because we don't want
# g, request, session etc.
# Updates `context` in place. Will not overwrite keys already in context.
if self.flask_context_processors and self.app:
processors = self.app.template_context_processors
funcs: Iterable[TemplateContextProcessorCallable] = processors[None]
request_context = _request_ctx_stack.top
if request_context is not None:
blueprint = request_context.request.blueprint
if blueprint is not None and blueprint in processors:
funcs = chain(funcs, processors[blueprint])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
context.update(orig_ctx)
return context
@contextmanager
def _signals(
self, template: BoundTemplate, context: Mapping[str, object]
) -> Iterator[Liquid]:
if signals_available and self.flask_signals:
before_render_template.send(
self.app,
template=template,
context=context,
)
yield self
if signals_available and self.flask_signals:
template_rendered.send(
self.app,
template=template,
context=context,
)
def render_template(self, template_name: str, **context: object) -> str:
"""Render a Liquid template from the configured template loader."""
context = self._make_context(context)
template = self.env.get_template(template_name)
with self._signals(template, context):
rendered = template.render(**context)
return rendered
async def render_template_async(self, template_name: str, **context: object) -> str:
"""Render a Liquid template from the configured template loader."""
context = self._make_context(context)
template = await self.env.get_template_async(template_name)
with self._signals(template, context):
rendered = await template.render_async(**context)
return rendered
def render_template_string(self, source: str, **context: object) -> str:
"""Render a Liquid template from a template string."""
context = self._make_context(context)
template = self.env.from_string(source)
with self._signals(template, context):
rendered = template.render(**context)
return rendered
async def render_template_string_async(self, source: str, **context: object) -> str:
"""Render a Liquid template from a template string."""
context = self._make_context(context)
template = self.env.from_string(source)
with self._signals(template, context):
rendered = await template.render_async(**context)
return rendered
def render_template(template_name: str, **context: object) -> str:
"""Render a Liquid template in the current Flask application context."""
ext: Liquid = current_app.extensions["flask_liquid"]
return ext.render_template(template_name, **context)
async def render_template_async(template_name: str, **context: object) -> str:
"""Render a Liquid template in the current Flask application context."""
ext: Liquid = current_app.extensions["flask_liquid"]
return await ext.render_template_async(template_name, **context)
def render_template_string(source: str, **context: object) -> str:
"""Render a Liquid template from a string in the current Flask application
context."""
ext: Liquid = current_app.extensions["flask_liquid"]
return ext.render_template_string(source, **context)
async def render_template_string_async(source: str, **context: object) -> str:
"""Render a Liquid template from a string in the current Flask application
context."""
ext: Liquid = current_app.extensions["flask_liquid"]
return await ext.render_template_string_async(source, **context)
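# Minimal usage sketch (not part of the original module; the route and template
# string below are hypothetical).
def _example_app() -> Flask:
    app = Flask(__name__)
    Liquid(app)  # registers the extension under app.extensions["flask_liquid"]
    @app.route("/hello")
    def hello() -> str:
        # Render a Liquid template string in the current application context.
        return render_template_string(r"Hello, {{ you }}!", you="World")
    return app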
```
#### File: Flask-Liquid/tests/test_liquid.py
```python
import os
import tempfile
from contextlib import contextmanager
from unittest import TestCase
from unittest import skipIf
import flask
from flask import Flask
from flask import Blueprint
from flask import template_rendered
from flask import before_render_template
from liquid import Undefined
from liquid import StrictUndefined
from liquid.exceptions import UndefinedError
from liquid.exceptions import NoSuchFilterFunc
from liquid.loaders import DictLoader
from flask_liquid import Liquid
from flask_liquid import render_template
from flask_liquid import render_template_string
from flask_liquid import render_template_async
from flask_liquid import render_template_string_async
SKIP_ASYNC = bool(int(flask.__version__[0]) < 2)
# pylint: disable=redefined-builtin, unused-variable
def create_app(config, globals=None, loader=None):
"""Test Flask application factory."""
app = Flask(__name__)
app.testing = True
app.config.from_mapping(config)
_ = Liquid(app, globals=globals, loader=loader)
@app.context_processor
def add_some_context():
return {"username": "some"}
@app.route("/fromstring")
def from_string():
return render_template_string(r"Hello {{ you }}", you="World")
@app.route("/rendertemplate")
def from_template_file():
return render_template("index.html", you="World")
@app.route("/render/<name>")
def render_by_name(name):
return render_template(name)
@app.route("/globalcontext")
def global_context():
return render_template_string(r"Hello {{ you }}")
@app.route("/standardcontext")
def standard_context():
return render_template_string(r"{{ g }}{{ username }}{{ request.path }}")
@app.route("/contextprocessor")
def with_context_from_processor():
return render_template_string(r"{{ username }}")
# pylint: disable=invalid-name
bp = Blueprint("blue", __name__, url_prefix="/blue")
@bp.route("/greeting")
def blueprint_hello():
return render_template_string(r"{{ greeting }}, {{ you }}.")
@bp.context_processor
def blueprint_context():
return {"greeting": "Goodbye"}
app.register_blueprint(bp)
return app
# pylint: disable=redefined-builtin, unused-variable
def create_async_app(config, globals=None, loader=None):
"""Test Flask application factory."""
app = Flask(__name__)
app.testing = True
app.config.from_mapping(config)
_ = Liquid(app, globals=globals, loader=loader)
@app.context_processor
def add_some_context():
return {"username": "some"}
@app.route("/fromstring")
async def from_string():
return await render_template_string_async(r"Hello {{ you }}", you="World")
@app.route("/rendertemplate")
async def from_template_file():
return await render_template_async("index.html", you="World")
@app.route("/render/<name>")
async def render_by_name(name):
return await render_template_async(name)
@app.route("/globalcontext")
async def global_context():
return await render_template_string_async(r"Hello {{ you }}")
@app.route("/standardcontext")
async def standard_context():
return await render_template_string_async(
r"{{ g }}{{ username }}{{ request.path }}"
)
@app.route("/contextprocessor")
async def with_context_from_processor():
return await render_template_string_async(r"{{ username }}")
return app
@contextmanager
def capture_template_rendered(app):
"""Utility context manager for capturing signals."""
recorded = []
# pylint: disable=unused-argument
def record(_, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record, app)
try:
yield recorded
finally:
template_rendered.disconnect(record, app)
@contextmanager
def capture_before_render_templates(app):
"""Utility context manager for capturing signals."""
recorded = []
# pylint: disable=unused-argument
def record(_, template, context, **extra):
recorded.append((template, context))
before_render_template.connect(record, app)
try:
yield recorded
finally:
before_render_template.disconnect(record, app)
class DefaultLiquidTestCase(TestCase):
"""Flask-Liquid test case with default configuration."""
def setUp(self):
self.app = create_app(
config={"LIQUID_TEMPLATE_FOLDER": "tests/templates/"},
globals={"you": "World"},
)
def test_render_from_string(self):
"""Test that we can render a liquid template given as a string."""
with self.app.test_client() as client:
resp = client.get("/fromstring")
self.assertEqual(resp.data, b"Hello World")
def test_render_template(self):
"""Test that we can render a liquid template from the file system."""
with self.app.test_client() as client:
resp = client.get("/rendertemplate")
self.assertEqual(resp.data, b"Hello World")
def test_render_template_by_name(self):
"""Test that we can render a liquid template from the file system."""
with self.app.test_client() as client:
resp = client.get("/render/snippet.html")
self.assertEqual(resp.data, b"Goodbye, World!\n")
def test_render_with_global_context(self):
"""Test that we can render liquid templates with global context."""
with self.app.test_client() as client:
resp = client.get("/globalcontext")
self.assertEqual(resp.data, b"Hello World")
def test_standard_flask_context(self):
"""Test that the standard Flask context variables are not included."""
with self.app.test_client() as client:
resp = client.get("/standardcontext")
self.assertEqual(resp.data, b"")
def test_template_rendered_signal(self):
"""Test that the template_rendered signal is fired when a liquid
template is rendered."""
with capture_template_rendered(self.app) as templates:
resp = self.app.test_client().get("/rendertemplate")
self.assertEqual(resp.data, b"Hello World")
template, _ = templates[0]
self.assertEqual("index.html", template.name)
def test_before_render_template_signal(self):
"""Test that the before_rendered_tempplate signal is fired before a
liquid template is rendered."""
with capture_before_render_templates(self.app) as templates:
resp = self.app.test_client().get("/rendertemplate")
self.assertEqual(resp.data, b"Hello World")
template, _ = templates[0]
self.assertEqual("index.html", template.name)
@skipIf(SKIP_ASYNC, "async views require flask>=2")
class AsyncLiquidTestCase(DefaultLiquidTestCase):
"""Async Flask-Liquid test case."""
def setUp(self):
self.app = create_async_app(
config={"LIQUID_TEMPLATE_FOLDER": "tests/templates/"},
globals={"you": "World"},
)
class LiquidLoaderTestCase(TestCase):
"""Flask-Liquid test cases using arbitrary template loaders."""
def test_render_template(self):
"""Test that we can render a template from a location of our choosing."""
with tempfile.TemporaryDirectory() as tmpdirname:
with open(
os.path.join(tmpdirname, "index.html"), "w", encoding="utf-8"
) as fd:
fd.write(r"Hello {{ you }}")
app = create_app(
config={"LIQUID_TEMPLATE_FOLDER": tmpdirname},
globals={"you": "World"},
)
resp = app.test_client().get("/rendertemplate")
self.assertEqual(resp.data, b"Hello World")
def test_dict_loader(self):
"""Test that we can render a template from a dictionary loader."""
snippets = {
"index": "<HTML>{% include 'heading' %}</HTML>",
"heading": "<h1>{{ some }}</h1>",
}
expected = b"<HTML><h1>other</h1></HTML>"
app = create_app(
config={},
globals={"some": "other"},
loader=DictLoader(snippets),
)
with app.test_client() as client:
resp = client.get("/render/index")
self.assertEqual(resp.data, expected)
class FlaskContextTestCase(TestCase):
"""Flask-Liquid test case using Flask context processors."""
def setUp(self):
self.app = create_app(
config={"LIQUID_FLASK_CONTEXT_PROCESSORS": True},
globals={"you": "World"},
)
self.async_app = create_async_app(
config={"LIQUID_FLASK_CONTEXT_PROCESSORS": True},
globals={"you": "World"},
)
def test_context_processor(self):
"""Test that we can use context variables from context processors in liquid
templates."""
with self.app.test_client() as client:
resp = client.get("/contextprocessor")
self.assertEqual(resp.data, b"some")
@skipIf(SKIP_ASYNC, "async views require flask>=2")
def test_context_processor_async(self):
"""Test that we can use context variables from context processors in liquid
templates."""
with self.async_app.test_client() as client:
resp = client.get("/contextprocessor")
self.assertEqual(resp.data, b"some")
def test_blueprint_context_processor(self):
"""Test that we can use context variables from context processors registered on
blueprints."""
with self.app.test_client() as client:
resp = client.get("/blue/greeting")
self.assertEqual(resp.data, b"Goodbye, World.")
class NoSignalsTestCase(TestCase):
"""Flask-Liquid test case using Flask signals."""
def setUp(self):
self.app = create_app(
config={
"LIQUID_FLASK_SIGNALS": False,
"LIQUID_TEMPLATE_FOLDER": "tests/templates/",
},
globals={"you": "World"},
)
def test_template_rendered_signal(self):
"""Test that the template_rendered signal is not fired when send_flask_signals
is False."""
with capture_template_rendered(self.app) as templates:
resp = self.app.test_client().get("/rendertemplate")
self.assertEqual(resp.data, b"Hello World")
self.assertEqual(len(templates), 0)
def test_before_render_template_signal(self):
"""Test that the before_rendered_tempplate signal is not fired when
send_flask_signals is False"""
with capture_before_render_templates(self.app) as templates:
resp = self.app.test_client().get("/rendertemplate")
self.assertEqual(resp.data, b"Hello World")
self.assertEqual(len(templates), 0)
class LiquidEnvironmentTestCase(TestCase):
"""Liquid environment configuration test case."""
def test_undefined(self):
"""Test that we can reference undefined variables without error."""
app = create_app(
config={"LIQUID_UNDEFINED": Undefined},
)
with app.app_context():
result = render_template_string(r"Hello, {{ nosuchthing }}.")
self.assertEqual(result, "Hello, .")
def test_strict_undefined(self):
"""Test that we can set the `undefined` type."""
app = create_app(
config={"LIQUID_UNDEFINED": StrictUndefined},
)
with app.app_context():
with self.assertRaises(UndefinedError):
_ = render_template_string(r"Hello, {{ nosuchthing }}.")
def test_lax_filters(self):
"""Test that undefined filters can be ignored."""
app = create_app(
config={"LIQUID_STRICT_FILTERS": False},
globals={"username": "You"},
)
with app.app_context():
result = render_template_string(r"Hello, {{ username | upper }}.")
self.assertEqual(result, "Hello, You.")
def test_strict_filters(self):
"""Test that undefined filters can raise an exception."""
app = create_app(
config={"LIQUID_STRICT_FILTERS": True},
globals={"username": "You"},
)
with app.app_context():
with self.assertRaises(NoSuchFilterFunc):
_ = render_template_string(r"Hello, {{ username | upper }}.")
def test_autoescape(self):
"""Test that autoescape is enabled by default."""
app = create_app(
config={},
globals={"username": "You"},
)
with app.app_context():
result = render_template_string(
r"Hello, {{ foo }}.",
foo="<b>you</b>",
)
            self.assertEqual(result, "Hello, &lt;b&gt;you&lt;/b&gt;.")
def test_disable_autoescape(self):
"""Test that we can disable autoescape."""
app = create_app(
config={"LIQUID_AUTOESCAPE": False},
globals={"username": "You"},
)
with app.app_context():
result = render_template_string(
r"Hello, {{ foo }}.",
foo="<b>you</b>",
)
self.assertEqual(result, "Hello, <b>you</b>.")
def test_enable_template_comments(self):
"""Test that we can enable template comments."""
app = create_app(
config={"LIQUID_TEMPLATE_COMMENTS": True},
globals={"username": "You"},
)
with app.app_context():
result = render_template_string(r"Hello, {# some comment -#} World!")
self.assertEqual(result, "Hello, World!")
def test_expression_cache(self):
"""Test that we can enable expresssion caching."""
app = create_app(
config={"LIQUID_EXPRESSION_CACHE_SIZE": 1},
globals={"username": "You"},
)
ext: Liquid = app.extensions["flask_liquid"]
self.assertTrue(hasattr(ext.env.parse_filtered_expression_value, "cache_info"))
```
|
{
"source": "jg-rp/liquid-extra",
"score": 2
}
|
#### File: liquid_extra/filters/html.py
```python
import html
from liquid import Markup
from liquid import escape
from liquid.filter import string_filter
from liquid.filter import with_environment
from liquid import Environment
@string_filter
@with_environment
def stylesheet_tag(url: str, *, environment: Environment) -> str:
"""Wrap a URL in an HTML stylesheet tag."""
tag = '<link href="{}" rel="stylesheet" type="text/css" media="all" />'
if environment.autoescape:
# We are deliberately forcing possible Markup strings to normal strings. We do
# not want markup in the middle of a link tag.
return Markup(tag).format(escape(str(url)))
return tag.format(html.escape(url))
@string_filter
@with_environment
def script_tag(url: str, *, environment: Environment) -> str:
"""Wrap a URL in an HTML script tag."""
tag = '<script src="{}" type="text/javascript"></script>'
if environment.autoescape:
# We are deliberately forcing possible Markup strings to normal strings. We do
# not want markup in the middle of a script tag.
return Markup(tag).format(escape(str(url)))
return tag.format(html.escape(url))
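# Minimal usage sketch (not part of the original module). It assumes the filters
# are registered by name on a liquid Environment via add_filter; the URL is
# hypothetical.
def _example_stylesheet_tag() -> str:
    env = Environment()
    env.add_filter("stylesheet_tag", stylesheet_tag)
    template = env.from_string(r"{{ 'assets/app.css' | stylesheet_tag }}")
    return template.render()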
```
#### File: liquid_extra/tags/withblock.py
```python
from __future__ import annotations
import sys
from functools import partial
from typing import TYPE_CHECKING
from typing import Dict
from typing import NamedTuple
from typing import Optional
from typing import TextIO
from liquid.ast import Node
from liquid.ast import BlockNode
from liquid.context import Context
from liquid.expression import Expression
from liquid.lex import include_expression_rules
from liquid.lex import _compile_rules
from liquid.lex import _tokenize
from liquid.parse import expect
from liquid.parse import get_parser
from liquid.parse import parse_expression
from liquid.parse import parse_unchained_identifier
from liquid.stream import TokenStream
from liquid.tag import Tag
from liquid.token import Token
from liquid.token import TOKEN_TAG
from liquid.token import TOKEN_EXPRESSION
from liquid.token import TOKEN_TRUE
from liquid.token import TOKEN_FALSE
from liquid.token import TOKEN_NIL
from liquid.token import TOKEN_NULL
from liquid.token import TOKEN_COLON
from liquid.token import TOKEN_AS
from liquid.token import TOKEN_EOF
from liquid.token import TOKEN_COMMA
if TYPE_CHECKING: # pragma: no cover
from liquid import Environment
TAG_WITH = sys.intern("with")
TAG_ENDWITH = sys.intern("endwith")
with_expression_keywords = frozenset(
[
TOKEN_TRUE,
TOKEN_FALSE,
TOKEN_NIL,
TOKEN_NULL,
TOKEN_AS,
]
)
# We're borrowing token rules from the `include` tag, with our own set of valid
# keywords.
tokenize_with_expression = partial(
_tokenize,
rules=_compile_rules(include_expression_rules),
keywords=with_expression_keywords,
)
class WithKeywordArg(NamedTuple):
name: str
expr: Expression
class WithNode(Node):
def __init__(self, tok: Token, args: Dict[str, Expression], block: BlockNode):
self.tok = tok
self.args = args
self.block = block
def render_to_output(self, context: Context, buffer: TextIO) -> Optional[bool]:
namespace = {k: v.evaluate(context) for k, v in self.args.items()}
with context.extend(namespace):
return self.block.render(context, buffer)
class WithTag(Tag):
name = TAG_WITH
end = TAG_ENDWITH
def __init__(self, env: Environment):
super().__init__(env)
self.parser = get_parser(self.env)
def parse(self, stream: TokenStream) -> Node:
expect(stream, TOKEN_TAG, value=TAG_WITH)
tok = stream.current
stream.next_token()
expect(stream, TOKEN_EXPRESSION)
expr_stream = TokenStream(tokenize_with_expression(stream.current.value))
# A dictionary to help handle duplicate keywords.
args = {}
while expr_stream.current.type != TOKEN_EOF:
key, expr = self.parse_argument(expr_stream)
args[key] = expr
if expr_stream.current.type == TOKEN_COMMA:
expr_stream.next_token() # Eat comma
stream.next_token()
block = self.parser.parse_block(stream, (TAG_ENDWITH, TOKEN_EOF))
expect(stream, TOKEN_TAG, value=TAG_ENDWITH)
return WithNode(tok=tok, args=args, block=block)
# pylint: disable=no-self-use
def parse_argument(self, stream: TokenStream) -> WithKeywordArg:
"""Parse a keyword argument from a stream of tokens."""
key = str(parse_unchained_identifier(stream))
stream.next_token()
expect(stream, TOKEN_COLON)
stream.next_token() # Eat colon
val = parse_expression(stream)
stream.next_token()
return WithKeywordArg(key, val)
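# Minimal usage sketch (not part of the original module). It assumes tags are
# registered on a liquid Environment via add_tag; the template is hypothetical.
def _example_with_block() -> str:
    from liquid import Environment
    env = Environment()
    env.add_tag(WithTag)
    template = env.from_string(
        r"{% with greeting: 'Hello', name: 'World' %}"
        r"{{ greeting }}, {{ name }}!{% endwith %}"
    )
    return template.render()  # "Hello, World!"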
```
#### File: liquid-extra/tests/test_if_not_tag.py
```python
from unittest import TestCase
from typing import Mapping
from typing import NamedTuple
from typing import Any
from liquid.context import Context
from liquid.environment import Environment
from liquid.loaders import DictLoader
from liquid.stream import TokenStream
from liquid import Mode
from liquid.exceptions import Error
from liquid.expression import BooleanExpression
from liquid.expression import IdentifierPathElement
from liquid.expression import Identifier
from liquid.expression import Boolean
from liquid.expression import StringLiteral
from liquid.expression import IntegerLiteral
from liquid.expression import FloatLiteral
from liquid.expression import PrefixExpression
from liquid.expression import InfixExpression
from liquid.golden.if_tag import cases as golden_cases
from liquid.token import Token
from liquid.token import TOKEN_IDENTIFIER
from liquid.token import TOKEN_STRING
from liquid.token import TOKEN_INTEGER
from liquid.token import TOKEN_NIL
from liquid.token import TOKEN_TRUE
from liquid.token import TOKEN_FALSE
from liquid.token import TOKEN_CONTAINS
from liquid.token import TOKEN_DOT
from liquid.token import TOKEN_AND
from liquid.token import TOKEN_OR
from liquid.token import TOKEN_EQ
from liquid.token import TOKEN_NE
from liquid.token import TOKEN_LG
from liquid.token import TOKEN_GT
from liquid.token import TOKEN_LE
from liquid.token import TOKEN_GE
from liquid.token import TOKEN_LPAREN
from liquid.token import TOKEN_RPAREN
from liquid.token import TOKEN_RANGE
from liquid_extra.tags.if_not import tokenize_boolean_not_expression
from liquid_extra.tags.if_not import NotExpressionParser
from liquid_extra.tags.if_not import NotPrefixExpression
from liquid_extra.tags.if_not import TOKEN_NOT
from liquid_extra.tags.if_not import TOKEN_RANGELPAREN
from liquid_extra.tags import IfNotTag
class LexerCase(NamedTuple):
description: str
source: str
expect: Any
class ParserCase(NamedTuple):
description: str
expression: str
expect: Any
class EvalCase(NamedTuple):
description: str
context: Mapping
expression: str
expect: Any
class RenderCase(NamedTuple):
description: str
template: str
expect: str
globals: Mapping[str, Any] = {}
partials: Mapping[str, Any] = {}
class BooleanLexerTestCase(TestCase):
def test_lex_boolean_expression(self):
"""Test that we can tokenize comparison expressions."""
test_cases = [
LexerCase(
"literal boolean",
"false == true",
[
Token(1, TOKEN_FALSE, "false"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_TRUE, "true"),
],
),
LexerCase(
"not nil identifier",
"user != nil",
[
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_NE, "!="),
Token(1, TOKEN_NIL, "nil"),
],
),
LexerCase(
"alternate not nil",
"user <> nil",
[
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_LG, "<>"),
Token(1, TOKEN_NIL, "nil"),
],
),
LexerCase(
"identifier equals string literal",
"user.name == 'brian'",
[
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "name"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_STRING, "brian"),
],
),
LexerCase(
"equality with or",
"user.name == 'bill' or user.name == 'bob'",
[
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "name"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_STRING, "bill"),
Token(1, TOKEN_OR, "or"),
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "name"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_STRING, "bob"),
],
),
LexerCase(
"equality with and",
"user.name == 'bob' and user.age > 45",
[
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "name"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_STRING, "bob"),
Token(1, TOKEN_AND, "and"),
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "age"),
Token(1, TOKEN_GT, ">"),
Token(1, TOKEN_INTEGER, "45"),
],
),
LexerCase(
"greater than or equal to integer literal",
"user.age >= 21",
[
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "age"),
Token(1, TOKEN_GE, ">="),
Token(1, TOKEN_INTEGER, "21"),
],
),
LexerCase(
"less than or equal to integer literal",
"user.age <= 21",
[
Token(1, TOKEN_IDENTIFIER, "user"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "age"),
Token(1, TOKEN_LE, "<="),
Token(1, TOKEN_INTEGER, "21"),
],
),
LexerCase(
"identifier contains string",
"product.tags contains 'sale'",
[
Token(1, TOKEN_IDENTIFIER, "product"),
Token(1, TOKEN_DOT, "."),
Token(1, TOKEN_IDENTIFIER, "tags"),
Token(1, TOKEN_CONTAINS, "contains"),
Token(1, TOKEN_STRING, "sale"),
],
),
LexerCase(
"literal boolean not true",
"false == not true",
[
Token(1, TOKEN_FALSE, "false"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_NOT, "not"),
Token(1, TOKEN_TRUE, "true"),
],
),
LexerCase(
"literal boolean not false",
"false == not false",
[
Token(1, TOKEN_FALSE, "false"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_NOT, "not"),
Token(1, TOKEN_FALSE, "false"),
],
),
LexerCase(
"parens",
"(false and false)",
[
Token(1, TOKEN_LPAREN, "("),
Token(1, TOKEN_FALSE, "false"),
Token(1, TOKEN_AND, "and"),
Token(1, TOKEN_FALSE, "false"),
Token(1, TOKEN_RPAREN, ")"),
],
),
LexerCase(
"range literals",
"(1..3) == (1..3)",
[
Token(1, TOKEN_RANGELPAREN, "("),
Token(1, TOKEN_INTEGER, "1"),
Token(1, TOKEN_RANGE, ".."),
Token(1, TOKEN_INTEGER, "3"),
Token(1, TOKEN_RPAREN, ")"),
Token(1, TOKEN_EQ, "=="),
Token(1, TOKEN_RANGELPAREN, "("),
Token(1, TOKEN_INTEGER, "1"),
Token(1, TOKEN_RANGE, ".."),
Token(1, TOKEN_INTEGER, "3"),
Token(1, TOKEN_RPAREN, ")"),
],
),
]
for case in test_cases:
with self.subTest(msg=case.description):
tokens = list(tokenize_boolean_not_expression(case.source))
self.assertTrue(len(tokens) == len(case.expect))
for got, want in zip(tokens, case.expect):
self.assertEqual(got, want)
class BooleanExpressionParserTestCase(TestCase):
"""Liquid expression parser test cases."""
def _test(self, test_cases, lex_func, parse_func):
"""Helper method for testing lists of Cases."""
for case in test_cases:
with self.subTest(msg=case.description):
tokens = TokenStream(lex_func(case.expression))
expr = parse_func(tokens)
self.assertEqual(expr, case.expect)
def test_parse_boolean_expression(self):
"""Test that we can parse boolean expressions."""
test_cases = [
ParserCase(
"string literal double quotes",
'"foobar"',
BooleanExpression(
expression=StringLiteral("foobar"),
),
),
ParserCase(
"integer literal",
"7",
BooleanExpression(
expression=IntegerLiteral(7),
),
),
ParserCase(
"negative integer literal statement expression",
"-7",
BooleanExpression(
expression=PrefixExpression(
"-",
right=IntegerLiteral(7),
),
),
),
ParserCase(
"float literal statement expression",
"3.14",
BooleanExpression(
expression=FloatLiteral(3.14),
),
),
ParserCase(
"negative float literal statement expression",
"-3.14",
BooleanExpression(
expression=NotPrefixExpression(
"-",
right=FloatLiteral(3.14),
),
),
),
ParserCase(
"single identifier statement expression",
"collection",
BooleanExpression(
expression=Identifier(
path=[IdentifierPathElement("collection")],
),
),
),
ParserCase(
"chained identifier",
"collection.products",
BooleanExpression(
expression=Identifier(
path=[
IdentifierPathElement("collection"),
IdentifierPathElement("products"),
],
),
),
),
ParserCase(
"keyword true",
"true",
BooleanExpression(
expression=Boolean(True),
),
),
ParserCase(
"keyword false",
"false",
BooleanExpression(
expression=Boolean(False),
),
),
ParserCase(
"boolean equality",
"true == true",
BooleanExpression(
expression=InfixExpression(
left=Boolean(True),
operator="==",
right=Boolean(True),
),
),
),
ParserCase(
"boolean inequality",
"true != false",
BooleanExpression(
expression=InfixExpression(
left=Boolean(True),
operator="!=",
right=Boolean(False),
),
),
),
ParserCase(
"boolean inequality alternate",
"true <> false",
BooleanExpression(
expression=InfixExpression(
left=Boolean(True),
operator="<>",
right=Boolean(False),
),
),
),
ParserCase(
"identifier greater than literal",
"user.age > 21",
BooleanExpression(
expression=InfixExpression(
left=Identifier(
path=[
IdentifierPathElement("user"),
IdentifierPathElement("age"),
],
),
operator=">",
right=IntegerLiteral(21),
),
),
),
ParserCase(
"identifier less than literal",
"age < 18",
BooleanExpression(
expression=InfixExpression(
left=Identifier(
path=[IdentifierPathElement("age")],
),
operator="<",
right=IntegerLiteral(18),
),
),
),
ParserCase(
"identifier less than or equal to literal",
"age <= 18",
BooleanExpression(
expression=InfixExpression(
left=Identifier(
path=[IdentifierPathElement("age")],
),
operator="<=",
right=IntegerLiteral(18),
),
),
),
ParserCase(
"identifier greater than or equal to literal",
"age >= 18",
BooleanExpression(
expression=InfixExpression(
left=Identifier(
path=[IdentifierPathElement("age")],
),
operator=">=",
right=IntegerLiteral(18),
),
),
),
ParserCase(
"boolean or boolean",
"true or false",
BooleanExpression(
expression=InfixExpression(
left=Boolean(True),
operator="or",
right=Boolean(False),
),
),
),
ParserCase(
"identifier contains string",
"product.tags contains 'sale'",
BooleanExpression(
expression=InfixExpression(
left=Identifier(
path=[
IdentifierPathElement("product"),
IdentifierPathElement("tags"),
],
),
operator="contains",
right=StringLiteral("sale"),
),
),
),
ParserCase(
"not true",
"not true",
BooleanExpression(
expression=NotPrefixExpression(
"not",
right=Boolean(True),
),
),
),
ParserCase(
"right associative",
"true and false and false or true",
BooleanExpression(
expression=InfixExpression(
left=Boolean(True),
operator="and",
right=InfixExpression(
left=Boolean(False),
operator="and",
right=InfixExpression(
left=Boolean(False),
operator="or",
right=Boolean(True),
),
),
),
),
),
ParserCase(
"grouped boolean",
"(true and false and false) or true",
BooleanExpression(
expression=InfixExpression(
left=InfixExpression(
left=Boolean(True),
operator="and",
right=InfixExpression(
left=Boolean(False),
operator="and",
right=Boolean(False),
),
),
operator="or",
right=Boolean(True),
),
),
),
ParserCase(
"parens",
"(true and false and false)",
BooleanExpression(
expression=InfixExpression(
left=Boolean(True),
operator="and",
right=InfixExpression(
left=Boolean(False),
operator="and",
right=Boolean(False),
),
),
),
),
]
expression_parser = NotExpressionParser()
self._test(
test_cases,
tokenize_boolean_not_expression,
expression_parser.parse_boolean_expression,
)
class BooleanExpressionEvalTestCase(TestCase):
"""Boolean expression evaluator test cases."""
def _test(self, test_cases, lex_func, parse_func):
"""Utility method for evaluating lists of test cases."""
env = Environment()
for case in test_cases:
context = Context(env, case.context)
with self.subTest(msg=case.description):
tokens = TokenStream(lex_func(case.expression))
expr = parse_func(tokens)
res = expr.evaluate(context)
self.assertEqual(res, case.expect)
def test_eval_boolean_expression(self):
"""Test that we can evaluate boolean expressions."""
test_cases = [
EvalCase(
description="true literal",
context={},
expression="true",
expect=True,
),
EvalCase(
description="false literal",
context={},
expression="false",
expect=False,
),
EvalCase(
description="string literal",
context={},
expression="'some'",
expect=True,
),
EvalCase(
description="empty string",
context={},
expression="''",
expect=True,
),
EvalCase(
description="negative integer",
context={},
expression="-7",
expect=True,
),
EvalCase(
description="truthy identifier",
context={"collection": {"title": "foo"}},
expression="collection.title",
expect=True,
),
EvalCase(
description="falsey identifier",
context={"collection": {"title": "foo"}},
expression="collection.tags",
expect=False,
),
EvalCase(
                description="truthy comparison",
context={"user": {"age": 21}},
expression="user.age >= 21",
expect=True,
),
EvalCase(
                description="not equal comparison",
context={"user": {"age": 21}},
expression="user.age != 21",
expect=False,
),
EvalCase(
                description="truthy comparison and logic operator",
context={
"user": {"age": 20},
"collection": {
"tags": [
"safe",
]
},
},
expression="user.age >= 21 or collection.tags contains 'safe'",
expect=True,
),
EvalCase(
description="boolean with logic operators",
context={},
expression="true and false and false or true",
expect=False,
),
EvalCase(
description="empty array",
context={"a": {"array": []}},
expression="a.array == empty",
expect=True,
),
EvalCase(
description="empty object",
context={"a": {"obj": {}}},
expression="a.obj == empty",
expect=True,
),
EvalCase(
description="not empty array",
context={"a": {"array": [1, 2]}},
expression="a.array == empty",
expect=False,
),
EvalCase(
description="not empty object",
context={"a": {"obj": {"foo": "bar"}}},
expression="a.obj == empty",
expect=False,
),
EvalCase(
description="invalid comparison to empty",
context={"a": {"foo": 1}},
expression="a.foo == empty",
expect=False,
),
EvalCase(
description="empty equals empty",
context={},
expression="empty == empty",
expect=True,
),
EvalCase(
description="empty not equals true",
context={},
expression="empty != true",
expect=True,
),
EvalCase(
description="nil equals nil",
context={},
expression="nil == nil",
expect=True,
),
EvalCase(
description="string contains string",
context={},
expression="'hello' contains 'ell'",
expect=True,
),
EvalCase(
description="string contains int",
context={},
expression="'hel1lo' contains 1",
expect=True,
),
EvalCase(
description="string not equal int",
context={},
expression="'hello' != 1",
expect=True,
),
EvalCase(
description="array contains",
context={"foo": [1, 2, 4]},
expression="foo contains 2",
expect=True,
),
EvalCase(
description="array does not contain",
context={"foo": [1, 2, 4]},
expression="foo contains 3",
expect=False,
),
EvalCase(
description="int equals",
context={},
expression="1 == 1",
expect=True,
),
EvalCase(
description="int less than",
context={},
expression="1 < 2",
expect=True,
),
EvalCase(
description="int less than or equal",
context={},
expression="1 <= 1",
expect=True,
),
EvalCase(
description="int greater than",
context={},
expression="1 > 0",
expect=True,
),
EvalCase(
description="int greater than or equal",
context={},
expression="1 >= 1",
expect=True,
),
EvalCase(
description="true equals true",
context={},
expression="true == true",
expect=True,
),
EvalCase(
description="true equal false",
context={},
expression="true == false",
expect=False,
),
EvalCase(
description="true not equal false",
context={},
expression="true != false",
expect=True,
),
EvalCase(
description="string equals int",
context={},
expression="'2' == 2",
expect=False,
),
EvalCase(
description="empty string is truthy",
context={},
expression="''",
expect=True,
),
EvalCase(
description="empty string and string is truthy",
context={},
expression="'' and 'foo'",
expect=True,
),
EvalCase(
description="float equals int",
context={},
expression="1 == 1.0",
expect=True,
),
EvalCase(
description="not true literal",
context={},
expression="not true",
expect=False,
),
EvalCase(
description="not false literal",
context={},
expression="not false",
expect=True,
),
EvalCase(
description="not nil literal",
context={},
expression="not nil",
expect=True,
),
EvalCase(
description="not empty",
context={},
expression="not empty",
expect=False,
),
EvalCase(
description="not string literal",
context={},
expression="not 'some'",
expect=False,
),
EvalCase(
description="not empty string",
context={},
expression="not ''",
expect=False,
),
EvalCase(
description="boolean with logic not operators",
context={},
expression="true and not false",
expect=True,
),
EvalCase(
description="grouped boolean with logic operators",
context={},
expression="(true and false and false) or true",
expect=True,
),
EvalCase(
description="nested grouped boolean with logic operators",
context={},
expression="((true or false) or (false)) and true",
expect=True,
),
EvalCase(
description="grouped boolean with not",
context={},
expression="(true and false and false) or not true",
expect=False,
),
EvalCase(
description="range literal equals range literal",
context={},
expression="(1..3) == (1..3)",
expect=True,
),
]
expression_parser = NotExpressionParser()
self._test(
test_cases,
tokenize_boolean_not_expression,
expression_parser.parse_boolean_expression,
)
class BooleanRenderTestCases(TestCase):
"""Test cases for testing template renders."""
def _test(self, test_cases):
"""Helper method for testing lists of test cases."""
for case in test_cases:
env = Environment(loader=DictLoader(case.partials))
env.add_tag(IfNotTag)
template = env.from_string(case.template, globals=case.globals)
with self.subTest(msg=case.description):
result = template.render()
self.assertEqual(result, case.expect)
def test_if_not_tag(self):
"""Test that we can render `if` tags."""
test_cases = [
RenderCase(
description="condition with literal consequence",
template=r"{% if product.title == 'foo' %}bar{% endif %}",
expect="bar",
globals={"product": {"title": "foo"}},
),
RenderCase(
description=(
"condition with literal consequence and literal alternative"
),
template=(
r"{% if product.title == 'hello' %}bar{% else %}baz{% endif %}"
),
expect="baz",
globals={"product": {"title": "foo"}},
),
RenderCase(
description="condition with conditional alternative",
template=(
r"{% if product.title == 'hello' %}"
r"foo"
r"{% elsif product.title == 'foo' %}"
r"bar"
r"{% endif %}"
),
expect="bar",
globals={"product": {"title": "foo"}},
),
RenderCase(
description=(
"condition with conditional alternative and final alternative"
),
template=(
r"{% if product.title == 'hello' %}"
r"foo"
r"{% elsif product.title == 'goodbye' %}"
r"bar"
r"{% else %}"
r"baz"
r"{% endif %}"
),
expect="baz",
globals={"product": {"title": "foo"}},
),
RenderCase(
description="truthy-ness of a dictionary",
template=r"{% if product %}bar{% else %}foo{% endif %}",
expect="bar",
globals={"product": {"title": "foo"}},
),
RenderCase(
description="falsey-ness of a literal nil",
template=r"{% if nil %}bar{% else %}foo{% endif %}",
expect="foo",
),
RenderCase(
                description="falsey-ness of a non-existent name",
template=r"{% if nosuchthing %}bar{% else %}foo{% endif %}",
expect="foo",
),
RenderCase(
description="nested condition in the consequence block",
template=(
r"{% if product %}"
r"{% if title == 'Hello' %}baz{% endif %}"
r"{% endif %}"
),
expect="baz",
globals={
"product": {"title": "foo"},
"title": "Hello",
},
),
RenderCase(
description="nested condition, alternative in the consequence block",
template=(
r"{% if product %}"
r"{% if title == 'goodbye' %}"
r"baz"
r"{% else %}"
r"hello"
r"{% endif %}"
r"{% endif %}"
),
expect="hello",
globals={"product": {"title": "foo"}, "title": "Hello"},
),
RenderCase(
description="false",
template=r"{% if false %}{% endif %}",
expect="",
),
RenderCase(
description="contains condition",
template=r"{% if product.tags contains 'garden' %}baz{% endif %}",
expect="baz",
globals={"product": {"tags": ["sports", "garden"]}},
),
RenderCase(
description="not equal condition",
template=r"{% if product.title != 'foo' %}baz{% endif %}",
expect="",
globals={"product": {"title": "foo"}},
),
RenderCase(
description="alternate not equal condition",
template=r"{% if product.title <> 'foo' %}baz{% endif %}",
expect="",
globals={"product": {"title": "foo"}},
),
RenderCase(
description="blank empty 'if'",
template=r"{% if true %} {% elsif false %} {% else %} {% endif %}",
expect="",
),
RenderCase(
description="blank nested block",
template=(
r"{% if true %} "
r"{% comment %} this is blank {% endcomment %} "
r"{% endif %}"
),
expect="",
),
RenderCase(
description="not false",
template=r"{% if not false %}foo{% endif %}",
expect="foo",
),
RenderCase(
description="not true",
template=r"{% if not true %}foo{% endif %}",
expect="",
),
RenderCase(
description="literal boolean filter",
template=r"{{ false | default: true }}",
expect="true",
),
RenderCase(
description="not comparison to empty",
template=r"{% if not '' == empty %}foo{% endif %}",
expect="",
),
RenderCase(
description="not contains",
template=r"{% if not foo contains 'z' %}bar{% endif %}",
expect="bar",
globals={"foo": ["a", "b", "c"]},
),
RenderCase(
description="and not",
template=r"{% if not foo and not bar %}hello{% endif %}",
expect="hello",
globals={"foo": False, "bar": False},
),
RenderCase(
description="true and not",
template=r"{% if foo and not bar %}hello{% endif %}",
expect="hello",
globals={"foo": True, "bar": False},
),
RenderCase(
description="not equals",
template=r"{% if not foo == True %}hello{% endif %}",
expect="hello",
globals={"foo": False},
),
RenderCase(
description="not not equals False",
template=r"{% if not foo != true %}hello{% endif %}",
expect="",
globals={"foo": False},
),
RenderCase(
description="not not equals true",
template=r"{% if not foo != true %}hello{% endif %}",
expect="hello",
globals={"foo": True},
),
RenderCase(
description="not contains with parens",
template=r"{% if not (foo contains 'z') %}bar{% endif %}",
expect="bar",
globals={"foo": ["a", "b", "c"]},
),
]
self._test(test_cases)
def test_golden_if(self):
"""Test liquid's golden test cases."""
for case in golden_cases:
env = Environment(
loader=DictLoader(case.partials),
tolerance=Mode.STRICT,
)
with self.subTest(msg=case.description):
if case.error:
with self.assertRaises(Error):
template = env.from_string(case.template, globals=case.globals)
result = template.render()
else:
template = env.from_string(case.template, globals=case.globals)
result = template.render()
self.assertEqual(result, case.expect)
```
#### File: liquid-extra/tests/test_json_filter.py
```python
from dataclasses import dataclass
from dataclasses import is_dataclass
from dataclasses import asdict
from liquid.exceptions import FilterArgumentError
from liquid_extra.filters import JSON
from .base import FilterTestCase
from .base import RenderFilterTestCase
from .base import Case
from .base import RenderCase
@dataclass
class MockData:
length: int
width: int
class JSONFilterTestCase(FilterTestCase):
"""Test the JSON template filter."""
def test_json_filter(self):
test_cases = [
Case(
description="serialize a string",
val="hello",
args=[],
kwargs={},
expect='"hello"',
),
Case(
description="serialize an int",
val=42,
args=[],
kwargs={},
expect="42",
),
Case(
description="serialize a dict with list",
val={"foo": [1, 2, 3]},
args=[],
kwargs={},
expect='{"foo": [1, 2, 3]}',
),
Case(
description="serialize an arbitrary object",
val={"foo": MockData(3, 4)},
args=[],
kwargs={},
expect=FilterArgumentError,
),
]
self.env.add_filter(JSON.name, JSON())
self._test(self.ctx.filter(JSON.name), test_cases)
def test_json_with_encoder_func(self):
test_cases = [
Case(
description="serialize a dataclass",
val={"foo": MockData(3, 4)},
args=[],
kwargs={},
expect=r'{"foo": {"length": 3, "width": 4}}',
),
Case(
description="serialize an arbitrary object",
val={"foo": object()},
args=[],
kwargs={},
expect=FilterArgumentError,
),
]
def default(obj):
if is_dataclass(obj):
return asdict(obj)
raise TypeError(f"can't serialize object {obj}")
self.env.add_filter(JSON.name, JSON(default=default))
self._test(self.ctx.filter(JSON.name), test_cases)
class RenderJSONFilterTestCase(RenderFilterTestCase):
"""Test the JSON filter from a template."""
def test_render_json_filter(self):
test_cases = [
RenderCase(
description="render a string literal as JSON",
template=r"{{ 'hello' | json }}",
expect='"hello"',
globals={},
partials={},
),
RenderCase(
description="render a tuple of ints",
template=r"{{ dat | json }}",
expect="[1, 2, 3]",
globals={"dat": (1, 2, 3)},
partials={},
),
]
self._test(JSON(), test_cases)
def test_render_json_with_encoder_func(self):
test_cases = [
RenderCase(
description="render an arbitrary object as JSON",
template=r"{{ foo | json }}",
expect=FilterArgumentError,
globals={"foo": object()},
partials={},
),
RenderCase(
description="render a dataclass as JSON",
template=r"{{ foo | json }}",
expect=r'{"length": 3, "width": 4}',
globals={"foo": MockData(3, 4)},
partials={},
),
]
def default(obj):
if is_dataclass(obj):
return asdict(obj)
raise TypeError(f"can't serialize object {obj}")
self._test(JSON(default=default), test_cases)
```
#### File: liquid-extra/tests/test_t_filter.py
```python
from liquid_extra.filters import Translate
from .base import FilterTestCase
from .base import RenderFilterTestCase
from .base import Case
from .base import RenderCase
mock_locales = {
"default": {
"layout": {
"greeting": r"Hello {{ name }}",
},
"cart": {
"general": {
"title": "Shopping Basket",
},
},
"pagination": {
"next": "Next Page",
},
},
"de": {
"layout": {
"greeting": r"Hallo {{ name }}",
},
"cart": {
"general": {
"title": "Warenkorb",
},
},
"pagination": {
"next": "Nächste Seite",
},
},
}
class TranslateFilterTestCase(FilterTestCase):
"""Test the Translate template filter."""
def test_translate_filter(self):
test_cases = [
Case(
description="default locale",
val="cart.general.title",
args=[],
kwargs={},
expect="Shopping Basket",
),
Case(
description="default locale missing key",
val="foo.bar",
args=[],
kwargs={},
expect="foo.bar",
),
Case(
description="default locale not a string",
val=42,
args=[],
kwargs={},
expect="42",
),
Case(
                description="default locale interpolation",
val="layout.greeting",
args=[],
kwargs={"name": "World"},
expect="Hello World",
),
]
self.env.add_filter(Translate.name, Translate(locales=mock_locales))
self._test(self.ctx.filter(Translate.name), test_cases)
def test_translate_filter_with_locale(self):
test_cases = [
Case(
description="de locale",
val="cart.general.title",
args=[],
kwargs={},
expect="Warenkorb",
),
Case(
description="de locale missing key",
val="foo.bar",
args=[],
kwargs={},
expect="foo.bar",
),
Case(
description="de locale not a string",
val=42,
args=[],
kwargs={},
expect="42",
),
Case(
                description="de locale interpolation",
val="layout.greeting",
args=[],
kwargs={"name": "Welt"},
expect="Hallo Welt",
),
]
self.env.add_filter(Translate.name, Translate(locales=mock_locales))
self.ctx.assign("locale", "de")
self._test(self.ctx.filter(Translate.name), test_cases)
class RenderTranslateFilterTestCase(RenderFilterTestCase):
"""Test the Translate filter from a template."""
def test_render_translate_filter(self):
test_cases = [
RenderCase(
description="default locale",
template=r"{{ 'cart.general.title' | t }}",
expect="Shopping Basket",
globals={},
partials={},
),
RenderCase(
description="default locale missing key",
template=r"{{ 'foobar' | t }}",
expect="foobar",
globals={},
partials={},
),
RenderCase(
                description="default locale interpolation",
template=r"{{ 'layout.greeting' | t: name: 'World' }}",
expect="Hello World",
globals={},
partials={},
),
RenderCase(
                description="default locale interpolation from context",
template=r"{{ 'layout.greeting' | t: name: user.name }}",
expect="Hello World",
globals={"user": {"name": "World"}},
partials={},
),
RenderCase(
description="de locale",
template=r"{{ 'cart.general.title' | t }}",
expect="Warenkorb",
globals={"locale": "de"},
partials={},
),
]
self._test(Translate(locales=mock_locales), test_cases)
```
#### File: liquid-extra/tests/test_with_tag.py
```python
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from unittest import TestCase
from liquid import Environment
from liquid_extra.tags import WithTag
@dataclass
class Case:
"""Table driven test helper."""
description: str
template: str
expect: str
globals: Dict[str, object] = field(default_factory=dict)
class RenderWithTagTestCase(TestCase):
def test_render_with_tag(self):
"""Test that we can render a `with` tag."""
test_cases = [
Case(
description="block scoped variable",
template=r"{{ x }}{% with x: 'foo' %}{{ x }}{% endwith %}{{ x }}",
expect="foo",
),
Case(
description="block scoped alias",
template=(
r"{% with p: collection.products.first %}"
r"{{ p.title }}"
r"{% endwith %}"
r"{{ p.title }}"
r"{{ collection.products.first.title }}"
),
expect="A ShoeA Shoe",
globals={"collection": {"products": [{"title": "A Shoe"}]}},
),
Case(
description="multiple block scoped variables",
template=(
r"{% with a: 1, b: 3.4 %}"
r"{{ a }} + {{ b }} = {{ a | plus: b }}"
r"{% endwith %}"
),
expect="1 + 3.4 = 4.4",
),
]
env = Environment()
env.add_tag(WithTag)
for case in test_cases:
with self.subTest(msg=case.description):
template = env.from_string(case.template, globals=case.globals)
self.assertEqual(template.render(), case.expect)
```
|
{
"source": "jg-rp/liquid",
"score": 3
}
|
#### File: builtin/filters/math.py
```python
import math
import decimal
from typing import Optional
from typing import Union
from liquid.context import is_undefined
from liquid.exceptions import FilterArgumentError
from liquid.filter import math_filter
from liquid.filter import num_arg
from liquid.filter import int_arg
DecimalT = decimal.Decimal
NumberT = Union[float, int]
@math_filter
def abs_(num: NumberT) -> NumberT:
"""Return the absolute value of a number.
    Accepts an int, float, or a string representation of an int or float.
"""
return abs(num)
@math_filter
def at_most(num: NumberT, other: NumberT) -> NumberT:
    """Return `num` or `other`, whichever is smaller.
    Accepts an int, float, or a string representation of an int or float.
"""
other = num_arg(other, default=0)
return min(num, other)
@math_filter
def at_least(num: NumberT, other: NumberT) -> NumberT:
    """Return `num` or `other`, whichever is greater.
    Accepts an int, float, or a string representation of an int or float.
"""
other = num_arg(other, default=0)
return max(num, other)
@math_filter
def ceil(num: NumberT) -> NumberT:
    """Return the ceiling of `num` as an Integral.
    Accepts an int, float, or a string representation of an int or float.
"""
return math.ceil(num)
@math_filter
def divided_by(num: NumberT, other: object) -> NumberT:
"""Divide `num` by `other`."""
other = num_arg(other, default=0)
try:
if isinstance(other, int):
return num // other
return num / other
except ZeroDivisionError as err:
raise FilterArgumentError(f"divided_by: can't divide by {other}") from err
@math_filter
def floor(num: NumberT) -> NumberT:
    """Return the floor of `num` as an Integral.
    Accepts an int, float, or a string representation of an int or float.
"""
return math.floor(num)
@math_filter
def minus(num: NumberT, other: NumberT) -> NumberT:
"""Subtract one number from another."""
other = num_arg(other, default=0)
if isinstance(num, int) and isinstance(other, int):
return num - other
return float(DecimalT(str(num)) - DecimalT(str(other)))
@math_filter
def plus(num: NumberT, other: NumberT) -> NumberT:
"""Add one number to another."""
other = num_arg(other, default=0)
if isinstance(num, int) and isinstance(other, int):
return num + other
return float(DecimalT(str(num)) + DecimalT(str(other)))
@math_filter
def round_(num: NumberT, ndigits: Optional[int] = None) -> NumberT:
"""Round a number to a given precision in decimal digits."""
if ndigits or is_undefined(ndigits):
ndigits = int_arg(ndigits, default=0)
return round(num, ndigits)
return round(num)
@math_filter
def times(num: NumberT, other: NumberT) -> NumberT:
"""Multiply a value by an integer or float."""
other = num_arg(other, default=0)
if isinstance(num, int) and isinstance(other, int):
return num * other
return float(DecimalT(str(num)) * DecimalT(str(other)))
@math_filter
def modulo(num: NumberT, other: NumberT) -> NumberT:
    """Divide a value by a number and return the remainder."""
other = num_arg(other, default=0)
try:
if isinstance(num, int) and isinstance(other, int):
return num % other
return float(DecimalT(str(num)) % DecimalT(str(other)))
except ZeroDivisionError as err:
raise FilterArgumentError(f"modulo: can't divide by {other}") from err
```
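As a quick illustration of the filters above, the following sketch (not taken from the repository) drives them through the template API seen elsewhere in this document, assuming they are registered under their usual built-in names; the expected outputs in the comments follow the function bodies above.
```python
from liquid import Environment

env = Environment()

print(env.from_string("{{ 183.357 | ceil }}").render())      # 184
print(env.from_string("{{ 183.357 | round: 2 }}").render())  # 183.36
print(env.from_string("{{ 10 | divided_by: 3 }}").render())  # 3 (floor division for two ints)
print(env.from_string("{{ 5 | at_most: 3 }}").render())      # 3
print(env.from_string("{{ 5 | minus: 1.5 }}").render())      # 3.5
```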
#### File: builtin/filters/misc.py
```python
from __future__ import annotations
import datetime
import functools
from typing import Any
from typing import Union
from typing import TYPE_CHECKING
from dateutil import parser
try:
from markupsafe import Markup
except ImportError:
from liquid.exceptions import Markup # type: ignore
from liquid import is_undefined
from liquid.filter import liquid_filter
from liquid.filter import with_environment
from liquid.exceptions import FilterArgumentError
from liquid.expression import EMPTY
if TYPE_CHECKING:
from liquid import Environment
@liquid_filter
def size(obj: Any) -> int:
"""Return the length of an array or string."""
try:
return len(obj)
except TypeError:
return 0
@liquid_filter
def default(obj: Any, default_: object = "", *, allow_false: bool = False) -> Any:
"""Return a default value if the input is nil, false, or empty."""
_obj = obj
if hasattr(obj, "__liquid__"):
_obj = obj.__liquid__()
if allow_false is True and _obj is False:
return obj
if _obj in (None, False, EMPTY) or is_undefined(_obj):
return default_
return obj
@with_environment
@liquid_filter
@functools.lru_cache(maxsize=10)
def date(
dat: Union[datetime.datetime, str], fmt: str, *, environment: Environment
) -> str:
    """Format a datetime according to the given format string."""
if is_undefined(dat):
return ""
if is_undefined(fmt):
return str(dat)
if isinstance(dat, str):
if dat in ("now", "today"):
dat = datetime.datetime.now()
else:
dat = parser.parse(dat)
if not isinstance(dat, (datetime.datetime, datetime.date)):
raise FilterArgumentError(
f"date expected datetime.datetime, found {type(dat).__name__}"
)
rv = dat.strftime(fmt)
if environment.autoescape and isinstance(fmt, Markup):
return Markup(rv)
return rv
```
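A similar hedged sketch for `size`, `default` and `date`, again assuming they are registered under their usual built-in filter names; the `false | default: true` case mirrors a render test that appears earlier in this document, and the other outputs follow the code above.
```python
from liquid import Environment

env = Environment()

print(env.from_string("{{ 'hello' | size }}").render())                     # 5
print(env.from_string("{{ nosuchthing | default: 'fallback' }}").render())  # fallback
print(env.from_string("{{ false | default: true }}").render())              # true
print(env.from_string("{{ '2021-03-30' | date: '%d %b %Y' }}").render())    # 30 Mar 2021
```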
#### File: builtin/tags/include_tag.py
```python
import sys
from typing import Optional
from typing import Dict
from typing import Any
from typing import TextIO
from typing import Tuple
from liquid.ast import Node
from liquid.builtin.drops import IterableDrop
from liquid.context import Context
from liquid.expression import Expression
from liquid.expression import Identifier
from liquid.exceptions import LiquidSyntaxError
from liquid.lex import tokenize_include_expression
from liquid.stream import TokenStream
from liquid.tag import Tag
from liquid.parse import expect
from liquid.parse import parse_identifier
from liquid.parse import parse_expression
from liquid.parse import parse_string_or_identifier
from liquid.parse import parse_unchained_identifier
from liquid.token import Token
from liquid.token import TOKEN_TAG
from liquid.token import TOKEN_EXPRESSION
from liquid.token import TOKEN_IDENTIFIER
from liquid.token import TOKEN_WITH
from liquid.token import TOKEN_FOR
from liquid.token import TOKEN_AS
from liquid.token import TOKEN_COMMA
from liquid.token import TOKEN_COLON
from liquid.token import TOKEN_EOF
TAG_INCLUDE = sys.intern("include")
class IncludeNode(Node):
"""Parse tree node for the built-in "include" tag."""
__slots__ = ("tok", "name", "var", "alias", "args")
def __init__(
self,
tok: Token,
name: Expression,
var: Optional[Identifier] = None,
alias: Optional[str] = None,
args: Optional[Dict[str, Any]] = None,
):
self.tok = tok
self.name = name
self.var = var
self.alias = alias
self.args = args or {}
def __str__(self) -> str:
buf = [f"{self.name}"]
if self.var:
buf.append(f" with {self.var}")
if self.alias:
buf.append(f" as {self.alias}")
if self.args:
buf.append(", ")
args = (f"{key}={val}" for key, val in self.args.items())
buf.append(", ".join(args))
return f"include({''.join(buf)})"
def __repr__(self) -> str:
return f"IncludeNode(tok={self.tok!r}, name={self.name})" # pragma: no cover
def render_to_output(self, context: Context, buffer: TextIO) -> Optional[bool]:
name = self.name.evaluate(context)
template = context.get_template(str(name))
namespace: Dict[str, object] = {}
# Add any keyword arguments to the new template context.
for key, val in self.args.items():
namespace[key] = val.evaluate(context)
with context.extend(namespace):
# Bind a variable to the included template.
if self.var is not None:
val = self.var.evaluate(context)
key = self.alias or template.name.split(".")[0]
# If the variable is array-like, render the template once for each
# item in the array.
#
# The reference implementation does not seem to distinguish between
# "for" and "with". Just checks for array-like-ness.
if isinstance(val, (tuple, list, IterableDrop)):
# NOTE: What if an included template with a bound array updates
# a keyword argument value? Do we need to update namespace
# arguments after each loop?
#
# The reference implementation seems to evaluate arguments once,
# before the loop.
for itm in val:
namespace[key] = itm
template.render_with_context(context, buffer, partial=True)
else:
namespace[key] = val
template.render_with_context(context, buffer, partial=True)
else:
template.render_with_context(context, buffer, partial=True)
return True
async def render_to_output_async(
self, context: Context, buffer: TextIO
) -> Optional[bool]:
"""Same as ``render_to_output`` but uses async versions of get_template and
render_with_context."""
name = await self.name.evaluate_async(context)
template = await context.get_template_async(str(name))
namespace: Dict[str, object] = {}
for key, val in self.args.items():
namespace[key] = await val.evaluate_async(context)
with context.extend(namespace):
if self.var is not None:
val = await self.var.evaluate_async(context)
key = self.alias or template.name.split(".")[0]
if isinstance(val, (tuple, list, IterableDrop)):
for itm in val:
namespace[key] = itm
await template.render_with_context_async(
context, buffer, partial=True
)
else:
namespace[key] = val
await template.render_with_context_async(
context, buffer, partial=True
)
else:
await template.render_with_context_async(context, buffer, partial=True)
return True
class IncludeTag(Tag):
"""The built-in "include" tag."""
name = TAG_INCLUDE
block = False
def parse(self, stream: TokenStream) -> IncludeNode:
"""Read an IncludeNode from the given stream of tokens."""
expect(stream, TOKEN_TAG, value=self.name)
tok = stream.current
stream.next_token()
expect(stream, TOKEN_EXPRESSION)
expr_stream = TokenStream(tokenize_include_expression(stream.current.value))
# Need a string or identifier that resolves to a string. This is the name
# of the template to be included.
name = parse_string_or_identifier(expr_stream, linenum=tok.linenum)
expr_stream.next_token()
identifier: Optional[Identifier] = None
alias: Optional[str] = None
# Optionally bind a variable to the included template context
if expr_stream.current.type in (TOKEN_WITH, TOKEN_FOR):
expr_stream.next_token() # Eat 'with' or 'for'
expect(expr_stream, TOKEN_IDENTIFIER)
identifier = parse_identifier(expr_stream)
expr_stream.next_token()
# The bound variable will take the name of the template by default,
# or an alias if an identifier follows the "as" keyword.
if expr_stream.current.type == TOKEN_AS:
expr_stream.next_token() # Eat 'as'
expect(expr_stream, TOKEN_IDENTIFIER)
alias = str(parse_unchained_identifier(expr_stream))
expr_stream.next_token()
# Zero or more keyword arguments
args = {}
# The first keyword argument might follow immediately or after a comma.
if expr_stream.current.type == TOKEN_IDENTIFIER:
key, val = _parse_argument(expr_stream)
args[key] = val
while expr_stream.current.type != TOKEN_EOF:
if expr_stream.current.type == TOKEN_COMMA:
expr_stream.next_token() # Eat comma
key, val = _parse_argument(expr_stream)
args[key] = val
else:
typ = expr_stream.current.type
raise LiquidSyntaxError(
f"expected a comma separated list of arguments, found {typ}",
linenum=tok.linenum,
)
return IncludeNode(tok, name=name, var=identifier, alias=alias, args=args)
def _parse_argument(stream: TokenStream) -> Tuple[str, Expression]:
key = str(parse_unchained_identifier(stream))
stream.next_token()
expect(stream, TOKEN_COLON)
stream.next_token() # Eat colon
val = parse_expression(stream)
stream.next_token()
return key, val
```
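The following rough sketch exercises the `include` tag above with a `DictLoader`, which appears in the test modules earlier in this document; the loader import path, template name and variables are illustrative assumptions rather than repository examples.
```python
from liquid import Environment
from liquid.loaders import DictLoader  # import path assumed

env = Environment(
    loader=DictLoader({"product-info": r"{{ p.title }}: {{ note }}"})
)
# Bind an object to an alias with "as" and pass a keyword argument after a comma,
# matching the parse method above.
template = env.from_string(
    r"{% include 'product-info' with collection.products.first as p, note: 'on sale' %}",
    globals={"collection": {"products": [{"title": "A Shoe"}]}},
)
print(template.render())  # A Shoe: on sale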
#### File: builtin/tags/increment_tag.py
```python
import sys
from typing import Optional
from typing import TextIO
from liquid.ast import Node
from liquid.context import Context
from liquid.lex import tokenize_identifier
from liquid.stream import TokenStream
from liquid.tag import Tag
from liquid.parse import expect
from liquid.parse import parse_unchained_identifier
from liquid.token import Token
from liquid.token import TOKEN_TAG
from liquid.token import TOKEN_EXPRESSION
TAG_INCREMENT = sys.intern("increment")
class IncrementNode(Node):
"""Parse tree node for the built-in "increment" tag."""
__slots__ = ("tok", "identifier")
def __init__(self, tok: Token, identifier: str):
self.tok = tok
self.identifier = identifier
def __str__(self) -> str:
return f"{self.identifier} += 1"
def render_to_output(self, context: Context, buffer: TextIO) -> Optional[bool]:
buffer.write(str(context.increment(self.identifier)))
return True
class IncrementTag(Tag):
"""The built-in "increment" tag."""
name = TAG_INCREMENT
block = False
def parse(self, stream: TokenStream) -> IncrementNode:
expect(stream, TOKEN_TAG, value=TAG_INCREMENT)
tok = stream.current
stream.next_token()
expect(stream, TOKEN_EXPRESSION)
tokens = TokenStream(tokenize_identifier(stream.current.value))
ident = parse_unchained_identifier(tokens)
return IncrementNode(tok=tok, identifier=str(ident))
```
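A quick, hedged illustration of the counter behaviour backing `IncrementNode`; the zero-based first output assumes standard Liquid `increment` semantics via `context.increment`.
```python
from liquid import Environment

env = Environment()
template = env.from_string(
    r"{% increment counter %} {% increment counter %} {% increment counter %}"
)
print(template.render())  # 0 1 2
```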
#### File: builtin/tags/tablerow_tag.py
```python
import math
import sys
from itertools import islice
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import TextIO
from typing import Iterator
from liquid.token import Token
from liquid.token import TOKEN_TAG
from liquid.token import TOKEN_EXPRESSION
from liquid.ast import Node
from liquid.ast import BlockNode
from liquid.context import Context
from liquid.expression import LoopExpression
from liquid.expression import NIL
from liquid.lex import tokenize_loop_expression
from liquid.parse import expect
from liquid.parse import parse_loop_expression
from liquid.parse import get_parser
from liquid.tag import Tag
from liquid.stream import TokenStream
TAG_TABLEROW = sys.intern("tablerow")
TAG_ENDTABLEROW = sys.intern("endtablerow")
# pylint: disable=too-many-instance-attributes
class TableRow(Mapping[str, object]):
"""Table row helper variables."""
__slots__ = (
"name",
"it",
"length",
"ncols",
"item",
"first",
"last",
"index",
"index0",
"rindex",
"rindex0",
"col",
"col0",
"col_first",
"col_last",
"_keys",
"row",
"nrows",
)
def __init__(self, name: str, it: Iterator[Any], length: int, ncols: int) -> None:
self.name = name
self.it = it
self.length = length
self.ncols = ncols
self.item = None
self.first = False
self.last = False
self.index = 0
self.index0 = -1
self.rindex = self.length + 1
self.rindex0 = self.length
self.col = 0
self.col0 = -1
self.col_first = True
self.col_last = False
# Zero based row counter is not exposed to templates.
self.row = 0
self.nrows = math.ceil(self.length / self.ncols)
self._keys: List[str] = [
"length",
"index",
"index0",
"rindex",
"rindex0",
"first",
"last",
"col",
"col0",
"col_first",
"col_last",
]
def __repr__(self) -> str: # pragma: no cover
return f"TableRow(name='{self.name}', length={self.length})"
def __getitem__(self, key: str) -> object:
if key in self._keys:
return getattr(self, key)
raise KeyError(key)
def __len__(self) -> int:
return len(self._keys)
def __iter__(self) -> Iterator[Any]:
return self
def __next__(self) -> object:
return next(self.it)
    def step(self) -> None:
        """Advance the counters for the current/next loop iteration and update
        the tablerowloop helper variables."""
self.index += 1
self.index0 += 1
self.rindex -= 1
self.rindex0 -= 1
if self.index0 == 0:
self.first = True
else:
self.first = False
if self.rindex0 == 0:
self.last = True
else:
self.last = False
self.col0 = self.index0 % self.ncols
self.col = self.col0 + 1
if self.col == 1:
self.col_first = True
else:
self.col_first = False
if self.col == self.ncols:
self.col_last = True
else:
self.col_last = False
if self.col == 1:
self.row += 1
class TablerowNode(Node):
"""Parse tree node for the built-in "tablerow" tag."""
__slots__ = ("tok", "expression", "block")
def __init__(
self,
tok: Token,
expression: LoopExpression,
block: BlockNode,
):
self.tok = tok
self.expression = expression
self.block = block
def __str__(self) -> str:
return f"tablerow({ self.expression }) {{ {self.block} }}"
def render_to_output(self, context: Context, buffer: TextIO) -> Optional[bool]:
name = self.expression.name
loop_iter, length = self.expression.evaluate(context)
if self.expression.cols and self.expression.cols != NIL:
cols = self.expression.cols.evaluate(context)
assert isinstance(cols, int)
else:
cols = length
loop_iter = grouper(loop_iter, cols)
tablerow = TableRow(name, loop_iter, length, cols)
namespace: Dict[str, object] = {
"tablerowloop": tablerow,
name: None,
}
with context.extend(namespace):
for i, row in enumerate(tablerow):
buffer.write(f'<tr class="row{i+1}">')
# Hack to mimic the odd newlines in the reference implementation.
if i == 0:
buffer.write("\n")
for j, itm in enumerate(row):
tablerow.step()
namespace[name] = itm
buffer.write(f'<td class="col{j+1}">')
self.block.render(context=context, buffer=buffer)
buffer.write("</td>")
# Newline as per reference implementation.
buffer.write("</tr>\n")
return True
async def render_to_output_async(
self, context: Context, buffer: TextIO
) -> Optional[bool]:
name = self.expression.name
loop_iter, length = await self.expression.evaluate_async(context)
if self.expression.cols and self.expression.cols != NIL:
cols = await self.expression.cols.evaluate_async(context)
assert isinstance(cols, int)
else:
cols = length
loop_iter = grouper(loop_iter, cols)
tablerow = TableRow(name, loop_iter, length, cols)
namespace: Dict[str, object] = {
"tablerowloop": tablerow,
name: None,
}
with context.extend(namespace):
for i, row in enumerate(tablerow):
buffer.write(f'<tr class="row{i+1}">')
if i == 0:
buffer.write("\n")
for j, itm in enumerate(row):
tablerow.step()
namespace[name] = itm
buffer.write(f'<td class="col{j+1}">')
await self.block.render_async(context=context, buffer=buffer)
buffer.write("</td>")
buffer.write("</tr>\n")
return True
class TablerowTag(Tag):
"""The built-in "tablerow" tag."""
name = TAG_TABLEROW
end = TAG_ENDTABLEROW
def parse(self, stream: TokenStream) -> TablerowNode:
parser = get_parser(self.env)
expect(stream, TOKEN_TAG, value=TAG_TABLEROW)
tok = stream.current
stream.next_token()
expect(stream, TOKEN_EXPRESSION)
expr_iter = tokenize_loop_expression(stream.current.value)
loop_expression = parse_loop_expression(TokenStream(expr_iter))
stream.next_token()
block = parser.parse_block(stream, (TAG_ENDTABLEROW,))
expect(stream, TOKEN_TAG, value=TAG_ENDTABLEROW)
return TablerowNode(tok, expression=loop_expression, block=block)
def grouper(iterator: Iterator[Any], n: int) -> Iterator[Any]:
    """Collect data into fixed-length chunks or blocks."""
    # grouper('ABCDEFG', 3) --> ABC DEF G
return iter(lambda: tuple(islice(iterator, n)), ())
```
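Based on `render_to_output` above, here is a small sketch (not from the repository) of the HTML a `tablerow` block is expected to produce, including the newline quirk noted in its comments.
```python
from liquid import Environment

env = Environment()
template = env.from_string(
    r"{% tablerow n in (1..4) cols: 2 %}{{ n }}{% endtablerow %}"
)
print(template.render())
# Expected, per render_to_output above:
# <tr class="row1">
# <td class="col1">1</td><td class="col2">2</td></tr>
# <tr class="row2"><td class="col1">3</td><td class="col2">4</td></tr>
```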
#### File: liquid/liquid/exceptions.py
```python
from typing import Any
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
from pathlib import Path
class Error(Exception):
"""Base class for all Liquid exceptions."""
def __init__(
self,
*args: object,
linenum: Optional[int] = None,
filename: Optional[Union[str, Path]] = None,
):
self.linenum = linenum
self.filename = filename
super().__init__(*args)
def __str__(self) -> str:
msg = super().__str__()
if self.linenum:
msg = f"{msg}, on line {self.linenum}"
if self.filename:
msg += f" of {self.filename}"
return msg
@property
def message(self) -> object:
"""Return the exception's error message if one was given."""
if self.args:
return self.args[0]
return None
class LiquidInterrupt(Exception):
"""Loop interrupt"""
class LiquidSyntaxError(Error):
"""Exception raised when there is a parser error."""
def __init__(
self,
*args: object,
linenum: Optional[int] = None,
filename: Optional[Union[str, Path]] = None,
):
super().__init__(*args, linenum=linenum, filename=filename)
self.source: Optional[str] = None
@property
def name(self) -> str:
"""Return the name of the template that raised this exception. Return an empty
string if a name is not available."""
if isinstance(self.filename, Path):
return self.filename.as_posix()
if self.filename:
return str(self.filename)
return ""
class LiquidTypeError(Error):
"""Exception raised when an error occurs at render time."""
class DisabledTagError(Error):
"""Exception raised when an attempt is made to render a disabled tag."""
class NoSuchFilterFunc(Error):
"""Exception raised when a filter lookup fails."""
class FilterError(Error):
"""Exception raised when a filter fails."""
class FilterArgumentError(Error):
    """Exception raised when a filter's arguments are invalid."""
class FilterValueError(Error):
    """Exception raised when a filter's value is invalid."""
class TemplateNotFound(Error):
    """Exception raised when a template could not be found."""
def __str__(self) -> str:
msg = super().__str__()
msg = f"template not found {msg}"
return msg
class ContextDepthError(Error):
"""Exception raised when the maximum context depth is reached.
Usually indicates recursive use of ``render`` or ``include`` tags.
"""
class UndefinedError(Error):
"""Exception raised by the StrictUndefined type."""
class BreakLoop(LiquidInterrupt):
"""Exception raised when a BreakNode is rendered."""
class ContinueLoop(LiquidInterrupt):
"""Exception raised when a ContinueNode is rendered."""
class LiquidWarning(UserWarning):
"""Base warning."""
class LiquidSyntaxWarning(LiquidWarning):
"""Replaces LiquidSyntaxError when in WARN mode."""
class LiquidTypeWarning(LiquidWarning):
"""Replaces LiquidTypeError when in WARN mode."""
class FilterWarning(LiquidWarning):
"""Replaces filter exceptions when in WARN mode."""
WARNINGS: Dict[Type[Error], Type[LiquidWarning]] = {
LiquidSyntaxError: LiquidSyntaxWarning,
LiquidTypeError: LiquidTypeWarning,
FilterArgumentError: FilterWarning,
NoSuchFilterFunc: FilterWarning,
}
def lookup_warning(exc: Type[Error]) -> Type[LiquidWarning]:
"""Return a warning equivalent of the given exception."""
return WARNINGS.get(exc, LiquidWarning)
def escape(val: Any) -> str:
"""A dummy escape function that always raises an exception."""
raise Error("autoescape requires Markupsafe to be installed")
class Markup(str):
"""A dummy markup class that always raises an exception."""
def __init__(self, _: object):
super().__init__()
raise Error("autoescape requires Markupsafe to be installed")
def join(self, _: object) -> str:
raise Error(
"autoescape requires Markupsafe to be installed"
) # pragma: no cover
# pylint: disable=no-self-use,missing-function-docstring
def unescape(self) -> str:
raise Error(
"autoescape requires Markupsafe to be installed"
) # pragma: no cover
def format(self, *args: Any, **kwargs: Any) -> str:
raise Error(
"autoescape requires Markupsafe to be installed"
) # pragma: no cover
```
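A minimal demonstration of how `Error.__str__` folds the line number and file name into the message, and how `lookup_warning` maps error types to warnings; it relies only on the code shown above.
```python
from liquid.exceptions import (
    LiquidSyntaxError,
    LiquidSyntaxWarning,
    lookup_warning,
)

# __str__ appends ", on line N" and " of <filename>" when they are provided.
err = LiquidSyntaxError("unexpected 'endif'", linenum=3, filename="index.liquid")
print(str(err))  # unexpected 'endif', on line 3 of index.liquid
print(err.name)  # index.liquid

# lookup_warning maps error classes to their WARN-mode warning equivalents.
assert lookup_warning(LiquidSyntaxError) is LiquidSyntaxWarning
```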
#### File: liquid/liquid/lex.py
```python
from __future__ import annotations
import re
from functools import lru_cache
from functools import partial
from typing import Iterator
from typing import Tuple
from typing import Iterable
from typing import Collection
from typing import Pattern
from typing import Callable
from liquid.exceptions import LiquidSyntaxError
from liquid.token import TOKEN_ILLEGAL
from liquid.token import TOKEN_TAG
from liquid.token import TOKEN_EXPRESSION
from liquid.token import TOKEN_STATEMENT
from liquid.token import TOKEN_LITERAL
from liquid.token import TOKEN_IDENTIFIER
from liquid.token import TOKEN_STRING
from liquid.token import TOKEN_INTEGER
from liquid.token import TOKEN_FLOAT
from liquid.token import TOKEN_EMPTY
from liquid.token import TOKEN_NIL
from liquid.token import TOKEN_NULL
from liquid.token import TOKEN_BLANK
from liquid.token import TOKEN_WITH
from liquid.token import TOKEN_FOR
from liquid.token import TOKEN_AS
from liquid.token import TOKEN_BY
from liquid.token import TOKEN_NEGATIVE
from liquid.token import TOKEN_TRUE
from liquid.token import TOKEN_FALSE
from liquid.token import TOKEN_CONTAINS
from liquid.token import TOKEN_IN
from liquid.token import TOKEN_LPAREN
from liquid.token import TOKEN_RPAREN
from liquid.token import TOKEN_RANGE
from liquid.token import TOKEN_LIMIT
from liquid.token import TOKEN_OFFSET
from liquid.token import TOKEN_REVERSED
from liquid.token import TOKEN_CONTINUE
from liquid.token import TOKEN_COLS
from liquid.token import TOKEN_PIPE
from liquid.token import TOKEN_COLON
from liquid.token import TOKEN_COMMA
from liquid.token import TOKEN_DOT
from liquid.token import TOKEN_LBRACKET
from liquid.token import TOKEN_RBRACKET
from liquid.token import TOKEN_ASSIGN
from liquid.token import TOKEN_AND
from liquid.token import TOKEN_OR
from liquid.token import operators
from liquid.token import Token
__all__ = (
"tokenize_assignment_expression",
"tokenize_boolean_expression",
"tokenize_filtered_expression",
"tokenize_loop_expression",
"tokenize_identifier",
"tokenize_include_expression",
"tokenize_paginate_expression",
"tokenize_liquid_expression",
"get_lexer",
"get_liquid_expression_lexer",
)
IDENTIFIER_PATTERN = r"\w[a-zA-Z0-9_\-]*"
STRING_PATTERN = r"(?P<quote>[\"'])(?P<quoted>.*?)(?P=quote)"
identifier_rules = (
(TOKEN_INTEGER, r"\d+"),
(TOKEN_STRING, STRING_PATTERN),
(TOKEN_IDENTIFIER, IDENTIFIER_PATTERN),
(TOKEN_DOT, r"\."),
(TOKEN_LBRACKET, r"\["),
(TOKEN_RBRACKET, r"]"),
("NEWLINE", r"\n"),
("SKIP", r"[ \t\r]+"),
(TOKEN_ILLEGAL, r"."),
)
filtered_expression_rules = (
(TOKEN_RANGE, r"\.\."),
(TOKEN_LPAREN, r"\("),
(TOKEN_RPAREN, r"\)"),
(TOKEN_FLOAT, r"\d+\.(?!\.)\d*"),
(TOKEN_INTEGER, r"\d+"),
(TOKEN_NEGATIVE, r"-"),
(TOKEN_STRING, STRING_PATTERN),
(TOKEN_IDENTIFIER, IDENTIFIER_PATTERN),
(TOKEN_DOT, r"\."),
(TOKEN_COMMA, r","),
(TOKEN_LBRACKET, r"\["),
(TOKEN_RBRACKET, r"]"),
(TOKEN_COLON, r":"),
(TOKEN_PIPE, r"\|"),
("NEWLINE", r"\n"),
("SKIP", r"[ \t\r]+"),
(TOKEN_ILLEGAL, r"."),
)
filtered_expression_keywords = frozenset(
[
TOKEN_TRUE,
TOKEN_FALSE,
TOKEN_NIL,
TOKEN_NULL,
TOKEN_EMPTY,
TOKEN_BLANK,
]
)
assignment_expression_rules = (
(TOKEN_ASSIGN, r"="),
*filtered_expression_rules,
)
boolean_expression_rules = (
(TOKEN_RANGE, r"\.\."),
(TOKEN_LPAREN, r"\("),
(TOKEN_RPAREN, r"\)"),
(TOKEN_FLOAT, r"\d+\.(?!\.)\d*"),
(TOKEN_INTEGER, r"\d+"),
(TOKEN_NEGATIVE, r"-"),
(TOKEN_STRING, STRING_PATTERN),
(TOKEN_IDENTIFIER, r"\w[a-zA-Z0-9_\-?]*"),
(TOKEN_DOT, r"\."),
(TOKEN_LBRACKET, r"\["),
(TOKEN_RBRACKET, r"]"),
(TOKEN_COLON, r":"),
("NEWLINE", r"\n"),
("OP", r"[!=<>]{1,2}"),
("SKIP", r"[ \t\r]+"),
(TOKEN_ILLEGAL, r"."),
)
boolean_expression_keywords = frozenset(
[
TOKEN_TRUE,
TOKEN_FALSE,
TOKEN_NIL,
TOKEN_NULL,
TOKEN_EMPTY,
TOKEN_BLANK,
TOKEN_AND,
TOKEN_OR,
TOKEN_CONTAINS,
]
)
loop_expression_rules = (
(TOKEN_FLOAT, r"\d+\.(?!\.)\d*"),
(TOKEN_INTEGER, r"\d+"),
(TOKEN_IDENTIFIER, IDENTIFIER_PATTERN),
(TOKEN_RANGE, r"\.\."),
(TOKEN_DOT, r"\."),
(TOKEN_LBRACKET, r"\["),
(TOKEN_RBRACKET, r"]"),
(TOKEN_LPAREN, r"\("),
(TOKEN_RPAREN, r"\)"),
(TOKEN_COLON, r":"),
("NEWLINE", r"\n"),
("SKIP", r"[ \t\r]+"),
(TOKEN_ILLEGAL, r"."),
)
loop_expression_keywords = frozenset(
[
TOKEN_IN,
TOKEN_OFFSET,
TOKEN_LIMIT,
TOKEN_REVERSED,
TOKEN_COLS,
TOKEN_CONTINUE,
]
)
include_expression_rules = (
(TOKEN_RANGE, r"\.\."),
(TOKEN_LPAREN, r"\("),
(TOKEN_RPAREN, r"\)"),
(TOKEN_FLOAT, r"\d+\.(?!\.)\d*"),
(TOKEN_INTEGER, r"\d+"),
(TOKEN_NEGATIVE, r"-"),
(TOKEN_STRING, STRING_PATTERN),
(TOKEN_IDENTIFIER, r"\w[a-zA-Z0-9_\-?]*"),
(TOKEN_DOT, r"\."),
(TOKEN_COMMA, r","),
(TOKEN_LBRACKET, r"\["),
(TOKEN_RBRACKET, r"]"),
(TOKEN_COLON, r":"),
("NEWLINE", r"\n"),
("SKIP", r"[ \t\r]+"),
(TOKEN_ILLEGAL, r"."),
)
include_expression_keywords = frozenset(
[
TOKEN_TRUE,
TOKEN_FALSE,
TOKEN_NIL,
TOKEN_NULL,
TOKEN_EMPTY,
TOKEN_BLANK,
TOKEN_WITH,
TOKEN_FOR,
TOKEN_AS,
]
)
# pylint: disable=too-many-locals
def compile_liquid_rules(
tag_start_string: str = r"{%",
tag_end_string: str = r"%}",
statement_start_string: str = r"{{",
statement_end_string: str = r"}}",
comment_start_string: str = r"",
comment_end_string: str = r"",
) -> Pattern[str]:
"""Compile rules for lexing liquid templates."""
tag_s = re.escape(tag_start_string)
tag_e = re.escape(tag_end_string)
stmt_s = re.escape(statement_start_string)
stmt_e = re.escape(statement_end_string)
comment_s = re.escape(comment_start_string)
comment_e = re.escape(comment_end_string)
raw_pattern = rf"{tag_s}\s*raw\s*{tag_e}(?P<raw>.*?){tag_s}\s*endraw\s*{tag_e}"
statement_pattern = rf"{stmt_s}-?\s*(?P<stmt>.*?)\s*(?P<rss>-?){stmt_e}"
# The "name" group is zero or more characters so that a malformed tag (one
# with no name) does not get treated as a literal.
tag_pattern = rf"{tag_s}-?\s*(?P<name>\w*)\s*(?P<expr>.*?)\s*(?P<rst>-?){tag_e}"
if not comment_start_string:
# Do not support shorthand comment syntax
literal_pattern = rf".+?(?=(({tag_s}|{stmt_s})(?P<rstrip>-?))|$)"
liquid_rules = [
("RAW", raw_pattern),
(TOKEN_STATEMENT, statement_pattern),
("TAG", tag_pattern),
(TOKEN_LITERAL, literal_pattern),
]
else:
literal_pattern = rf".+?(?=(({tag_s}|{stmt_s}|{comment_s})(?P<rstrip>-?))|$)"
comment_pattern = rf"{comment_s}(?P<comment>.*?)(?P<rsc>-?){comment_e}"
liquid_rules = [
("RAW", raw_pattern),
("COMMENT", comment_pattern),
(TOKEN_STATEMENT, statement_pattern),
("TAG", tag_pattern),
(TOKEN_LITERAL, literal_pattern),
]
return _compile_rules(liquid_rules)
def _compile_rules(rules: Iterable[Tuple[str, str]]) -> Pattern[str]:
"""Compile the given rules into a single regular expression."""
pattern = "|".join(f"(?P<{name}>{pattern})" for name, pattern in rules)
return re.compile(pattern, re.DOTALL)
# NOTE: Here we're talking about expressions found in "liquid" tags only. Each line
# starts with a tag name, optionally followed by zero or more space or tab characters
# and an expression, which is terminated by a newline.
def _tokenize_liquid_expression(
source: str,
rules: Pattern[str],
line_count: int = 1,
comment_start_string: str = "",
) -> Iterator[Token]:
"""Tokenize a "liquid" tag expression."""
for match in rules.finditer(source):
kind = match.lastgroup
assert kind is not None
line_num = line_count
value = match.group()
line_count += value.count("\n")
if kind == "LIQUID_EXPR":
name = match.group("name")
if name == comment_start_string:
continue
yield Token(line_num, TOKEN_TAG, name)
if match.group("expr"):
yield Token(line_num, TOKEN_EXPRESSION, match.group("expr"))
elif kind == "SKIP":
continue
else:
raise LiquidSyntaxError(
f"expected newline delimited tag expressions, found {value!r}"
)
@lru_cache(maxsize=128)
def get_liquid_expression_lexer(
comment_start_string: str = "",
) -> Callable[..., Iterator[Token]]:
"""Return a tokenizer that yields tokens from a `liquid` tag's expression."""
# Dubious assumption here.
comment_start_string = comment_start_string.replace("{", "")
if comment_start_string:
comment = re.escape(comment_start_string)
rules = (
(
"LIQUID_EXPR",
rf"[ \t]*(?P<name>(\w+|{comment}))[ \t]*(?P<expr>.*?)[ \t\r]*?(\n+|$)",
),
("SKIP", r"[\r\n]+"),
(TOKEN_ILLEGAL, r"."),
)
else:
rules = (
("LIQUID_EXPR", r"[ \t]*(?P<name>\w+)[ \t]*(?P<expr>.*?)[ \t\r]*?(\n+|$)"),
("SKIP", r"[\r\n]+"),
(TOKEN_ILLEGAL, r"."),
)
return partial(
_tokenize_liquid_expression,
rules=_compile_rules(rules),
comment_start_string=comment_start_string,
)
# For backward compatibility. No line comments.
tokenize_liquid_expression = get_liquid_expression_lexer(comment_start_string="")
def _tokenize(
source: str, rules: Pattern[str], keywords: Collection[str]
) -> Iterator[Token]:
"""Generate tokens from the given source string according to the compiled rules."""
line_num = 1
for match in rules.finditer(source):
kind = match.lastgroup
assert kind is not None
value = match.group()
if kind == TOKEN_IDENTIFIER and value in keywords:
kind = value
elif kind == TOKEN_STRING:
value = match.group("quoted")
elif kind == "OP":
try:
kind = operators[value]
except KeyError as err:
raise LiquidSyntaxError(
f"unknown operator {value!r}",
linenum=line_num,
) from err
elif kind == "NEWLINE":
line_num += 1
continue
elif kind == "SKIP":
continue
elif kind == TOKEN_ILLEGAL:
raise LiquidSyntaxError(f"unexpected {value!r}", linenum=line_num)
yield Token(line_num, kind, value)
tokenize_identifier = partial(
_tokenize,
rules=_compile_rules(identifier_rules),
keywords=(),
)
tokenize_loop_expression = partial(
_tokenize,
rules=_compile_rules(loop_expression_rules),
keywords=loop_expression_keywords,
)
tokenize_filtered_expression = partial(
_tokenize,
rules=_compile_rules(filtered_expression_rules),
keywords=filtered_expression_keywords,
)
tokenize_assignment_expression = partial(
_tokenize,
rules=_compile_rules(assignment_expression_rules),
keywords=filtered_expression_keywords,
)
tokenize_boolean_expression = partial(
_tokenize,
rules=_compile_rules(boolean_expression_rules),
keywords=boolean_expression_keywords,
)
tokenize_include_expression = partial(
_tokenize,
rules=_compile_rules(include_expression_rules),
keywords=include_expression_keywords,
)
tokenize_paginate_expression = partial(
_tokenize,
rules=_compile_rules(identifier_rules),
keywords={TOKEN_BY},
)
def _tokenize_template(source: str, rules: Pattern[str]) -> Iterator[Token]:
line_count = 1
lstrip = False
for match in rules.finditer(source):
kind = match.lastgroup
assert kind is not None
line_num = line_count
value = match.group()
line_count += value.count("\n")
if kind == TOKEN_STATEMENT:
value = match.group("stmt")
lstrip = bool(match.group("rss"))
elif kind == "TAG":
name = match.group("name")
yield Token(line_num, TOKEN_TAG, name)
kind = TOKEN_EXPRESSION
value = match.group("expr")
lstrip = bool(match.group("rst"))
if not value:
continue
elif kind == "COMMENT":
lstrip = bool(match.group("rsc"))
continue
elif kind == "RAW":
kind = TOKEN_LITERAL
value = match.group("raw")
elif kind == TOKEN_LITERAL:
if lstrip:
value = value.lstrip()
if match.group("rstrip"):
value = value.rstrip()
if not value:
continue
if value.startswith(r"{{"):
raise LiquidSyntaxError(
"expected '}}', found 'eof'", linenum=line_count
)
if value.startswith(r"{%"):
raise LiquidSyntaxError(
"expected '%}', found 'eof'", linenum=line_count
)
yield Token(line_num, kind, value)
@lru_cache(maxsize=128)
def get_lexer(
tag_start_string: str = r"{%",
tag_end_string: str = r"%}",
statement_start_string: str = r"{{",
statement_end_string: str = r"}}",
comment_start_string: str = "",
comment_end_string: str = "",
) -> Callable[[str], Iterator[Token]]:
"""Return a template lexer using the given tag and statement delimiters."""
rules = compile_liquid_rules(
tag_start_string,
tag_end_string,
statement_start_string,
statement_end_string,
comment_start_string,
comment_end_string,
)
return partial(_tokenize_template, rules=rules)
```
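A hedged sketch of calling the template lexer directly. `get_lexer` returns a tokenizing callable; the token type constants are defined in `liquid.token`, so the expected output is only described loosely in the comments.
```python
from liquid.lex import get_lexer

# Default {% %} / {{ }} delimiters, no shorthand comment syntax.
tokenize = get_lexer()

for token in tokenize("Hello, {{ you }}!\n{% assign x = 1 %}"):
    print(token)

# Roughly: a literal token for "Hello, ", a statement token with value "you",
# a literal token for "!\n", then a tag token named "assign" and an
# expression token with value "x = 1", both reported on line 2.
```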
#### File: liquid/liquid/tag.py
```python
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
from liquid.ast import Node
from liquid.ast import IllegalNode
from liquid.exceptions import Error
from liquid.parse import eat_block
from liquid.stream import TokenStream
if TYPE_CHECKING: # pragma: no cover
from liquid import Environment
class Tag(ABC):
"""Base class for all built-in and custom template tags."""
block = True
name = ""
end = ""
def __init__(self, env: Environment):
self.env = env
def get_node(self, stream: TokenStream) -> Node:
"""Wraps `Tag.parse`, possibly returning an `IllegalNode`."""
tok = stream.current
try:
return self.parse(stream)
except Error as err:
if not err.linenum:
err.linenum = tok.linenum
self.env.error(err)
if self.block and hasattr(self, "end"):
eat_block(stream, (self.end,))
return IllegalNode(tok)
@abstractmethod
def parse(self, stream: TokenStream) -> Node:
"""Return a parse tree node by parsing tokens from the given stream."""
```
#### File: liquid/utils/html.py
```python
from html.parser import HTMLParser
from typing import List
class StripParser(HTMLParser): # pylint: disable=abstract-method
"""An HTML parser that strips out tags."""
def __init__(self) -> None:
super().__init__(convert_charrefs=False)
self.reset()
self.dat: List[str] = []
def handle_data(self, data: str) -> None:
self.dat.append(data)
def handle_entityref(self, name: str) -> None:
self.dat.append(f"&{name};")
def handle_charref(self, name: str) -> None:
self.dat.append(f"&#{name};")
def get_data(self) -> str:
"""Return accumulated data."""
return "".join(self.dat)
def strip_tags(value: str) -> str:
"""Return the given value with all HTML tags removed."""
if "<" in value and ">" in value:
parser = StripParser()
parser.feed(value)
parser.close()
return parser.get_data()
return value
```
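Direct usage of `strip_tags`, derived from the code above: markup is removed, entity and character references are re-emitted, and values lacking both a `<` and a `>` are returned unchanged. The import path is assumed from the file header.
```python
from liquid.utils.html import strip_tags

print(strip_tags("<p>Have you read <cite>Ulysses</cite>?</p>"))  # Have you read Ulysses?
print(strip_tags("Fish &amp; <b>chips</b>"))                     # Fish &amp; chips
print(strip_tags("5 < 6"))                                       # returned as-is: no '>' present
```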
#### File: tests/filters/test_string.py
```python
import unittest
from functools import partial
from inspect import isclass
from typing import NamedTuple
from typing import Any
from typing import List
from typing import Dict
from liquid.environment import Environment
from liquid.exceptions import FilterArgumentError
from liquid.exceptions import FilterValueError
from liquid.exceptions import FilterError
from liquid.builtin.filters.string import capitalize
from liquid.builtin.filters.string import append
from liquid.builtin.filters.string import downcase
from liquid.builtin.filters.string import escape
from liquid.builtin.filters.string import escape_once
from liquid.builtin.filters.string import lstrip
from liquid.builtin.filters.string import newline_to_br
from liquid.builtin.filters.string import prepend
from liquid.builtin.filters.string import remove
from liquid.builtin.filters.string import remove_first
from liquid.builtin.filters.string import replace
from liquid.builtin.filters.string import replace_first
from liquid.builtin.filters.string import slice_
from liquid.builtin.filters.string import split
from liquid.builtin.filters.string import upcase
from liquid.builtin.filters.string import strip
from liquid.builtin.filters.string import rstrip
from liquid.builtin.filters.string import strip_html
from liquid.builtin.filters.string import strip_newlines
from liquid.builtin.filters.string import truncate
from liquid.builtin.filters.string import truncatewords
from liquid.builtin.filters.string import url_encode
from liquid.builtin.filters.string import url_decode
from liquid.builtin.filters.string import base64_encode
from liquid.builtin.filters.string import base64_decode
from liquid.builtin.filters.string import base64_url_safe_encode
from liquid.builtin.filters.string import base64_url_safe_decode
class Case(NamedTuple):
description: str
val: Any
args: List[Any]
kwargs: Dict[Any, Any]
expect: Any
class StringFilterTestCase(unittest.TestCase):
"""Test string filter functions."""
def setUp(self) -> None:
self.env = Environment()
def _test(self, func, test_cases):
if getattr(func, "with_environment", False):
func = partial(func, environment=self.env)
for case in test_cases:
with self.subTest(msg=case.description):
if isclass(case.expect) and issubclass(
case.expect, (FilterArgumentError, FilterValueError, FilterError)
):
with self.assertRaises(case.expect):
func(case.val, *case.args, **case.kwargs)
else:
self.assertEqual(
func(case.val, *case.args, **case.kwargs), case.expect
)
def test_capitalize(self):
"""Test capitalize filter function."""
test_cases = [
Case(
description="lower case string",
val="hello",
args=[],
kwargs={},
expect="Hello",
),
Case(
description="already capitalized string",
val="Hello",
args=[],
kwargs={},
expect="Hello",
),
Case(
description="unexpected argument",
val="hello",
args=[2],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(capitalize, test_cases)
def test_append(self):
"""Test append filter function."""
test_cases = [
Case(
description="concat",
val="hello",
args=["there"],
kwargs={},
expect="hellothere",
),
Case(
description="not a string",
val=5,
args=["there"],
kwargs={},
expect="5there",
),
Case(
description="argument not a string",
val="hello",
args=[5],
kwargs={},
expect="hello5",
),
Case(
description="missing argument",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=["how", "are", "you"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=["hi"],
kwargs={},
expect="hi",
),
Case(
description="undefined argument",
val="hi",
args=[self.env.undefined("test")],
kwargs={},
expect="hi",
),
]
self._test(append, test_cases)
def test_downcase(self):
"""Test downcase filter function."""
test_cases = [
Case(
description="make lower case",
val="HELLO",
args=[],
kwargs={},
expect="hello",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="HELLO",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(downcase, test_cases)
def test_escape(self):
"""Test escape filter function."""
test_cases = [
Case(
description="make HTML-safe",
val="<p>test</p>",
args=[],
kwargs={},
expect="<p>test</p>",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="HELLO",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(escape, test_cases)
def test_escape_once(self):
"""Test escape_once filter function."""
test_cases = [
Case(
description="make HTML-safe",
val="<p>test</p>",
args=[],
kwargs={},
expect="<p>test</p>",
),
Case(
description="make HTML-safe from mixed safe and markup.",
val="<p>test</p><p>test</p>",
args=[],
kwargs={},
expect="<p>test</p><p>test</p>",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="HELLO",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(escape_once, test_cases)
def test_lstrip(self):
"""Test lstrip filter function."""
test_cases = [
Case(
description="left padded",
val=" \t\r\n hello",
args=[],
kwargs={},
expect="hello",
),
Case(
description="right padded",
val="hello \t\r\n ",
args=[],
kwargs={},
expect="hello \t\r\n ",
),
Case(
description="left and right padded",
val=" \t\r\n hello \t\r\n ",
args=[],
kwargs={},
expect="hello \t\r\n ",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(lstrip, test_cases)
def test_newline_to_br(self):
"""Test newline_to_br filter function."""
test_cases = [
Case(
description="string with newlines",
val="- apples\n- oranges\n",
args=[],
kwargs={},
expect="- apples<br />\n- oranges<br />\n",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="reference implementation test 1",
val="a\nb\nc",
args=[],
kwargs={},
expect="a<br />\nb<br />\nc",
),
Case(
description="reference implementation test 2",
val="a\r\nb\nc",
args=[],
kwargs={},
expect="a<br />\nb<br />\nc",
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(newline_to_br, test_cases)
def test_prepend(self):
"""Test prepend filter function."""
test_cases = [
Case(
description="concat",
val="hello",
args=["there"],
kwargs={},
expect="therehello",
),
Case(
description="not a string",
val=5,
args=["there"],
kwargs={},
expect="there5",
),
Case(
description="argument not a string",
val="hello",
args=[5],
kwargs={},
expect="5hello",
),
Case(
description="missing argument",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=["how", "are", "you"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=["hi"],
kwargs={},
expect="hi",
),
Case(
description="undefined argument",
val="hi",
args=[self.env.undefined("test")],
kwargs={},
expect="hi",
),
]
self._test(prepend, test_cases)
def test_remove(self):
"""Test remove filter function."""
test_cases = [
Case(
description="remove substrings",
val="I strained to see the train through the rain",
args=["rain"],
kwargs={},
expect="I sted to see the t through the ",
),
Case(
description="not a string",
val=5,
args=["there"],
kwargs={},
expect="5",
),
Case(
description="argument not a string",
val="hello",
args=[5],
kwargs={},
expect="hello",
),
Case(
description="missing argument",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=["how", "are", "you"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=["rain"],
kwargs={},
expect="",
),
Case(
description="undefined argument",
val="I strained to see the train through the rain",
args=[self.env.undefined("test")],
kwargs={},
expect="I strained to see the train through the rain",
),
]
self._test(remove, test_cases)
def test_remove_first(self):
"""Test remove_first filter function."""
test_cases = [
Case(
description="remove substrings",
val="I strained to see the train through the rain",
args=["rain"],
kwargs={},
expect="I sted to see the train through the rain",
),
Case(
description="not a string",
val=5,
args=["rain"],
kwargs={},
expect="5",
),
Case(
description="argument not a string",
val="hello",
args=[5],
kwargs={},
expect="hello",
),
Case(
description="missing argument",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=["how", "are", "you"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=["rain"],
kwargs={},
expect="",
),
Case(
description="undefined argument",
val="I strained to see the train through the rain",
args=[self.env.undefined("test")],
kwargs={},
expect="I strained to see the train through the rain",
),
]
self._test(remove_first, test_cases)
def test_replace(self):
"""Test replace filter function."""
test_cases = [
Case(
description="replace substrings",
val="Take my protein pills and put my helmet on",
args=["my", "your"],
kwargs={},
expect="Take your protein pills and put your helmet on",
),
Case(
description="not a string",
val=5,
args=["rain", "foo"],
kwargs={},
expect="5",
),
Case(
description="argument not a string",
val="hello",
args=[5, "your"],
kwargs={},
expect="hello",
),
Case(
description="missing argument",
val="hello",
args=["ll"],
kwargs={},
expect="heo",
),
Case(
description="missing arguments",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=["how", "are", "you"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=["my", "your"],
kwargs={},
expect="",
),
Case(
description="undefined first argument",
val="Take my protein",
args=[self.env.undefined("test"), "#"],
kwargs={},
expect="#T#a#k#e# #m#y# #p#r#o#t#e#i#n#",
),
Case(
description="undefined second argument",
val="Take my protein pills and put my helmet on",
args=["my", self.env.undefined("test")],
kwargs={},
expect="Take protein pills and put helmet on",
),
]
self._test(replace, test_cases)
def test_replace_first(self):
"""Test replace_first filter function."""
test_cases = [
Case(
description="replace substrings",
val="Take my protein pills and put my helmet on",
args=["my", "your"],
kwargs={},
expect="Take your protein pills and put my helmet on",
),
Case(
description="not a string",
val=5,
args=["rain", "foo"],
kwargs={},
expect="5",
),
Case(
description="argument not a string",
val="hello5",
args=[5, "your"],
kwargs={},
expect="helloyour",
),
Case(
description="missing argument",
val="hello",
args=["ll"],
kwargs={},
expect="heo",
),
Case(
description="missing arguments",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=["how", "are", "you"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=["my", "your"],
kwargs={},
expect="",
),
Case(
description="undefined first argument",
val="Take my protein pills and put my helmet on",
args=[self.env.undefined("test"), "your"],
kwargs={},
expect="yourTake my protein pills and put my helmet on",
),
Case(
description="undefined second argument",
val="Take my protein pills and put my helmet on",
args=["my", self.env.undefined("test")],
kwargs={},
expect="Take protein pills and put my helmet on",
),
]
self._test(replace_first, test_cases)
def test_slice(self):
"""Test slice filter function."""
test_cases = [
Case(
description="zero",
val="hello",
args=[0],
kwargs={},
expect="h",
),
Case(
description="one",
val="hello",
args=[1],
kwargs={},
expect="e",
),
Case(
description="one length three",
val="hello",
args=[1, 3],
kwargs={},
expect="ell",
),
Case(
description="out of range",
val="hello",
args=[99],
kwargs={},
expect="",
),
Case(
description="not a string",
val=5,
args=[0],
kwargs={},
expect="5",
),
Case(
description="first argument not an integer",
val="hello",
args=["foo"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="second argument not an integer",
val="hello",
args=[5, "foo"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="missing arguments",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=[1, 2, 3],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="unexpected keyword arguments",
val="hello",
args=[1, 2],
kwargs={"x": "y"},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[1, 3],
kwargs={},
expect="",
),
Case(
description="undefined first argument",
val="hello",
args=[self.env.undefined("test"), 3],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined second argument",
val="hello",
args=[1, self.env.undefined("test")],
kwargs={},
expect="e",
),
]
self._test(slice_, test_cases)
def test_split(self):
"""Test split filter function."""
test_cases = [
Case(
description="split string",
val="Hi, how are you today?",
args=[
" ",
],
kwargs={},
expect=["Hi,", "how", "are", "you", "today?"],
),
Case(
description="not a string",
val=5,
args=[" "],
kwargs={},
expect=["5"],
),
Case(
description="argument not a string",
val="hello th1ere",
args=[1],
kwargs={},
expect=["hello th", "ere"],
),
Case(
description="missing argument",
val="hello there",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello there",
args=[" ", ","],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[" "],
kwargs={},
expect=[""],
),
Case(
description="undefined argument",
val="Hi, how are you today?",
args=[self.env.undefined("test")],
kwargs={},
expect=[
"H",
"i",
",",
" ",
"h",
"o",
"w",
" ",
"a",
"r",
"e",
" ",
"y",
"o",
"u",
" ",
"t",
"o",
"d",
"a",
"y",
"?",
],
),
]
self._test(split, test_cases)
def test_upcase(self):
"""Test upcase filter function."""
test_cases = [
Case(
description="make lower case",
val="hello",
args=[],
kwargs={},
expect="HELLO",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(upcase, test_cases)
def test_strip(self):
"""Test strip filter function."""
test_cases = [
Case(
description="left padded",
val=" \t\r\n hello",
args=[],
kwargs={},
expect="hello",
),
Case(
description="right padded",
val="hello \t\r\n ",
args=[],
kwargs={},
expect="hello",
),
Case(
description="left and right padded",
val=" \t\r\n hello \t\r\n ",
args=[],
kwargs={},
expect="hello",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(strip, test_cases)
def test_rstrip(self):
"""Test rstrip filter function."""
test_cases = [
Case(
description="left padded",
val=" \t\r\n hello",
args=[],
kwargs={},
expect=" \t\r\n hello",
),
Case(
description="right padded",
val="hello \t\r\n ",
args=[],
kwargs={},
expect="hello",
),
Case(
description="left and right padded",
val=" \t\r\n hello \t\r\n ",
args=[],
kwargs={},
expect=" \t\r\n hello",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(rstrip, test_cases)
def test_strip_html(self):
"""Test strip_html filter function."""
test_cases = [
Case(
description="some HTML markup",
val="Have <em>you</em> read <strong>Ulysses</strong> & ?",
args=[],
kwargs={},
expect="Have you read Ulysses & ?",
),
Case(
description="some HTML markup with HTML comment",
val=(
"<!-- Have --><em>you</em> read "
"<strong>Ulysses</strong> & ?"
),
args=[],
kwargs={},
expect="you read Ulysses & ?",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(strip_html, test_cases)
def test_strip_newlines(self):
"""Test strip_newlines filter function."""
test_cases = [
Case(
description="newline and other whitespace",
val="hello there\nyou",
args=[],
kwargs={},
expect="hello thereyou",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="reference implementation test 1",
val="a\nb\nc",
args=[],
kwargs={},
expect="abc",
),
Case(
description="reference implementation test 2",
val="a\r\nb\nc",
args=[],
kwargs={},
expect="abc",
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(strip_newlines, test_cases)
def test_truncate(self):
"""Test truncate filter function."""
test_cases = [
Case(
description="default end",
val="Ground control to Major Tom.",
args=[20],
kwargs={},
expect="Ground control to...",
),
Case(
description="custom end",
val="Ground control to Major Tom.",
args=[25, ", and so on"],
kwargs={},
expect="Ground control, and so on",
),
Case(
description="no end",
val="Ground control to Major Tom.",
args=[20, ""],
kwargs={},
expect="Ground control to Ma",
),
Case(
description="string is shorter than length",
val="Ground control",
args=[20],
kwargs={},
expect="Ground control",
),
Case(
description="not a string",
val=5,
args=[10],
kwargs={},
expect="5",
),
Case(
description="too many arguments",
val="hello",
args=[5, "foo", "bar"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[5],
kwargs={},
expect="",
),
Case(
description="undefined first argument",
val="Ground control to Major Tom.",
args=[self.env.undefined("test")],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined second argument",
val="Ground control to Major Tom.",
args=[20, self.env.undefined("test")],
kwargs={},
expect="Ground control to Ma",
),
Case(
description="default length is 50",
val="Ground control to Major Tom. Ground control to Major Tom.",
args=[],
kwargs={},
expect="Ground control to Major Tom. Ground control to ...",
),
]
self._test(truncate, test_cases)
def test_truncatewords(self):
"""Test truncatewords filter function."""
test_cases = [
Case(
description="default end",
val="Ground control to Major Tom.",
args=[3],
kwargs={},
expect="Ground control to...",
),
Case(
description="custom end",
val="Ground control to Major Tom.",
args=[3, "--"],
kwargs={},
expect="Ground control to--",
),
Case(
description="no end",
val="Ground control to Major Tom.",
args=[3, ""],
kwargs={},
expect="Ground control to",
),
Case(
description="fewer words than word count",
val="Ground control",
args=[3],
kwargs={},
expect="Ground control",
),
Case(
description="not a string",
val=5,
args=[10],
kwargs={},
expect="5",
),
Case(
description="too many arguments",
val="hello",
args=[5, "foo", "bar"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="reference implementation test 1",
val="测试测试测试测试",
args=[5],
kwargs={},
expect="测试测试测试测试",
),
Case(
description="reference implementation test 2",
val="one two three",
args=[2, 1],
kwargs={},
expect="one two1",
),
Case(
description="reference implementation test 3",
val="one two\tthree\nfour",
args=[3],
kwargs={},
expect="one two three...",
),
Case(
description="reference implementation test 4",
val="one two three four",
args=[2],
kwargs={},
expect="one two...",
),
Case(
description="reference implementation test 5",
val="one two three four",
args=[0],
kwargs={},
expect="one...",
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[5],
kwargs={},
expect="",
),
Case(
description="undefined first argument",
val="one two three four",
args=[self.env.undefined("test")],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined second argument",
val="one two three four",
args=[2, self.env.undefined("test")],
kwargs={},
expect="one two",
),
Case(
description="very long argument",
val="",
args=[100000000000000],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="default number of words is 15",
val="a b c d e f g h i j k l m n o p q",
args=[],
kwargs={},
expect="a b c d e f g h i j k l m n o...",
),
]
self._test(truncatewords, test_cases)
def test_url_encode_html(self):
"""Test url_encode filter function."""
test_cases = [
Case(
description="some special URL characters",
val="email address is <EMAIL>!",
args=[],
kwargs={},
expect="email+address+is+bob%40example.com%21",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(url_encode, test_cases)
def test_url_decode_html(self):
"""Test url_decode filter function."""
test_cases = [
Case(
description="some special URL characters",
val="email+address+is+bob%40example.com%21",
args=[],
kwargs={},
expect="email address is <EMAIL>!",
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="5",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(url_decode, test_cases)
def test_base64_encode(self):
"""Test base64_encode filter function."""
test_cases = [
Case(
description="from string",
val="_#/.",
args=[],
kwargs={},
expect="XyMvLg==",
),
Case(
description="from string with URL unsafe",
val=(
r"abcdefghijklmnopqrstuvwxyz "
r"ABCDEFGHIJKLMNOPQRSTUVWXYZ "
r"1234567890 !@#$%^&*()-=_+/?.:;[]{}\|"
),
args=[],
kwargs={},
expect=(
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXogQUJDREVGR0hJSktMTU5PUFFSU1RVV"
"ldYWVogMTIzNDU2Nzg5MCAhQCMkJV4mKigpLT1fKy8/Ljo7W117fVx8"
),
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="NQ==",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(base64_encode, test_cases)
def test_base64_decode(self):
"""Test base64_decode filter function."""
test_cases = [
Case(
description="from string",
val="XyMvLg==",
args=[],
kwargs={},
expect="_#/.",
),
Case(
description="from string with URL unsafe",
val=(
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXogQUJDREVGR0hJSktMTU5PUFFSU1RVV"
"ldYWVogMTIzNDU2Nzg5MCAhQCMkJV4mKigpLT1fKy8/Ljo7W117fVx8"
),
args=[],
kwargs={},
expect=(
r"abcdefghijklmnopqrstuvwxyz "
r"ABCDEFGHIJKLMNOPQRSTUVWXYZ "
r"1234567890 !@#$%^&*()-=_+/?.:;[]{}\|"
),
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect=FilterError,
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(base64_decode, test_cases)
def test_base64_url_safe_encode(self):
"""Test base64_url_safe_encode filter function."""
test_cases = [
Case(
description="from string",
val="_#/.",
args=[],
kwargs={},
expect="XyMvLg==",
),
Case(
description="from string with URL unsafe",
val=(
r"abcdefghijklmnopqrstuvwxyz "
r"ABCDEFGHIJKLMNOPQRSTUVWXYZ "
r"1234567890 !@#$%^&*()-=_+/?.:;[]{}\|"
),
args=[],
kwargs={},
expect=(
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXogQUJDREVGR0hJSktMTU5PUFFSU1RVV"
"ldYWVogMTIzNDU2Nzg5MCAhQCMkJV4mKigpLT1fKy8_Ljo7W117fVx8"
),
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect="NQ==",
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(base64_url_safe_encode, test_cases)
def test_base64_url_safe_decode(self):
"""Test base64_url_safe_decode filter function."""
test_cases = [
Case(
description="from string",
val="XyMvLg==",
args=[],
kwargs={},
expect="_#/.",
),
Case(
description="from string with URL unsafe",
val=(
"<KEY>"
"<KEY>"
),
args=[],
kwargs={},
expect=(
r"abcdefghijklmnopqrstuvwxyz "
r"ABCDEFGHIJKLMNOPQRSTUVWXYZ "
r"1234567890 !@#$%^&*()-=_+/?.:;[]{}\|"
),
),
Case(
description="not a string",
val=5,
args=[],
kwargs={},
expect=FilterError,
),
Case(
description="unexpected argument",
val="hello",
args=[5],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="undefined left value",
val=self.env.undefined("test"),
args=[],
kwargs={},
expect="",
),
]
self._test(base64_url_safe_decode, test_cases)
```
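The string filter tests above are table driven: each `Case` pairs an input value, positional and keyword arguments, and either an expected result or an expected exception class, and a single `_test` helper walks the table inside `subTest` blocks. The stdlib-only sketch below restates that pattern; `str.upper` merely stands in for a filter function and nothing here is part of python-liquid.
```python
# Hypothetical, self-contained illustration of the table-driven pattern used
# above. None of these names belong to python-liquid.
import unittest
from typing import Any, Dict, List, NamedTuple


class Case(NamedTuple):
    description: str
    val: Any
    args: List[Any]
    kwargs: Dict[Any, Any]
    expect: Any


class UpperFilterTestCase(unittest.TestCase):
    def _test(self, func, test_cases):
        for case in test_cases:
            with self.subTest(msg=case.description):
                if isinstance(case.expect, type) and issubclass(case.expect, Exception):
                    # The table entry names an exception class, so assert it is raised.
                    with self.assertRaises(case.expect):
                        func(case.val, *case.args, **case.kwargs)
                else:
                    self.assertEqual(func(case.val, *case.args, **case.kwargs), case.expect)

    def test_upper(self):
        test_cases = [
            Case("lower case string", "hello", [], {}, "HELLO"),
            Case("unexpected argument", "hello", [2], {}, TypeError),
        ]
        self._test(str.upper, test_cases)


if __name__ == "__main__":
    unittest.main()
```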
#### File: mocks/filters/money.py
```python
from liquid.filter import math_filter
@math_filter
def money_with_currency(money):
"""A filter function that returns a number formatted with currency info."""
return f"$ {money / 100.0:.2f} USD"
@math_filter
def money_(money):
"""A filter function that returns a number formatted as money."""
return f"$ {money / 100.0:.2f}"
```
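A hedged usage sketch for these mocks: it assumes `Environment.add_filter` as exposed by recent python-liquid releases and the keyword-argument `render` call used elsewhere in these tests, and the import path for the mock module is a guess.
```python
# Hedged usage sketch, not part of the mocks: registering the money filters
# with an Environment and rendering a template.
from liquid import Environment

from money import money_, money_with_currency  # assumed import path for this mock module

env = Environment()
env.add_filter("money", money_)                                # assumed API
env.add_filter("money_with_currency", money_with_currency)

template = env.from_string("{{ price | money_with_currency }}")
print(template.render(price=1000))  # expected: "$ 10.00 USD"
```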
#### File: liquid/tests/test_context.py
```python
from unittest import TestCase
from typing import NamedTuple
from typing import Type
from liquid.context import builtin
from liquid.context import get_item
from liquid.context import _undefined
from liquid.context import ReadOnlyChainMap
from liquid.environment import Environment
from liquid.exceptions import LiquidTypeError
from liquid.exceptions import lookup_warning
from liquid.mode import Mode
class Case(NamedTuple):
"""Table driven test case helper."""
description: str
template: str
expect_exception: Type[Exception]
expect_msg: str
expect_render: str = ""
class BadContextTemplateTestCase(TestCase):
"""Bad context test case."""
def _test(self, test_cases, mode=Mode.STRICT):
"""Helper method for running lists of `Case`s"""
env = Environment()
env.mode = mode
global_context = {"arr": [], "hash": {}}
for case in test_cases:
with self.subTest(msg=case.description):
if mode == Mode.STRICT:
with self.assertRaises(case.expect_exception) as raised:
template = env.from_string(
case.template, globals=global_context
)
result = template.render()
self.assertEqual(str(raised.exception), case.expect_msg)
elif mode == Mode.WARN:
with self.assertWarns(lookup_warning(case.expect_exception)):
template = env.from_string(
case.template, globals=global_context
)
result = template.render()
elif mode == Mode.LAX:
template = env.from_string(case.template, globals=global_context)
result = template.render()
self.assertEqual(result, case.expect_render)
def test_bad_context(self):
"""Test that we handle render time errors due to incorrect context."""
test_cases = [
Case(
description="array less than hash",
template="{% if arr < hash %}foo{% endif %}",
expect_exception=LiquidTypeError,
expect_msg=r"invalid operator for types '[] < {}', on line 1",
),
]
self._test(test_cases, mode=Mode.STRICT)
self._test(test_cases, mode=Mode.WARN)
self._test(test_cases, mode=Mode.LAX)
class ReadOnlyChainMapTestCase(TestCase):
"""Read only chain map test case."""
def test_get(self):
"""Test that we can get items from a chain map."""
test_cases = [
{
"description": "earlier maps take priority",
"maps": ({"foo": 1}, {"foo": 2}),
"expect": 1,
},
{
"description": "fall back top later maps",
"maps": ({"bar": 1}, {"foo": 2}),
"expect": 2,
},
{
"description": "default to None",
"maps": ({"bar": 1}, {"bar": 2}),
"expect": None,
},
]
for case in test_cases:
with self.subTest(msg=case["description"]):
chain_map = ReadOnlyChainMap(*case["maps"])
self.assertEqual(chain_map.get("foo"), case["expect"])
def test_iter(self):
"""Test that we can iterate a chain map."""
chain_map = ReadOnlyChainMap({"foo": 1}, {"bar": 2}, {"foo": 3})
self.assertEqual(list(chain_map), ["foo", "bar", "foo"])
class ChainedItemGetterTestCase(TestCase):
"""Chained item getter test case."""
def test_get_item(self):
"""Test that we can get nested items."""
test_cases = [
{
"description": "single string key",
"obj": {"foo": 1},
"key": ["foo"],
"expect": 1,
},
{
"description": "chained string key",
"obj": {"foo": {"bar": 2}},
"key": ["foo", "bar"],
"expect": 2,
},
{
"description": "single int key",
"obj": ["foo", "bar"],
"key": [0],
"expect": "foo",
},
{
"description": "chained string and int key",
"obj": {"foo": [1, 2]},
"key": ["foo", 1],
"expect": 2,
},
{
"description": "default to undefined",
"obj": {"foo": 1},
"key": ["no", "such", "thing"],
"expect": _undefined,
},
]
for case in test_cases:
with self.subTest(msg=case["description"]):
self.assertEqual(get_item(case["obj"], *case["key"]), case["expect"])
class BuiltinDynamicScopeTestCase(TestCase):
"""Built-in dynamic scope test case."""
def test_builtin_contains_now(self):
"""Test that `now` is in the builtin scope."""
self.assertTrue("now" in builtin)
def test_builtin_contains_today(self):
"""Test that `today` is in the builtin scope."""
self.assertTrue("today" in builtin)
def test_builtin_not_contains(self):
"""Test that garbage is not in the builtin scope."""
self.assertFalse("foo" in builtin)
def test_builtin_length(self):
"""Test that builtin has a length."""
self.assertEqual(len(builtin), 2)
def test_builtin_iter(self):
"""Test that builtin has a length."""
self.assertEqual(list(builtin), ["now", "today"])
```
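`ReadOnlyChainMap` mirrors the standard library's `collections.ChainMap`: lookups walk the maps in order and the first match wins. The stdlib-only snippet below illustrates the behaviour the first two `test_get` cases assert; it is not python-liquid code.
```python
# Illustrative stdlib analogue: ChainMap has the same "earlier maps win" lookup
# order that ReadOnlyChainMap is tested for above, although ChainMap is writable
# and raises KeyError for missing keys instead of returning None.
from collections import ChainMap

scope = ChainMap({"foo": 1}, {"foo": 2, "bar": 3})

print(scope["foo"])          # -> 1, the earlier map takes priority
print(scope["bar"])          # -> 3, falls back to later maps
print(scope.get("missing"))  # -> None, .get defaults to None
print(list(scope))           # -> ['foo', 'bar'], deduplicated, unlike ReadOnlyChainMap above
```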
#### File: liquid/tests/test_exceptions.py
```python
from pathlib import Path
from unittest import TestCase
from liquid import Environment
from liquid import Mode
from liquid.exceptions import Error
from liquid.exceptions import LiquidSyntaxError
class EnvironmentErrorTestCase(TestCase):
def test_exception_class_is_raised(self):
"""Test env.error handles exception classes."""
env = Environment(tolerance=Mode.STRICT, strict_filters=True)
with self.assertRaises(Error):
env.error(LiquidSyntaxError, msg=":(")
class LiquidErrorTestCase(TestCase):
def test_base_error_message(self):
"""Test that the base error can include a message."""
try:
raise Error("Oh no!", "extra info")
except Error as err:
self.assertEqual(err.message, "Oh no!")
def test_base_error_no_message(self):
"""Test that the base error can not include a message."""
try:
raise Error()
except Error as err:
self.assertEqual(err.message, None)
def test_base_error_include_filename(self):
"""Test that the base error can include a filename."""
try:
raise Error("Oh no!", linenum=1, filename="foo.liquid")
except Error as err:
self.assertEqual(str(err), "Oh no!, on line 1 of foo.liquid")
class LiquidSyntaxErrorTestCase(TestCase):
def test_template_name_from_string(self):
"""Test that a syntax error can include a template name as a string."""
try:
raise LiquidSyntaxError("Oh no!", filename="foo.liquid")
except LiquidSyntaxError as err:
self.assertEqual(err.name, "foo.liquid")
def test_template_name_from_path(self):
"""Test that a syntax error can include a template name as a path."""
try:
raise LiquidSyntaxError("Oh no!", filename=Path("/templates/foo.liquid"))
except LiquidSyntaxError as err:
self.assertEqual(err.name, "/templates/foo.liquid")
def test_no_template_name(self):
"""Test that a syntax error can not include template name."""
try:
raise LiquidSyntaxError("Oh no!")
except LiquidSyntaxError as err:
self.assertEqual(err.name, "")
```
#### File: liquid/tests/test_template_api.py
```python
import unittest
from liquid import Template
from liquid import Environment
from liquid import Mode
class TemplateAPITestCase(unittest.TestCase):
"""Test case for the `Template` API."""
def test_implicit_environment(self):
"""Test that an Environment is created automatically."""
template = Template(r"Hello, {{ you }}")
self.assertIsNotNone(template.env)
def test_environment_cache(self):
"""Test that we reuse Environments."""
template = Template(r"Hello, {{ you }}!")
another = Template(r"Goodbye, {{ you }}.")
self.assertEqual(template.env, another.env)
lax = Template("something", tolerance=Mode.LAX)
self.assertNotEqual(lax.env, template.env)
def test_implicit_explicit(self):
"""Test that an implicit environment renders the same as an explicit one."""
env = Environment()
source = r"Hello, {{ you }}"
context = {"you": "there"}
some = env.from_string(source)
other = Template(source)
self.assertEqual(some.render(**context), other.render(**context))
```
#### File: liquid/tests/test_template_cache.py
```python
import unittest
from liquid.utils import LRUCache
class TemplateCacheTestCase(unittest.TestCase):
def setUp(self) -> None:
self.cache = LRUCache(capacity=5)
self.cache["foo"] = "bar"
self.cache["some"] = "other"
def test_copy(self):
"""Test that we can copy a cache."""
copy = self.cache.copy()
self.assertEqual(self.cache.items(), copy.items())
copy["some"] = "different"
self.assertNotEqual(self.cache.items(), copy.items())
def test_set_default(self):
"""Test that we can set a default value for cache keys."""
self.cache.setdefault("foo", "baz")
self.assertEqual(self.cache["foo"], "bar")
val = self.cache.setdefault("hello", "there")
self.assertEqual(self.cache["hello"], val)
def test_clear(self):
"""Test that we can clear all items from the cache."""
self.assertEqual(len(self.cache), 2)
self.cache.clear()
self.assertEqual(len(self.cache), 0)
def test_contains(self):
"""Test that we can check for membership."""
self.assertEqual("foo" in self.cache, True)
def test_delete(self):
"""Test that we can remove items from the cache."""
self.assertEqual(len(self.cache), 2)
del self.cache["foo"]
self.assertEqual("foo" in self.cache, False)
self.assertEqual(len(self.cache), 1)
def test_values(self):
"""Test that we can get a list of cache values."""
self.assertEqual(self.cache.values(), ["other", "bar"])
def test_keys(self):
"""Test that we can get a list of cache keys."""
self.assertEqual(self.cache.keys(), ["some", "foo"])
def test_reversed(self):
"""Test that we can get a list of cache keys, oldest first."""
self.assertEqual(list(reversed(self.cache)), ["foo", "some"])
def test_capacity(self):
"""Test that the cache does not exceed capacity."""
self.cache["a"] = 1
self.cache["b"] = 2
self.cache["c"] = 3
self.assertEqual(len(self.cache), 5)
self.cache["d"] = 4
self.assertEqual(len(self.cache), 5)
def test_priority(self):
"""Test that recently used items are not removed from the cache."""
self.cache["a"] = 1
self.cache["b"] = 2
self.cache["c"] = 3
self.assertEqual(len(self.cache), 5)
self.assertEqual("foo" in self.cache, True)
self.cache["d"] = 4
self.assertEqual("foo" in self.cache, False)
self.assertEqual("some" in self.cache, True)
self.cache["some"] = "updated"
self.cache["e"] = 5
self.assertEqual("some" in self.cache, True)
self.assertEqual("a" in self.cache, False)
```
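The capacity and priority tests above pin down classic LRU semantics: reads and writes refresh a key, and inserting past capacity evicts the least recently used entry. The sketch below shows those rules with a minimal `OrderedDict`-based cache; it is illustrative only and far smaller than `liquid.utils.LRUCache`.
```python
# Minimal LRU sketch using OrderedDict (illustrative only).
from collections import OrderedDict


class TinyLRU:
    def __init__(self, capacity=5):
        self.capacity = capacity
        self._data = OrderedDict()

    def __getitem__(self, key):
        self._data.move_to_end(key)  # mark as recently used
        return self._data[key]

    def __setitem__(self, key, value):
        if key in self._data:
            self._data.move_to_end(key)
        self._data[key] = value
        if len(self._data) > self.capacity:
            self._data.popitem(last=False)  # evict the least recently used entry

    def __contains__(self, key):
        return key in self._data


cache = TinyLRU(capacity=2)
cache["foo"] = "bar"
cache["some"] = "other"
cache["foo"]            # touching "foo" makes "some" the eviction candidate
cache["new"] = "value"
print("some" in cache)  # -> False
print("foo" in cache)   # -> True
```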
|
{
"source": "jgrprior/csp",
"score": 3
}
|
#### File: jgrprior/csp/init_db.py
```python
import argparse
import collections
import datetime
import hashlib
import itertools
import pathlib
import random
import secrets
import string
import sqlite3
import sys
from dataclasses import dataclass
from dataclasses import field
from typing import NamedTuple
from typing import Optional
__version__ = "0.1.3"
# A list of colours that are used to generate usernames.
COLOURS = [
"Aquamarine",
"Chocolate",
"Crimson",
"Coral",
"Magenta",
"Olive",
"Orchid",
"Salmon",
"Fire",
"Ghost",
"Golden",
"Honey",
"Lavender",
"Lime",
"Spring",
"Rose",
"Violet",
"Peach",
"Turquoise",
]
# A list of animals that are used to generate usernames.
ANIMALS = [
"Aardvark",
"Albatross",
"Goat",
"Alsatian",
"Leopard",
"Angelfish",
"Antelope",
"Fox",
"Armadillo",
"Alpaca",
"Baboon",
"Bandicoot",
"Badger",
"Barracuda",
"Bison",
"Camel",
"Chinchilla",
"Cockatoo",
"Dingo",
"Shrew",
"Eskipoo",
"Ermine",
]
# Characters used for generating password hashing salt.
SALT_CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits
# Default number of iterations used by the password hashing algorithm.
HASH_ITERATIONS = 10 # 260000
# Choose a random mean (average) performance per user from this list.
PERFORMANCE_MEANS = [5000, 7500, 10000, 15000]
# Choose a random standard deviation per user from this list.
PERFORMANCE_SD = [1000, 1250, 1500]
# Choose a random performance trend per user/week from this list.
PERFORMANCE_TRENDS = [0.8, 1, 1.2]
# Skew average performance on gender.
GENDER_PERFORMANCE = {
"male": 0.9,
"female": 1.2,
"neutral": 1,
}
# Skew average performance on age band.
AGEBAND_PERFORMANCE = {
1: 1.3,
2: 1.1,
3: 0.8,
4: 1.1,
5: 1.2,
6: 1.0,
7: 0.9,
8: 0.8,
9: 0.7,
10: 0.6,
11: 0.6,
}
class User(NamedTuple):
"""A generated user that does not yet have a user ID."""
email: str
nickname: str
hashed_password: str
gender: Optional[str] = None
dob: Optional[datetime.date] = None
class Room(NamedTuple):
"""A generated room that does not yet have a room ID."""
user_id: int
name: str
description: str
units: str
access: str
created: datetime.datetime
class RoomMember(NamedTuple):
"""A generated room member that does not yet have a member ID."""
room_id: int
user_id: int
class Activity(NamedTuple):
"""A generated activity that does not yet have an activity ID."""
room_id: int
user_id: int
timestamp: datetime.datetime
performance: int
effort: int
class Buddy(NamedTuple):
"""A generated buddy relationship between two users."""
room_id: int
inviter_id: int
invitee_id: int
def init_db(db):
"""Initialise the database. If the database already exists, data will be
deleted before creating new tables."""
with open("schema.sql") as fd:
db.executescript(fd.read())
def insert_users(db, users):
"""Insert users into the database."""
with db:
db.executemany(
"INSERT INTO user "
"(email, nickname, hashed_password, gender, dob) "
"VALUES (?, ?, ?, ?, ?)",
users,
)
def insert_rooms(db, rooms):
"""Insert rooms into the database."""
with db:
db.executemany(
"INSERT INTO room "
"(owner_id, name, description, units, access, created) "
"VALUES (?, ?, ?, ?, ?, ?)",
rooms,
)
def insert_room_members(db, members):
"""Insert room members into the database."""
with db:
db.executemany(
"INSERT INTO room_member (room_id, user_id) VALUES (?, ?)",
members,
)
def insert_activities(db, activities):
"""Insert activities into the database."""
with db:
db.executemany(
"INSERT INTO activity "
"(room_id, user_id, timestamp, performance, effort) "
"VALUES (?, ?, ?, ?, ?)",
activities,
)
def insert_buddies(db, buddies):
"""Insert buddies into the database."""
with db:
db.executemany(
"INSERT INTO buddy "
"(room_id, inviter_id, invitee_id) "
"VALUES (?, ?, ?)",
buddies,
)
def hash_password(password, salt_length=16, iterations=HASH_ITERATIONS):
"""Securely hash the given password."""
salt = "".join(secrets.choice(SALT_CHARS) for _ in range(salt_length))
dk = hashlib.pbkdf2_hmac("sha256", password.encode(), salt.encode(), iterations)
return f"pbkdf2:sha256:{iterations}${salt}${dk.hex()}"
def generate_users(hash_iterations=HASH_ITERATIONS):
"""Generate users with arbitrary names and email addresses."""
animal_colour_product = list(itertools.product(set(COLOURS), set(ANIMALS)))
random.shuffle(animal_colour_product)
genders = random.choices(
["male", "female", "neutral"],
weights=[100, 100, 2],
k=len(animal_colour_product),
)
ages = []
for _ in range(len(animal_colour_product)):
age = int(random.gauss(35, 20))
while age < 3 or age > 110:
age = int(random.gauss(35, 20))
ages.append(age)
users = []
    for colour, animal in animal_colour_product:
        nickname = colour + animal
email = nickname + "@example.com"
# XXX: Never use a user's username as a default password, or use any
# default password whatsoever.
users.append(
User(
email,
nickname,
hash_password(nickname, iterations=hash_iterations),
gender=genders.pop(),
dob=datetime.date.today() - datetime.timedelta(days=ages.pop() * 365),
)
)
return users
def generate_rooms(db):
"""Generate some activity rooms owned by randomly selected users."""
users = db.execute("SELECT user_id FROM user ORDER BY RANDOM()")
rooms = [
Room(
user_id=next(users)["user_id"],
name="<NAME>",
description="Fleet town daily step counters.",
units="steps/day",
access="public",
created=random_timestamp(
datetime.datetime.now() - datetime.timedelta(days=90)
),
),
Room(
user_id=next(users)["user_id"],
name="<NAME>",
description="Hart district daily step counters.",
units="steps/day",
access="public",
created=random_timestamp(
datetime.datetime.now() - datetime.timedelta(days=100)
),
),
Room(
user_id=next(users)["user_id"],
name="<NAME>",
description="Daily steps for Holly and friends.",
units="steps/day",
access="private",
created=random_timestamp(
datetime.datetime.now() - datetime.timedelta(days=85)
),
),
]
return rooms
def generate_room_members(db):
"""Generate some records for the `room_member` table, indicating that a
user is a member of a room."""
rooms = db.execute("SELECT room_id FROM room")
users = db.execute("SELECT user_id FROM user")
user_ids = [user["user_id"] for user in users]
members = []
for room in rooms:
room_users = random.sample(user_ids, len(user_ids) // 3)
for user_id in room_users:
members.append(RoomMember(room["room_id"], user_id))
return members
def random_timestamp(dt):
"""Return a random datetime on the same day as `dt`. `dt` is assumed
to be a ``datetime.datetime``."""
start = dt.replace(hour=23, minute=59, second=59)
end = dt.replace(hour=0, minute=0, second=0)
max_timestamp = int(start.timestamp())
min_timestamp = int(end.timestamp())
timestamp = random.randrange(min_timestamp, max_timestamp)
return datetime.datetime.fromtimestamp(timestamp)
def age_band_from_dob(dob):
"""Return the ageband given a date of birth."""
today = datetime.date.today()
age = today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))
if age <= 15:
band = 1
elif age <= 25:
band = 2
elif age <= 35:
band = 3
elif age <= 45:
band = 4
elif age <= 55:
band = 5
elif age <= 65:
band = 6
elif age <= 75:
band = 7
elif age <= 85:
band = 8
elif age <= 95:
band = 9
elif age <= 105:
band = 10
else:
band = 11
return band
@dataclass
class UserPerformance:
user_id: int
gender: str
dob: datetime.datetime
age_band: int = field(init=False)
mu: int = field(init=False)
sigma: int = field(init=False)
week_number: int = field(init=False, default=-1)
activities: dict = field(default_factory=dict, init=False)
def __post_init__(self):
self.age_band = age_band_from_dob(self.dob)
self.mu = (
random.choice(PERFORMANCE_MEANS) * GENDER_PERFORMANCE[self.gender]
) * AGEBAND_PERFORMANCE[self.age_band]
self.sigma = random.choice(PERFORMANCE_SD)
def activity(self, room_id, timestamp: datetime.datetime) -> Activity:
"""Return a new activity for this user for the given timestamp."""
date = timestamp.date()
if date in self.activities:
# We've already generated an activity for this user on this day,
# but in a different room. Copy the activity to the new room.
return self.activities[date]._replace(room_id=room_id)
# Trend up or down when we start a new week.
week_number = timestamp.isocalendar()[1]
if self.week_number != week_number:
self.week_number = week_number
if self.mu < 3000:
# Don't let mean performance get too low.
self.mu *= max(PERFORMANCE_TRENDS)
else:
self.mu *= random.choice(PERFORMANCE_TRENDS)
performance = int(random.gauss(self.mu, self.sigma))
while performance < 1:
# Don't want negative steps.
performance = int(random.gauss(self.mu, self.sigma))
effort = random.randint(1, 10)
return Activity(
room_id=room_id,
user_id=self.user_id,
timestamp=timestamp,
performance=performance,
effort=effort,
)
def generate_activities(db):
"""Generate some activities representing users that have completed the
activity defined in a room."""
members = db.execute(
"SELECT room.room_id, room.created AS 'room_timestamp [timestamp]', "
"user.user_id, user.gender, user.dob AS 'dob [date]' "
"FROM room_member "
"JOIN room ON (room.room_id = room_member.room_id) "
"JOIN user ON (user.user_id = room_member.user_id) "
"WHERE departed is NULL"
)
# XXX: Assumes all activities are step counts for now.
activities = []
# We're trying to simulate performance trends for each user. We'll keep
# track of a user's performance with a map of user_ids to UserPerformance
# objects.
users_performance = {}
# XXX: There are no gaps. We're assuming all users have recorded their
# performance every day since the creation of the room. Even if they
# didn't join the room until later.
now = datetime.datetime.now()
for room_id, room_timestamp, user_id, gender, dob in members:
if user_id not in users_performance:
# First time we're seeing this user. Create a new UserPerformance
# object with a random (skewed) performance mean and standard
# deviation.
users_performance[user_id] = UserPerformance(user_id, gender, dob)
user = users_performance[user_id]
# Generate an activity for the current room and user for each day
# since the room was created.
max_delta = now - room_timestamp
for day in range(max_delta.days):
timestamp = random_timestamp(now - datetime.timedelta(days=day))
activities.append(user.activity(room_id, timestamp))
return activities
def pop_users(ids, n):
invitees = []
for _ in range(n):
try:
invitees.append(ids.pop())
except IndexError:
break
return invitees
def generate_buddies(db):
"""Generate some buddies representing users that have invited other users
to join a room."""
members = db.execute(
"SELECT room.room_id, room.owner_id, user_id "
"FROM room_member JOIN room "
"ON (room.room_id = room_member.room_id) "
"WHERE departed IS NULL "
"ORDER BY room_member.room_id"
)
# Rather than making multiple SQL queries, at least one for each room,
# we're grabbing them all in one go, then grouping them into rooms here.
room_ids = []
room_members = []
for key, group in itertools.groupby(members, key=lambda r: r["room_id"]):
room_ids.append(key)
room_members.append(list(group))
buddies = []
# We're trying to simulate a directed graph. The `buddy` table effectively
# being an adjacency list but with the added dimension of a room.
for room_id, room in zip(room_ids, room_members):
owner = room[0]["owner_id"]
queue = collections.deque([[owner]])
user_ids = [member["user_id"] for member in room]
if owner in user_ids:
user_ids.remove(owner)
random.shuffle(user_ids)
while queue:
for inviter in queue.popleft():
invitees = pop_users(user_ids, random.randrange(2, 5))
for invitee in invitees:
buddies.append(Buddy(room_id, inviter, invitee))
if invitees:
queue.append(invitees)
return buddies
def main(path_to_database, init_only=False, hash_iterations=HASH_ITERATIONS):
db = sqlite3.connect(
path_to_database,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
)
db.row_factory = sqlite3.Row
init_db(db)
if not init_only:
users = generate_users(hash_iterations=hash_iterations)
insert_users(db, users)
rooms = generate_rooms(db)
insert_rooms(db, rooms)
room_members = generate_room_members(db)
insert_room_members(db, room_members)
activities = generate_activities(db)
insert_activities(db, activities)
buddies = generate_buddies(db)
insert_buddies(db, buddies)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Project database initialisation and mock data generation."
)
parser.add_argument(
"path",
help="Name or path to the SQLite database file.",
metavar="PATH",
)
parser.add_argument(
"--hash-iterations",
type=int,
default=HASH_ITERATIONS,
help=(
"The number of iterations used by the password "
f"hahsing algorithm. Defaults to {HASH_ITERATIONS}."
),
)
parser.add_argument(
"--version",
action="version",
version=f"%(prog)s v{__version__}",
)
parser.add_argument(
"--init-only",
action="store_true",
help="Initialise the database without generating mock data.",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="Don't wait for confirmation before overriting an existing database.",
)
args = parser.parse_args()
path = pathlib.Path(args.path).with_suffix(".sqlite")
if not args.force and path.exists():
overwrite = input(
f"The database at '{path}' already exists. Overwrite (y/[n])? "
)
if overwrite.lower() not in ("y", "yes"):
sys.exit(1)
main(
str(path),
init_only=args.init_only,
hash_iterations=args.hash_iterations,
)
```
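`hash_password` packs everything a verifier needs, the PBKDF2 parameters, the salt and the digest, into one delimited string. A minimal verification sketch assuming that same `pbkdf2:sha256:<iterations>$<salt>$<hex digest>` layout follows; `verify_password` is a hypothetical helper, not part of the script. Used with the script above, `verify_password(hash_password("secret"), "secret")` would return `True`.
```python
# Hedged companion sketch, not part of init_db.py: verifying a candidate
# password against the delimited strings produced by hash_password().
import hashlib
import hmac


def verify_password(stored: str, candidate: str) -> bool:
    method, salt, hex_digest = stored.split("$", 2)
    _, _, iterations = method.split(":")  # "pbkdf2:sha256:<iterations>"
    dk = hashlib.pbkdf2_hmac(
        "sha256", candidate.encode(), salt.encode(), int(iterations)
    )
    # Constant-time comparison avoids leaking how many characters matched.
    return hmac.compare_digest(dk.hex(), hex_digest)
```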
|
{
"source": "jgrprior/Gymtivity",
"score": 3
}
|
#### File: Gymtivity/landing/tests.py
```python
from http import HTTPStatus
from django.test import SimpleTestCase
from django.urls import reverse
class LandingPageTestCase(SimpleTestCase):
def test_landing_page_loads(self):
"""Test that the landing page loads."""
response = self.client.get(reverse("landing:index"))
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_landing_link_to_login(self):
"""Test that the landing page contains a link to the login page."""
response = self.client.get(reverse("landing:index"))
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertContains(response, "Login")
def test_landing_link_to_registration(self):
"""Test that the landing page contains a link to the sign up page."""
response = self.client.get(reverse("landing:index"))
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertContains(response, "Sign Up")
```
#### File: project/views/generic.py
```python
from django.http import JsonResponse
from project.db.models import SerializableModel
class JSONResponseMixin:
"""A mixin that can be used to render a JSON response."""
def render_to_json_response(self, context, **response_kwargs):
"""Returns a JSON response, transforming 'context' to make the payload."""
return JsonResponse(
self.get_data(context),
json_dumps_params={"indent": 2},
**response_kwargs,
)
def get_data(self, context):
"""Returns an object that will be serialized as JSON by json.dumps()."""
context_object_name = getattr(self, "context_object_name")
context_object = context.get(context_object_name)
if isinstance(context_object, SerializableModel):
data = context_object.serialize()
else:
data = [obj.serialize() for obj in context_object]
# TODO: Add status code
return {"data": {context_object_name: data}}
```
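`JSONResponseMixin` only covers serialisation; a view still has to route its response through `render_to_json_response`. The sketch below shows one hedged way to wire it to a Django class-based view, following the mixin pattern from the Django documentation; the `Workout` model and view names are placeholders, not code from this repository.
```python
# Hedged usage sketch (assumed, not from the project): pairing JSONResponseMixin
# with a Django detail view so a context object is rendered as JSON.
from django.views.generic.detail import BaseDetailView

from project.views.generic import JSONResponseMixin


class WorkoutJSONView(JSONResponseMixin, BaseDetailView):
    # model = Workout  # hypothetical SerializableModel subclass
    context_object_name = "workout"

    def render_to_response(self, context, **response_kwargs):
        # Delegate to the mixin instead of template rendering.
        return self.render_to_json_response(context, **response_kwargs)
```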
|
{
"source": "jgrss/cultionet",
"score": 2
}
|
#### File: cultionet/scripts/create_train_files.py
```python
import argparse
import logging
from pathlib import Path
from cultionet.utils.project_paths import setup_paths
import geowombat as gw
import numpy as np
import geopandas as gpd
from shapely.geometry import Polygon
logger = logging.getLogger(__name__)
class TemporalStats(gw.TimeModule):
def __init__(self):
super(TemporalStats, self).__init__()
self.count = 3
self.dtype = 'uint16'
@staticmethod
def nan_to_num(array) -> np.ndarray:
return np.nan_to_num(array, nan=0.0, posinf=0.0, neginf=0.0)
def calculate(self, array):
array_mean = self.nan_to_num(array.mean(axis=0).squeeze())
array_max = self.nan_to_num(array.max(axis=0).squeeze())
array_cv = (self.nan_to_num(array.std(axis=0).squeeze() / array_mean) * 10000.0).clip(0, 10000)
return np.stack((array_mean, array_max, array_cv))
def create_train_files(args):
ppaths = setup_paths(args.project_path)
region_ids = args.regions.split('-')
region_ids = list(map(int, region_ids))
if len(region_ids) > 1:
region_ids = list(range(region_ids[0], region_ids[1]+1))
for region in region_ids:
for var in args.image_vars:
image_path = Path(args.project_path) / f'{region:06d}' / 'brdf_ts' / 'ms' / var
image_list = list(image_path.glob('*.tif'))
image_year = int(image_list[0].name[:4]) + 1
# File names
grids = ppaths.edge_training_path / f'{region:06d}_grid_{image_year}.gpkg'
edges = ppaths.edge_training_path / f'{region:06d}_poly_{image_year}.gpkg'
view_bands_path = ppaths.edge_training_path.parent / 'view_images'
view_bands_path.mkdir(parents=True, exist_ok=True)
view_bands = view_bands_path / f'{region:06d}_view_{image_year}.tif'
if not view_bands.is_file():
with gw.series(image_list, transfer_lib='numpy') as src:
src.apply(
TemporalStats(),
bands=-1,
processes=False,
num_workers=4,
outfile=str(view_bands)
)
if not grids.is_file():
with gw.open(image_list[0], chunks=512) as src:
grid_df = src.gw.geodataframe
grid_df['grid'] = 0
left, bottom, right, top = grid_df.total_bounds.tolist()
geom = Polygon([(left, top),
(left, top),
(left, top),
(left, top),
(left, top)])
edge_df = gpd.GeoDataFrame(
data=[0], columns=['class'], geometry=[geom], crs=grid_df.crs
)
edge_df.to_file(edges, driver='GPKG')
grid_df.to_file(grids, driver='GPKG')
def main():
parser = argparse.ArgumentParser(description='Creates edge and grid files for training',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-v', '--image-vars', dest='image_vars', help='The time series variables', default=None, nargs='+'
)
parser.add_argument('-p', '--project-path', dest='project_path', help='The NNet project path', default=None)
parser.add_argument('--regions', dest='regions', help='The region ids (e.g., 1-10)', default=None)
args = parser.parse_args()
create_train_files(args)
if __name__ == '__main__':
main()
```
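`TemporalStats.calculate` reduces a band's time series to three layers: the temporal mean, the temporal max, and a coefficient of variation scaled into a 0-10000 `uint16` range. The numpy-only sketch below mirrors that arithmetic on random dummy data; the `(time, band, rows, cols)` shape is an assumption made for illustration and geowombat is not involved.
```python
# Stand-alone numpy sketch of the per-pixel statistics computed above.
import numpy as np

rng = np.random.default_rng(42)
array = rng.random((12, 1, 64, 64))  # assumed (time, band, rows, cols) dummy data

array_mean = np.nan_to_num(array.mean(axis=0).squeeze(), nan=0.0, posinf=0.0, neginf=0.0)
array_max = np.nan_to_num(array.max(axis=0).squeeze(), nan=0.0, posinf=0.0, neginf=0.0)
array_cv = np.clip(
    np.nan_to_num(array.std(axis=0).squeeze() / array_mean, nan=0.0, posinf=0.0, neginf=0.0) * 10000.0,
    0,
    10000,
)

stacked = np.stack((array_mean, array_max, array_cv)).astype("uint16")
print(stacked.shape)  # -> (3, 64, 64), matching self.count = 3 above
```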
#### File: cultionet/data/datasets.py
```python
import typing as T
from pathlib import Path
import random
import logging
from ..errors import TensorShapeError
import attr
import torch
from torch_geometric.data import Data, Dataset
ATTRVINSTANCE = attr.validators.instance_of
ATTRVIN = attr.validators.in_
ATTRVOPTIONAL = attr.validators.optional
@attr.s
class EdgeDataset(Dataset):
"""An edge dataset
"""
root: T.Union[str, Path, bytes] = attr.ib(default='.')
transform: T.Any = attr.ib(default=None)
pre_transform: T.Any = attr.ib(default=None)
data_means: T.Optional[torch.Tensor] = attr.ib(validator=ATTRVOPTIONAL(ATTRVINSTANCE(torch.Tensor)), default=None)
data_stds: T.Optional[torch.Tensor] = attr.ib(validator=ATTRVOPTIONAL(ATTRVINSTANCE(torch.Tensor)), default=None)
pattern: T.Optional[str] = attr.ib(validator=ATTRVOPTIONAL(ATTRVINSTANCE(str)), default='data*.pt')
data_list_ = None
def __attrs_post_init__(self):
super(EdgeDataset, self).__init__(
str(self.root), transform=self.transform, pre_transform=self.pre_transform
)
def get_data_list(self):
"""Gets the list of data files"""
self.data_list_ = list(Path(self.processed_dir).glob(self.pattern))
def shuffle_items(self):
"""Applies a random in-place shuffle to the data list"""
random.shuffle(self.data_list_)
@property
def num_time_features(self):
"""Get the number of time features
"""
data = self[0]
return int(data.nbands)
@property
def raw_file_names(self):
"""Get the raw file names
"""
if not self.data_list_:
self.get_data_list()
return self.data_list_
def download(self):
pass
def process(self):
pass
@property
def processed_file_names(self):
"""Get a list of processed files"""
return self.data_list_
def check_dims(self):
"""Checks if all tensors in the dataset match in shape dimensions
"""
ref_dim = self[0].x.shape
for i in range(1, len(self)):
if self[i].x.shape != ref_dim:
raise TensorShapeError(f'{Path(self.data_list_[i]).name} does not match the reference.')
def len(self):
"""Returns the dataset length"""
return len(self.processed_file_names)
def split_train_val(self, val_frac: float) -> T.Tuple['EdgeDataset', 'EdgeDataset']:
"""Splits the dataset into train and validation
Args:
val_frac (float): The validation fraction.
Returns:
train dataset, validation dataset
"""
n_train = int(len(self) * (1-val_frac))
self.shuffle_items()
train_ds = self[:n_train]
val_ds = self[n_train:]
return train_ds, val_ds
def normalize(self, batch: Data) -> Data:
"""Normalizes data to z-scores
Args:
batch (Data): A `torch_geometric` data object.
z-scores:
z = (x - mean) / std
Returns:
A `torch_geometric` data object.
"""
def add_dims(d: torch.Tensor) -> torch.Tensor:
return d.unsqueeze(0)
x = torch.cat([
(batch.x[:, :-1] - add_dims(self.data_means)) / add_dims(self.data_stds),
batch.x[:, -1][:, None]
], dim=1)
norm_batch = Data(x=x, **{k: getattr(batch, k) for k in batch.keys if k != 'x'})
return norm_batch
def get(self, idx):
"""Gets an individual data object from the dataset
Args:
idx (int): The dataset index position.
Returns:
A `torch_geometric` data object.
"""
if isinstance(self.data_means, torch.Tensor):
batch = torch.load(Path(self.processed_dir) / self.data_list_[idx])
return self.normalize(batch)
else:
return torch.load(Path(self.processed_dir) / self.data_list_[idx])
```
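`EdgeDataset.normalize` applies a per-feature z-score, z = (x - mean) / std, to every column of `x` except the last, which passes through unchanged. The short torch sketch below reproduces that slicing and concatenation on stand-in tensors; it is illustrative and not part of cultionet.
```python
# Illustrative torch-only sketch of the z-score logic in EdgeDataset.normalize().
import torch

x = torch.rand(6, 4)                      # 6 nodes, 3 band features + 1 extra column
data_means = x[:, :-1].mean(dim=0)        # per-feature means for all but the last column
data_stds = x[:, :-1].std(dim=0)          # per-feature standard deviations

x_norm = torch.cat(
    [
        (x[:, :-1] - data_means.unsqueeze(0)) / data_stds.unsqueeze(0),
        x[:, -1][:, None],                # last column left as-is
    ],
    dim=1,
)
print(x_norm.shape)  # -> torch.Size([6, 4])
```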
#### File: cultionet/data/modules.py
```python
import typing as T
from .datasets import EdgeDataset
import pytorch_lightning as pl
from torch_geometric.data import DataLoader
class EdgeDataModule(pl.LightningDataModule):
"""A Lightning data module
"""
def __init__(
self,
train_ds: T.Optional[EdgeDataset] = None,
val_ds: T.Optional[EdgeDataset] = None,
test_ds: T.Optional[EdgeDataset] = None,
predict_ds: T.Optional[EdgeDataset] = None,
batch_size: int = 5,
num_workers: int = 0,
shuffle: bool = True
):
super().__init__()
self.train_ds = train_ds
self.val_ds = val_ds
self.test_ds = test_ds
self.predict_ds = predict_ds
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
def train_dataloader(self):
"""Returns a data loader for train data
"""
return DataLoader(
self.train_ds,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers
)
def val_dataloader(self):
"""Returns a data loader for validation data
"""
return DataLoader(
self.val_ds,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers
)
def test_dataloader(self):
"""Returns a data loader for test data
"""
return DataLoader(
self.test_ds,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers
)
def predict_dataloader(self):
"""Returns a data loader for predict data
"""
return DataLoader(
self.predict_ds,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers
)
```
#### File: cultionet/models/refinement.py
```python
from . import model_utils
import torch
class RefineConv(torch.nn.Module):
"""A refinement convolution layer
"""
def __init__(self, in_channels: int, mid_channels: int, out_channels: int, dropout: float = 0.1):
super(RefineConv, self).__init__()
self.gc = model_utils.GraphToConv()
self.cg = model_utils.ConvToGraph()
self.seq = torch.nn.Sequential(
torch.nn.Conv2d(in_channels, mid_channels, kernel_size=1, padding=0, bias=False, padding_mode='replicate'),
torch.nn.BatchNorm2d(mid_channels),
torch.nn.ELU(alpha=0.1, inplace=False),
torch.nn.Conv2d(mid_channels, mid_channels, kernel_size=3, padding=1, bias=False, padding_mode='replicate'),
torch.nn.BatchNorm2d(mid_channels),
torch.nn.Dropout(dropout),
torch.nn.Conv2d(mid_channels, mid_channels, kernel_size=3, padding=1, bias=False, padding_mode='replicate'),
torch.nn.BatchNorm2d(mid_channels),
torch.nn.Dropout(dropout),
torch.nn.Conv2d(mid_channels, out_channels, kernel_size=1, padding=0, bias=False, padding_mode='replicate')
)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def forward(self, x: torch.Tensor, batch_size: torch.Tensor, height: int, width: int) -> torch.Tensor:
x = self.gc(
x,
batch_size,
height,
width
)
x = self.seq(x)
return self.cg(x)
```
#### File: cultionet/utils/project_paths.py
```python
from pathlib import Path
from dataclasses import dataclass
import shutil
import typing as T
@dataclass
class ProjectPaths:
project_path: Path
image_path: Path
composite_path: Path
proba_path: Path
figure_path: Path
data_path: Path
process_path: Path
ckpt_path: Path
train_path: Path
predict_path: Path
edge_training_path: Path
ckpt_file: Path
loss_file: Path
norm_file: Path
def remove_train_path(self):
if self.process_path.is_dir():
for fn in self.process_path.glob('*.pt'):
fn.unlink()
shutil.rmtree(str(self.process_path))
self.process_path.mkdir(exist_ok=True, parents=True)
def setup_paths(project_path: T.Union[str, Path, bytes], append_ts: T.Optional[bool] = True) -> ProjectPaths:
project_path = Path(project_path)
image_path = project_path / 'time_series_vars' if append_ts else project_path
composite_path = project_path.parent / 'composites'
proba_path = project_path.parent / 'composites_probas'
figure_path = project_path / 'figures'
data_path = project_path / 'data'
ckpt_path = project_path / 'ckpt'
train_path = data_path / 'train'
process_path = train_path / 'processed'
predict_path = data_path / 'predict'
edge_training_path = project_path / 'user_train'
ckpt_file = ckpt_path / 'last.ckpt'
loss_file = ckpt_path / 'losses.npy'
norm_file = ckpt_path / 'last.norm'
for p in [proba_path, figure_path, data_path, process_path, ckpt_path, train_path, edge_training_path]:
p.mkdir(exist_ok=True, parents=True)
return ProjectPaths(
project_path=project_path,
image_path=image_path,
composite_path=composite_path,
proba_path=proba_path,
figure_path=figure_path,
data_path=data_path,
process_path=process_path,
ckpt_path=ckpt_path,
train_path=train_path,
predict_path=predict_path,
edge_training_path=edge_training_path,
ckpt_file=ckpt_file,
loss_file=loss_file,
norm_file=norm_file
)
```
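A quick sketch of `setup_paths`, using a throwaway temporary directory so the `mkdir` calls are harmless:

```python
import tempfile
from cultionet.utils.project_paths import setup_paths

with tempfile.TemporaryDirectory() as tmp:
    ppaths = setup_paths(tmp)      # creates figures/, data/, ckpt/, etc. under tmp
    print(ppaths.train_path)       # <tmp>/data/train
    print(ppaths.ckpt_file)        # <tmp>/ckpt/last.ckpt
    ppaths.remove_train_path()     # clears and recreates data/train/processed
```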
#### File: cultionet/tests/test_dataset.py
```python
import os
from pathlib import Path
from .data import load_data
from cultionet.data.datasets import EdgeDataset
from cultionet.utils.project_paths import setup_paths
import torch
import pytest
project_path = Path(os.path.abspath(os.path.dirname(__file__)))
ppaths = setup_paths(project_path)
ds = EdgeDataset(ppaths.train_path)
data = next(iter(ds))
loaded_data = load_data()
def test_load():
assert torch.allclose(data.x, loaded_data.x)
assert torch.allclose(data.y, loaded_data.y)
def test_ds_type():
assert isinstance(ds, EdgeDataset)
def test_ds_len():
assert len(ds) == 1
def test_x_type():
assert isinstance(data.x, torch.Tensor)
def test_x_shape():
assert data.x.shape == (10000, 39)
def test_y_shape():
assert data.y.shape == (10000,)
def test_nbands_attr():
assert data.nbands == 13
def test_image_shape():
assert data.height == 100
assert data.width == 100
```
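The tests above pin down the expected sample geometry: 10,000 pixel-nodes from a 100 x 100 chip, 39 feature columns, and 13 bands. A synthetic stand-in with those shapes, for illustration only:

```python
import torch
from torch_geometric.data import Data

# Synthetic object mirroring the shapes asserted in the tests (not real training data).
data = Data(x=torch.rand(10000, 39), y=torch.randint(0, 2, (10000,)).float())
data.nbands, data.height, data.width = 13, 100, 100

assert data.x.shape == (10000, 39)
assert data.y.shape == (10000,)
assert data.height * data.width == data.x.shape[0]
```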
|
{
"source": "jgrss/geowombat",
"score": 2
}
|
#### File: geowombat/radiometry/angles.py
```python
import os
from pathlib import Path
import fnmatch
import subprocess
from collections import namedtuple
import tarfile
import logging
from ..handler import add_handler
import numpy as np
import xarray as xr
import rasterio as rio
from rasterio.crs import CRS
# from rasterio.warp import reproject, Resampling
from affine import Affine
import xml.etree.ElementTree as ET
try:
import cv2
OPENCV_INSTALLED = True
except:
OPENCV_INSTALLED = False
logger = logging.getLogger(__name__)
logger = add_handler(logger)
def shift_objects(data,
solar_za,
solar_az,
sensor_za,
sensor_az,
h,
num_workers):
"""
Shifts objects along x and y dimensions
Args:
data (DataArray): The data to shift.
solar_za (DataArray): The solar zenith angle.
solar_az (DataArray): The solar azimuth angle.
sensor_za (DataArray): The sensor, or view, zenith angle.
sensor_az (DataArray): The sensor, or view, azimuth angle.
h (float): The object height.
num_workers (Optional[int]): The number of dask workers.
Returns:
``xarray.DataArray``
"""
# Scale the angles to degrees
sza = solar_za * 0.01
sza.coords['band'] = [1]
saa = solar_az * 0.01
saa.coords['band'] = [1]
vza = sensor_za * 0.01
vza.coords['band'] = [1]
vaa = sensor_az * 0.01
vaa.coords['band'] = [1]
# Convert to radians
rad_sza = np.deg2rad(sza)
rad_saa = np.deg2rad(saa)
rad_vza = np.deg2rad(vza)
rad_vaa = np.deg2rad(vaa)
apparent_solar_az = np.pi + np.arctan((np.sin(rad_saa) * np.tan(rad_sza) - np.sin(rad_vaa) * np.tan(rad_vza)) /
(np.cos(rad_saa) * np.tan(rad_sza) - np.cos(rad_vaa) * np.tan(rad_vza)))
# Maximum horizontal distance
d = (h**2 * ((np.sin(rad_saa) * np.tan(rad_sza) - np.sin(rad_vaa) * np.tan(rad_vza))**2 +
(np.cos(rad_saa) * np.tan(rad_sza) - np.cos(rad_vaa) * np.tan(rad_vza))**2))**0.5
# Convert the polar angle to cartesian offsets
x = int((np.cos(apparent_solar_az) * d).max(skipna=True).data.compute(num_workers=num_workers))
y = int((np.sin(apparent_solar_az) * d).max(skipna=True).data.compute(num_workers=num_workers))
return data.shift(shifts={'x': x, 'y': y}, fill_value=0)
def estimate_cloud_shadows(data,
clouds,
solar_za,
solar_az,
sensor_za,
sensor_az,
heights=None,
num_workers=1):
"""
Estimates shadows from a cloud mask and adds to the existing mask
Args:
data (DataArray): The wavelengths, scaled 0-1.
clouds (DataArray): The cloud mask, where clouds=1 and clear sky=0.
solar_za (DataArray): The solar zenith angle.
solar_az (DataArray): The solar azimuth angle.
sensor_za (DataArray): The sensor, or view, zenith angle.
sensor_az (DataArray): The sensor, or view, azimuth angle.
heights (Optional[list]): The cloud heights, in kilometers.
num_workers (Optional[int]): The number of dask workers.
Returns:
``xarray.DataArray``
References:
For the angle offset calculations, see :cite:`fisher_2014`.
For the shadow test, see :cite:`sun_etal_2018`.
"""
attrs = data.attrs.copy()
if not heights:
heights = list(range(200, 1400, 200))
shadows = None
for h in heights:
potential_shadows = shift_objects(clouds,
solar_za,
solar_az,
sensor_za,
sensor_az,
h,
num_workers)
if not isinstance(shadows, xr.DataArray):
shadows = xr.where((data.sel(band='nir') < 0.25) & (data.sel(band='swir1') < 0.11) & (potential_shadows.sel(band='mask') == 1), 1, 0)
else:
shadows = xr.where(((data.sel(band='nir') < 0.25) & (data.sel(band='swir1') < 0.11) & (potential_shadows.sel(band='mask') == 1)) | shadows.sel(band='mask') == 1, 1, 0)
shadows = shadows.expand_dims(dim='band')
# Add the shadows to the cloud mask
data = xr.where(clouds.sel(band='mask') == 1, 1, xr.where(shadows.sel(band='mask') == 1, 2, 0))
data = data.expand_dims(dim='band')
data.attrs = attrs
return data
def scattering_angle(cos_sza, cos_vza, sin_sza, sin_vza, cos_raa):
"""
Calculates the scattering angle
Args:
cos_sza (DataArray): The cosine of the solar zenith angle.
cos_vza (DataArray): The cosine of the view zenith angle.
sin_sza (DataArray): The sine of the solar zenith angle.
sin_vza (DataArray): The sine of the view zenith angle.
cos_raa (DataArray): The cosine of the relative azimuth angle.
Equation:
    .. math::
        \Theta_s = \arccos{\left( -\cos{\theta_0} \cos{\theta_S} - \sin{\theta_0} \sin{\theta_S} \cos{\zeta} \right)}

    where :math:`\Theta_s` is the scattering angle, :math:`\theta_0` is the solar zenith angle,
    :math:`\theta_S` is the sensor zenith angle, and :math:`\zeta` is the relative azimuth angle.
References:
scattering angle = the angle between the direction of incident and scattered radiation
Liu, CH and <NAME> (2009) AEROSOL OPTICAL DEPTH RETRIEVAL FOR SPOT HRV IMAGES, Journal of Marine Science and Technology
http://stcorp.github.io/harp/doc/html/algorithms/derivations/scattering_angle.html
Returns:
The squared cosine of the scattering angle as an ``xarray.DataArray`` (the angle itself is computed in radians)
"""
scattering_angle = xr.ufuncs.arccos(-cos_sza * cos_vza - sin_sza * sin_vza * cos_raa)
return xr.ufuncs.cos(scattering_angle) ** 2
def relative_azimuth(saa, vaa):
"""
Calculates the relative azimuth angle
Args:
saa (DataArray): The solar azimuth angle (in degrees).
vaa (DataArray): The view azimuth angle (in degrees).
Reference:
http://stcorp.github.io/harp/doc/html/algorithms/derivations/relative_azimuth_angle.html
Returns:
Relative azimuth (in degrees) as an ``xarray.DataArray``
"""
# Relative azimuth (in radians)
raa = xr.ufuncs.deg2rad(saa - vaa)
# Create masks
raa_plus = xr.where(raa >= 2.0*np.pi, 1, 0)
raa_minus = xr.where(raa < 0, 1, 0)
# raa = xr.where(raa_plus == 1, raa + (2.0*np.pi), raa)
# raa = xr.where(raa_minus == 1, raa - (2.0*np.pi), raa)
raa = xr.where(raa_plus == 1, raa - (2.0 * np.pi), raa)
raa = xr.where(raa_minus == 1, raa + (2.0 * np.pi), raa)
return xr.ufuncs.fabs(xr.ufuncs.rad2deg(raa))
def get_sentinel_sensor(metadata):
# Parse the XML file
tree = ET.parse(metadata)
root = tree.getroot()
for child in root:
if 'general_info' in child.tag[-14:].lower():
general_info = child
for ginfo in general_info:
if ginfo.tag == 'TILE_ID':
file_name = ginfo.text
return file_name[:3].lower()
def parse_sentinel_angles(metadata, proc_angles, nodata):
"""
Gets the Sentinel-2 solar angles from metadata
Args:
metadata (str): The metadata file.
proc_angles (str): The angles to parse. Choices are ['solar', 'view'].
nodata (int or float): The 'no data' value.
Returns:
zenith and azimuth angles as a ``tuple`` of 2d ``numpy`` arrays
"""
if proc_angles == 'view':
zenith_values = np.zeros((13, 23, 23), dtype='float64') + nodata
azimuth_values = np.zeros((13, 23, 23), dtype='float64') + nodata
else:
zenith_values = np.zeros((23, 23), dtype='float64') + nodata
azimuth_values = np.zeros((23, 23), dtype='float64') + nodata
view_tag = 'Sun_Angles_Grid' if proc_angles == 'solar' else 'Viewing_Incidence_Angles_Grids'
# Parse the XML file
tree = ET.parse(metadata)
root = tree.getroot()
# Find the angles
for child in root:
if child.tag.split('}')[-1] == 'Geometric_Info':
geoinfo = child
break
for segment in geoinfo:
if segment.tag == 'Tile_Angles':
angles = segment
for angle in angles:
if angle.tag == view_tag:
if proc_angles == 'view':
band_id = int(angle.attrib['bandId'])
for bset in angle:
if bset.tag == 'Zenith':
zenith = bset
if bset.tag == 'Azimuth':
azimuth = bset
for field in zenith:
if field.tag == 'Values_List':
zvallist = field
for field in azimuth:
if field.tag == 'Values_List':
avallist = field
for rindex in range(len(zvallist)):
zvalrow = zvallist[rindex]
avalrow = avallist[rindex]
zvalues = zvalrow.text.split(' ')
avalues = avalrow.text.split(' ')
values = list(zip(zvalues, avalues))
for cindex in range(len(values)):
if (values[cindex][0].lower() != 'nan') and (values[cindex][1].lower() != 'nan'):
ze = float(values[cindex][0])
az = float(values[cindex][1])
if proc_angles == 'view':
zenith_values[band_id, rindex, cindex] = ze
azimuth_values[band_id, rindex, cindex] = az
else:
zenith_values[rindex, cindex] = ze
azimuth_values[rindex, cindex] = az
return zenith_values, azimuth_values
def sentinel_pixel_angles(metadata,
ref_file,
outdir='.',
nodata=-32768,
overwrite=False,
verbose=0):
"""
Generates Sentinel pixel angle files
Args:
metadata (str): The metadata file.
ref_file (str): A reference image to use for geo-information.
outdir (Optional[str]): The output directory to save the angle files to.
nodata (Optional[int or float]): The 'no data' value.
overwrite (Optional[bool]): Whether to overwrite existing angle files.
verbose (Optional[int]): The verbosity level.
References:
https://www.sentinel-hub.com/faq/how-can-i-access-meta-data-information-sentinel-2-l2a
https://github.com/marujore/sentinel_angle_bands/blob/master/sentinel2_angle_bands.py
Returns:
zenith and azimuth angles as a ``namedtuple`` of angle file names
"""
if not OPENCV_INSTALLED:
logger.exception('OpenCV must be installed.')
AngleInfo = namedtuple('AngleInfo', 'vza vaa sza saa sensor')
sza, saa = parse_sentinel_angles(metadata, 'solar', nodata)
vza, vaa = parse_sentinel_angles(metadata, 'view', nodata)
sensor_name = get_sentinel_sensor(metadata)
with rio.open(ref_file) as src:
profile = src.profile.copy()
ref_height = src.height
ref_width = src.width
ref_extent = src.bounds
profile.update(transform=Affine(src.res[0], 0.0, ref_extent.left, 0.0, -src.res[1], ref_extent.top),
height=ref_height,
width=ref_width,
nodata=-32768,
dtype='int16',
count=1,
driver='GTiff',
tiled=True,
compress='lzw')
ref_base = '_'.join(os.path.basename(ref_file).split('_')[:-1])
opath = Path(outdir)
opath.mkdir(parents=True, exist_ok=True)
# Set output angle file names.
sensor_azimuth_file = opath.joinpath(ref_base + '_sensor_azimuth.tif').as_posix()
sensor_zenith_file = opath.joinpath(ref_base + '_sensor_zenith.tif').as_posix()
solar_azimuth_file = opath.joinpath(ref_base + '_solar_azimuth.tif').as_posix()
solar_zenith_file = opath.joinpath(ref_base + '_solar_zenith.tif').as_posix()
for angle_array, angle_file in zip([vaa,
vza,
saa,
sza],
[sensor_azimuth_file,
sensor_zenith_file,
solar_azimuth_file,
solar_zenith_file]):
pfile = Path(angle_file)
if overwrite:
if pfile.is_file():
pfile.unlink()
if not pfile.is_file():
# TODO: write data for each band?
if len(angle_array.shape) > 2:
angle_array = angle_array.mean(axis=0)
with rio.open(angle_file, mode='w', **profile) as dst:
if verbose > 0:
logger.info(' Writing {} to file ...'.format(angle_file))
# Resample and scale
angle_array_resamp = np.int16(cv2.resize(angle_array,
(0, 0),
fy=ref_height / angle_array.shape[0],
fx=ref_width / angle_array.shape[1],
interpolation=cv2.INTER_LINEAR) / 0.01)
dst.write(angle_array_resamp, indexes=1)
return AngleInfo(vaa=str(sensor_azimuth_file),
vza=str(sensor_zenith_file),
saa=str(solar_azimuth_file),
sza=str(solar_zenith_file),
sensor=sensor_name)
# Potentially useful for angle creation
# https://github.com/gee-community/gee_tools/blob/master/geetools/algorithms.py
# def slope_between(a, b):
# return (a[1] - b[1]) / (a[0] - b[0])
#
#
# @nb.jit
# def _calc_sensor_angles(data,
# zenith_angles,
# azimuth_angles,
# yvalues,
# xvalues,
# celly,
# cellx,
# satellite_height,
# nodata,
# acquisition_date):
#
# """
# Calculates sensor zenith and azimuth angles
# """
#
# slope = slope_between(np.array([data.gw.meta.right + ((data.gw.ncols/2.0)*data.gw.cellx), data.gw.meta.top]),
# np.array([data.gw.meta.left, data.gw.meta.top - ((data.gw.nrows / 2.0) * data.gw.celly)]))
#
# slope_perc = -1.0 / slope
#
# view_az = (math.pi / 2.0) - math.arctan(slope_perc)
#
# for i in range(0, yvalues.shape[0]):
#
# for j in range(0, xvalues.shape[0]):
#
# if data_band[i, j] != nodata:
#
# # TODO: calculate satellite drift angle
# dist_from_nadir = None
#
# # Calculate the distance from the current location to the satellite
# dist_to_satellite = np.hypot(satellite_height, dist_from_nadir)
#
# # Calculate the view angle
#
# zenith_angles[i, j]
#
# # Solar zenith angle = 90 - elevation angle scaled to integer range
# zenith_angles[i, j] = (90.0 - get_altitude_fast(xvalues[j], yvalues[i], acquisition_date)) / 0.01
#
# # Solar azimuth angle
# azimuth_angles[i, j] = float(get_azimuth_fast(xvalues[j], yvalues[i], acquisition_date)) / 0.01
#
# return zenith_angles, azimuth_angles
# @nb.jit
# def _calc_solar_angles(data_band, zenith_angles, azimuth_angles, yvalues, xvalues, nodata, acquisition_date):
#
# """
# Calculates solar zenith and azimuth angles
# """
#
# for i in range(0, yvalues):
#
# for j in range(0, xvalues):
#
# if data_band[i, j] != nodata:
#
# # Solar zenith angle = 90 - elevation angle scaled to integer range
# zenith_angles[i, j] = (90.0 - get_altitude_fast(xvalues[j], yvalues[i], acquisition_date)) / 0.01
#
# # Solar azimuth angle
# azimuth_angles[i, j] = float(get_azimuth_fast(xvalues[j], yvalues[i], acquisition_date)) / 0.01
#
# return zenith_angles, azimuth_angles
# def pixel_angles(data, band, nodata, meta):
#
# """
# Generates pixel zenith and azimuth angles
#
# Args:
# data (Xarray): The data with coordinate and transform attributes.
# band (int or str): The ``data`` band to use for masking.
# nodata (int or float): The 'no data' value in ``data``.
# meta (namedtuple): The metadata file. Should have image acquisition year, month, day and hour attributes.
# """
#
# acquisition_date = dtime(meta.year, meta.month, meta.day, meta.hour, 0, 0, 0, tzinfo=datetime.timezone.utc)
#
# yvalues = data.y.values
# xvalues = data.x.values
#
# data_band = data.sel(band=band).data.compute()
# sze = np.zeros((data.gw.nrows, data.gw.ncols), dtype='int16') - 32768
# saa = np.zeros((data.gw.nrows, data.gw.ncols), dtype='int16') - 32768
#
# sze, saa = _calc_solar_angles(data_band, sze, saa, yvalues, xvalues, nodata, acquisition_date)
#
# sze_attrs = data.attrs.copy()
# saa_attrs = data.attrs.copy()
#
# sze_attrs['values'] = 'Solar zenith angle'
# sze_attrs['scale_factor'] = 0.01
#
# saa_attrs['values'] = 'Solar azimuth angle'
# sze_attrs['scale_factor'] = 0.01
#
# szex = xr.DataArray(data=da.from_array(sze[np.newaxis, :, :],
# chunks=(1, data.gw.row_chunks, data.gw.col_chunks)),
# coords={'band': 'sze',
# 'y': data.y,
# 'x': data.x},
# dims=('band', 'y', 'x'),
# attrs=sze_attrs)
#
# saax = xr.DataArray(data=da.from_array(saa[np.newaxis, :, :],
# chunks=(1, data.gw.row_chunks, data.gw.col_chunks)),
# coords={'band': 'saa',
# 'y': data.y,
# 'x': data.x},
# dims=('band', 'y', 'x'),
# attrs=saa_attrs)
#
# return szex, saax
def landsat_pixel_angles(angles_file,
ref_file,
out_dir,
sensor,
l57_angles_path=None,
l8_angles_path=None,
subsample=1,
resampling='bilinear',
num_threads=1,
verbose=0):
"""
Generates Landsat pixel angle files
Args:
angles_file (str): The angles file.
ref_file (str): A reference file.
out_dir (str): The output directory.
sensor (str): The sensor.
l57_angles_path (str): The path to the Landsat 5 and 7 angles bin.
l8_angles_path (str): The path to the Landsat 8 angles bin.
subsample (Optional[int]): The sub-sample factor when calculating the angles.
resampling (Optional[str]): The resampling method.
Choices are ['average', 'bilinear', 'cubic', 'cubic_spline', 'gauss', 'lanczos', 'max', 'med', 'min', 'mode', 'nearest'].
num_threads (Optional[int]): The number of threads to pass to ``rasterio.warp.reproject``.
verbose (Optional[int]): The verbosity level.
Returns:
zenith and azimuth angles as a ``namedtuple`` of angle file names
"""
if not l57_angles_path:
gw_bin = os.path.realpath(os.path.dirname(__file__))
gw_out = os.path.realpath(Path(gw_bin).joinpath('../bin').as_posix())
gw_tar = os.path.realpath(Path(gw_bin).joinpath('../bin/ESPA.tar.gz').as_posix())
if not Path(gw_bin).joinpath('../bin/ESPA').is_dir():
with tarfile.open(gw_tar, mode='r:gz') as tf:
tf.extractall(gw_out)
l57_angles_path = Path(gw_out).joinpath('ESPA/landsat_angles').as_posix()
l8_angles_path = Path(gw_out).joinpath('ESPA/l8_angles').as_posix()
AngleInfo = namedtuple('AngleInfo', 'vza vaa sza saa')
# Setup the angles name.
# example file = LE07_L1TP_225098_20160911_20161008_01_T1_sr_band1.tif
with rio.open(ref_file) as src:
ref_res = src.res
ref_height = src.height
ref_width = src.width
ref_extent = src.bounds
ref_base = '_'.join(os.path.basename(ref_file).split('_')[:-1])
opath = Path(out_dir)
opath.mkdir(parents=True, exist_ok=True)
# Set output angle file names.
sensor_azimuth_file = opath.joinpath(ref_base + '_sensor_azimuth.tif').as_posix()
sensor_zenith_file = opath.joinpath(ref_base + '_sensor_zenith.tif').as_posix()
solar_azimuth_file = opath.joinpath(ref_base + '_solar_azimuth.tif').as_posix()
solar_zenith_file = opath.joinpath(ref_base + '_solar_zenith.tif').as_posix()
if not Path(sensor_azimuth_file).is_file():
# Setup the command.
if sensor.lower() in ['l5', 'l7']:
angle_command = '{PATH} {META} -s {SUBSAMP:d} -b 1'.format(PATH=str(Path(l57_angles_path).joinpath('landsat_angles')),
META=angles_file,
SUBSAMP=subsample)
# 1=zenith, 2=azimuth
out_order = dict(azimuth=2, zenith=1)
# out_order = [2, 1, 2, 1]
else:
angle_command = '{PATH} {META} BOTH {SUBSAMP:d} -f -32768 -b 4'.format(PATH=str(Path(l8_angles_path).joinpath('l8_angles')),
META=angles_file,
SUBSAMP=subsample)
# 1=azimuth, 2=zenith
out_order = dict(azimuth=1, zenith=2)
# out_order = [1, 2, 1, 2]
os.chdir(out_dir)
if verbose > 0:
logger.info(' Generating pixel angles ...')
# Create the angle files.
subprocess.call(angle_command, shell=True)
# Get angle data from 1 band.
sensor_angles = fnmatch.filter(os.listdir(out_dir), '*sensor_B04.img')[0]
solar_angles = fnmatch.filter(os.listdir(out_dir), '*solar_B04.img')[0]
sensor_angles_fn_in = opath.joinpath(sensor_angles).as_posix()
solar_angles_fn_in = opath.joinpath(solar_angles).as_posix()
# Convert the data
for in_angle, out_angle, band_pos in zip([sensor_angles_fn_in,
sensor_angles_fn_in,
solar_angles_fn_in,
solar_angles_fn_in],
[sensor_azimuth_file,
sensor_zenith_file,
solar_azimuth_file,
solar_zenith_file],
['azimuth',
'zenith',
'azimuth',
'zenith']):
new_res = subsample*ref_res[0]
# Update the .hdr file
with open(in_angle + '.hdr', mode='r') as txt:
lines = txt.readlines()
for lidx, line in enumerate(lines):
if line.startswith('map info'):
lines[lidx] = line.replace('30.000, 30.000', f'{new_res:.3f}, {new_res:.3f}')
Path(in_angle + '.hdr').unlink()
with open(in_angle + '.hdr', mode='w') as txt:
txt.writelines(lines)
with rio.open(in_angle) as src:
profile = src.profile.copy()
epsg = src.crs.to_epsg()
# Adjust Landsat images in the Southern hemisphere
if str(epsg).startswith('326') and (ref_extent.top < 0):
transform = Affine(new_res, 0.0, ref_extent.left, 0.0, -new_res, ref_extent.top+10_000_000.0)
crs = CRS.from_epsg(f'327{str(epsg)[3:]}')
else:
transform = Affine(new_res, 0.0, ref_extent.left, 0.0, -new_res, ref_extent.top)
crs = src.crs
profile.update(transform=transform,
crs=crs,
height=src.height,
width=src.width,
nodata=-32768,
dtype='int16',
count=1,
driver='GTiff',
tiled=True,
compress='lzw')
# src_band = rio.Band(src, out_order[band_pos], 'int16', (src.height, src.width))
with rio.open(out_angle, mode='w', **profile) as dst:
dst.write(src.read(out_order[band_pos]),
indexes=1)
# dst_band = rio.Band(dst, 1, 'int16', (dst.height, dst.width))
#
# reproject(src_band,
# destination=dst_band,
# resampling=getattr(Resampling, resampling),
# num_threads=num_threads)
os.chdir(os.path.expanduser('~'))
return AngleInfo(vaa=str(sensor_azimuth_file),
vza=str(sensor_zenith_file),
saa=str(solar_azimuth_file),
sza=str(solar_zenith_file))
```
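To make the zenith/azimuth math above concrete, here is a small self-contained sketch that mirrors `relative_azimuth` and `scattering_angle` on synthetic angle grids (the angle values are invented for illustration):

```python
import numpy as np
import xarray as xr

# Synthetic 10 x 10 angle grids, in degrees.
saa = xr.DataArray(np.full((10, 10), 135.0), dims=('y', 'x'))  # solar azimuth
vaa = xr.DataArray(np.full((10, 10), 80.0), dims=('y', 'x'))   # view azimuth
sza = xr.DataArray(np.full((10, 10), 35.0), dims=('y', 'x'))   # solar zenith
vza = xr.DataArray(np.full((10, 10), 10.0), dims=('y', 'x'))   # view zenith

# Relative azimuth, wrapped into [0, 360) as in relative_azimuth() above.
raa = np.deg2rad(saa - vaa)
raa = xr.where(raa >= 2.0 * np.pi, raa - 2.0 * np.pi, raa)
raa = xr.where(raa < 0, raa + 2.0 * np.pi, raa)
raa_deg = np.fabs(np.rad2deg(raa))

# Squared cosine of the scattering angle, matching scattering_angle() above.
cos_sza, sin_sza = np.cos(np.deg2rad(sza)), np.sin(np.deg2rad(sza))
cos_vza, sin_vza = np.cos(np.deg2rad(vza)), np.sin(np.deg2rad(vza))
cos_raa = np.cos(np.deg2rad(raa_deg))
theta_s = np.arccos(-cos_sza * cos_vza - sin_sza * sin_vza * cos_raa)
print(float((np.cos(theta_s) ** 2).mean()))
```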
#### File: geowombat/radiometry/mask.py
```python
from ..errors import logger
from ..core import ndarray_to_xarray
from ..core import norm_diff
import numpy as np
import xarray as xr
try:
from s2cloudless import S2PixelCloudDetector
S2CLOUDLESS_INSTALLED = True
except:
S2CLOUDLESS_INSTALLED = False
def estimate_shadows(data,
cloud_mask,
solar_zenith,
solar_azimuth,
cloud_heights,
nodata,
scale_factor,
num_workers):
"""
Estimates shadows from masked clouds, solar angle, and solar azimuth
Args:
data (DataArray)
cloud_mask (DataArray)
solar_zenith (DataArray)
solar_azimuth (DataArray)
cloud_heights (list)
nodata (int | float)
scale_factor (float)
num_workers (int): The number of parallel compute workers.
Reference:
https://github.com/samsammurphy/cloud-masking-sentinel2/blob/master/cloud-masking-sentinel2.ipynb
Returns:
``xarray.DataArray``:
Data range: 0 to 1, where 0=non-shadow; 1=shadow
"""
potential_shadows = []
for cloud_height in cloud_heights:
shadow_vector = xr.ufuncs.tan(solar_zenith.sel(band=1)) * cloud_height
# x and y components of shadow vector length
# TODO: check if correct
y = int(((xr.ufuncs.cos(solar_azimuth.sel(band=1)) * shadow_vector) / data.gw.celly).round().min().data.compute(num_workers=num_workers))
x = -int(((xr.ufuncs.sin(solar_azimuth.sel(band=1)) * shadow_vector) / data.gw.celly).round().min().data.compute(num_workers=num_workers))
# affine translation of clouds
cloud_shift = cloud_mask.shift({'x': x, 'y': y}, fill_value=0)
potential_shadows.append(cloud_shift)
potential_shadows = xr.concat(potential_shadows, dim='band')
potential_shadows = potential_shadows.assign_coords(coords={'band': list(range(1, len(cloud_heights)+1))})
potential_shadows = potential_shadows.max(dim='band')
potential_shadows = potential_shadows.expand_dims(dim='band')
potential_shadows = potential_shadows.assign_coords(coords={'band': [1]})
dark_pixels = norm_diff(data,
'swir2',
'green',
sensor='s2',
nodata=nodata,
scale_factor=scale_factor)
shadows = xr.where((potential_shadows.sel(band=1) >= 1) &
(cloud_mask.sel(band=1) != 1) &
(dark_pixels.sel(band='norm-diff') >= 0.1), 1, 0)
shadows = shadows.expand_dims(dim='band')
shadows = shadows.assign_coords(coords={'band': [1]})
return shadows
class CloudShadowMasker(object):
@staticmethod
def mask_s2(data,
solar_za,
solar_az,
cloud_heights=None,
nodata=None,
scale_factor=1,
num_workers=1,
**kwargs):
"""
Masks Sentinel 2 data
Args:
data (DataArray): The Sentinel 2 data to mask.
solar_za (DataArray): The solar zenith angle.
solar_az (DataArray): The solar azimuth angle.
cloud_heights (Optional[list]): A list of potential cloud heights.
nodata (Optional[int or float]): A 'no data' value to fill NAs with.
scale_factor (Optional[float]): A scale factor to apply to the data.
num_workers (Optional[int]): The number of parallel compute workers.
kwargs (Optional[dict]): Keyword arguments for ``s2cloudless.S2PixelCloudDetector``.
Returns:
``xarray.DataArray``:
Data values: 0=clear; 2=shadow; 4=cloud; 255=fill ('no data')
Example:
>>> import geowombat as gw
>>> from geowombat.radiometry import mask_s2
>>>
>>> with gw.config.update(sensor='s2f', scale_factor=0.0001):
>>>
>>> with gw.open('image.tif') as src, \
>>> gw.open('solar_zenith.tif') as sza, \
>>> gw.open('solar_azimuth.tif') as saa:
>>>
>>> s2_mask = mask_s2(src, sza, saa)
"""
# from ..radiometry.mask import CloudShadowMasker
# mask_s2 = CloudShadowMasker().mask_s2
#
# mask = mask_s2(data,
# sza,
# saa,
# scale_factor=0.0001,
# nodata=0,
# num_workers=num_threads)
#
# fnmask = Path(load_bands_names[0]).name.split('.')[0]
# mask.gw.to_raster(f'/media/jcgr/data/projects/global_fields/data/grids/ms/test/000960/{fnmask}_mask.tif',
# n_workers=1, n_threads=1)
#
# if bands_out:
# data = _assign_attrs(data, attrs, bands_out)
new_attrs = data.attrs.copy()
if not cloud_heights:
cloud_heights = list(range(500, 2000, 500))
if not isinstance(nodata, int) and not isinstance(nodata, float):
nodata = data.gw.nodata
if scale_factor == 1.0:
scale_factor = data.gw.scale_factor
if S2CLOUDLESS_INSTALLED:
if not kwargs:
kwargs = dict(threshold=0.4,
average_over=4,
dilation_size=5,
all_bands=False)
cloud_detector = S2PixelCloudDetector(**kwargs)
# Get the S2Cloudless bands
data_cloudless = data.sel(band=['coastal',
'blue',
'red',
'nir1',
'nir',
'rededge',
'water',
'cirrus',
'swir1',
'swir2'])
# Scale from 0-10000 to 0-1
if isinstance(nodata, int) or isinstance(nodata, float):
data_cloudless = xr.where(data_cloudless != nodata, data_cloudless * scale_factor, nodata).clip(0, 1).astype('float64')
else:
data_cloudless = (data_cloudless * scale_factor).clip(0, 1).astype('float64')
# Reshape for predictions ..
# from [bands x rows x columns]
# to [images x rows x columns x bands]
X = data_cloudless.transpose('y', 'x', 'band').data.compute(num_workers=num_workers)[np.newaxis, :, :, :]
################
# Predict clouds
################
# Convert from NumPy array to DataArray
# clear=0, clouds=1
cloud_mask = ndarray_to_xarray(data, cloud_detector.get_cloud_masks(X), [1])
#################
# Predict shadows
#################
# Scale the angles to degrees
sza = solar_za * 0.01
sza.coords['band'] = [1]
saa = solar_az * 0.01
saa.coords['band'] = [1]
# Convert to radians
rad_sza = xr.ufuncs.deg2rad(sza)
rad_saa = xr.ufuncs.deg2rad(saa)
# non-shadow=0, shadows=1
shadow_mask = estimate_shadows(data,
cloud_mask,
rad_sza,
rad_saa,
cloud_heights,
nodata,
scale_factor,
num_workers)
# Recode for final output
mask = xr.where(cloud_mask.sel(band=1) == 1, 4,
xr.where(shadow_mask.sel(band=1) == 1, 2,
xr.where(data.max(dim='band') == nodata, 255, 0))).expand_dims(dim='band').astype('uint8')
mask = mask.assign_coords(coords={'band': ['mask']})
new_attrs['nodatavals'] = (255)
new_attrs['scales'] = (1.0)
new_attrs['offsets'] = (0.0)
new_attrs['pre-scaling'] = scale_factor
new_attrs['sensor'] = 's2'
new_attrs['clearval'] = (0)
new_attrs['shadowval'] = (2)
new_attrs['cloudval'] = (4)
new_attrs['fillval'] = (255)
mask = mask.assign_attrs(**new_attrs)
else:
logger.warning(' S2Cloudless is not installed.')
mask = None
return mask
```
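The final recode in `mask_s2` collapses the cloud and shadow layers into a single band (0=clear, 2=shadow, 4=cloud, 255=fill). A tiny synthetic illustration of that nesting of `xr.where` calls (the masks below are invented):

```python
import numpy as np
import xarray as xr

nodata = 0

# Synthetic single-band inputs on a 4 x 4 grid.
cloud = xr.DataArray(np.array([[1, 0, 0, 0]] * 4), dims=('y', 'x'))
shadow = xr.DataArray(np.array([[0, 1, 0, 0]] * 4), dims=('y', 'x'))
band_max = xr.DataArray(np.array([[5, 5, 5, nodata]] * 4), dims=('y', 'x'))

# Same precedence as mask_s2(): cloud wins, then shadow, then fill, else clear.
mask = xr.where(cloud == 1, 4,
                xr.where(shadow == 1, 2,
                         xr.where(band_max == nodata, 255, 0))).astype('uint8')
print(mask.values[0])  # [  4   2   0 255]
```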
#### File: geowombat/util/web.py
```python
import os
import shutil
import fnmatch
import tarfile
import subprocess
from pathlib import Path
from datetime import datetime
from collections import namedtuple
import time
import logging
import psutil  # needed by _update_status_file below to check for open file handles
import concurrent.futures
from ..handler import add_handler
from ..radiometry import BRDF, LinearAdjustments, RadTransforms, landsat_pixel_angles, sentinel_pixel_angles, QAMasker, DOS, SixS
from ..radiometry.angles import estimate_cloud_shadows
from ..core.properties import get_sensor_info
from ..core import ndarray_to_xarray
from ..backends.gdal_ import warp
import geowombat as gw
import numpy as np
from osgeo import gdal
import pandas as pd
import geopandas as gpd
import xarray as xr
from shapely.geometry import Polygon
try:
import requests
REQUESTS_INSTALLED = True
except:
REQUESTS_INSTALLED = False
try:
from s2cloudless import S2PixelCloudDetector
S2CLOUDLESS_INSTALLED = True
except:
S2CLOUDLESS_INSTALLED = False
logger = logging.getLogger(__name__)
logger = add_handler(logger)
RESAMPLING_DICT = dict(bilinear=gdal.GRA_Bilinear,
cubic=gdal.GRA_Cubic,
nearest=gdal.GRA_NearestNeighbour)
OrbitDates = namedtuple('OrbitDates', 'start end')
FileInfo = namedtuple('FileInfo', 'name key')
GoogleFileInfo = namedtuple('GoogleFileInfo', 'url url_file meta angles')
def _rmdir(pathdir):
"""
Removes a directory path
"""
if pathdir.is_dir():
for child in pathdir.iterdir():
if child.is_file():
try:
child.unlink()
except:
pass
try:
pathdir.rmdir()
except:
try:
shutil.rmtree(str(pathdir))
except:
pass
def _delayed_read(fn):
attempt = 0
max_attempts = 10
while True:
if Path(fn).is_file():
break
else:
time.sleep(2)
attempt += 1
if attempt >= max_attempts:
break
with open(str(fn), mode='r') as tx:
lines = tx.readlines()
return lines
def _update_status_file(fn, log_name):
attempt = 0
max_attempts = 10
while True:
wait_on_file = False
# Check if the file is open by another process
for proc in psutil.process_iter():
try:
for item in proc.open_files():
if item.path == str(fn):
wait_on_file = True
break
except Exception:
pass
if wait_on_file:
break
if wait_on_file:
time.sleep(2)
else:
break
attempt += 1
if attempt >= max_attempts:
break
with open(str(fn), mode='r') as tx:
lines = tx.readlines()
if lines:
lines = list(set(lines))
if log_name + '\n' not in lines:
lines.append(log_name + '\n')
fn.unlink()
with open(str(fn), mode='w') as tx:
tx.writelines(lines)
def _clean_and_update(outdir_angles,
finfo_dict,
meta_name,
check_angles=True,
check_downloads=True,
load_bands_names=None):
if check_angles:
_rmdir(outdir_angles)
if check_downloads:
for k, v in finfo_dict.items():
if Path(v.name).is_file():
try:
Path(v.name).unlink()
except Warning:
logger.warning(' Could not delete {}.'.format(v.name))
else:
logger.warning(' The {} file does not exist to delete.'.format(v.name))
# if update_status:
# _update_status_file(status, meta_name)
if load_bands_names:
for loaded_band in load_bands_names:
if Path(loaded_band).is_file():
try:
Path(loaded_band).unlink()
except Warning:
logger.warning(' Could not delete {}.'.format(loaded_band))
def _assign_attrs(data, attrs, bands_out):
if bands_out:
data = data.sel(band=bands_out)
data = data.transpose('band', 'y', 'x')
data.attrs = attrs
return data
def _parse_google_filename(filename, landsat_parts, sentinel_parts, public_url):
file_info = GoogleFileInfo(url=None, url_file=None, meta=None, angles=None)
f_base, f_ext = os.path.splitext(filename)
fn_parts = f_base.split('_')
if fn_parts[0].lower() in landsat_parts:
# Collection 1
url_ = '{PUBLIC}-landsat/{SENSOR}/01/{PATH}/{ROW}/{FDIR}'.format(PUBLIC=public_url,
SENSOR=fn_parts[0],
PATH=fn_parts[2][:3],
ROW=fn_parts[2][3:],
FDIR='_'.join(fn_parts[:-1]))
url_filename = '{URL}/{FN}'.format(URL=url_, FN=filename)
url_meta = '{URL}/{FN}_MTL.txt'.format(URL=url_, FN='_'.join(fn_parts[:-1]))
url_angles = '{URL}/{FN}_ANG.txt'.format(URL=url_, FN='_'.join(fn_parts[:-1]))
file_info = GoogleFileInfo(url=url_,
url_file=url_filename,
meta=url_meta,
angles=url_angles)
return file_info
def _download_workers(gcp_str, poutdir, outdir, fname, fn, null_items, verbose):
# Renaming Sentinel data
rename = False
# Full path of GCP local download
down_file = str(poutdir.joinpath(fname))
if down_file.endswith('_ANG.txt'):
fbase = fname.replace('_ANG.txt', '')
key = 'angle'
elif down_file.endswith('_MTL.txt'):
fbase = fname.replace('_MTL.txt', '')
key = 'meta'
elif down_file.endswith('MTD_TL.xml'):
fbase = Path(fn).parent.name
down_file = str(poutdir.joinpath(fbase + '_MTD_TL.xml'))
key = 'meta'
rename = True
elif down_file.endswith('_BQA.TIF'):
fbase = fname.replace('_BQA.TIF', '')
key = 'qa'
else:
if fname.endswith('.jp2'):
fbase = Path(fn).parent.parent.name
key = Path(fn).name.split('.')[0].split('_')[-1]
down_file = str(poutdir.joinpath(fbase + '_' + key + '.jp2'))
rename = True
else:
fsplit = fname.split('_')
fbase = '_'.join(fsplit[:-1])
key = fsplit[-1].split('.')[0]
# TODO: QA60
continue_download = True
if fbase in null_items:
continue_download = False
if continue_download:
###################
# Download the file
###################
if not Path(down_file).is_file():
if fn.lower().startswith('gs://gcp-public-data'):
com = 'gsutil cp -r {} {}'.format(fn, outdir)
else:
com = 'gsutil cp -r {}/{} {}'.format(gcp_str, fn, outdir)
if verbose > 0:
logger.info(' Downloading {} ...'.format(fname))
subprocess.call(com, shell=True)
if rename:
os.rename(str(Path(outdir).joinpath(Path(fn).name)), down_file)
# Store file information
return key, FileInfo(name=down_file, key=key)
else:
return None, None
class DownloadMixin(object):
def download_gcp(self,
sensor,
downloads=None,
outdir='.',
outdir_brdf=None,
search_wildcards=None,
search_dict=None,
n_jobs=1,
verbose=0):
"""
Downloads a file from Google Cloud platform
Args:
sensor (str): The sensor to query. Choices are ['l5', 'l7', 'l8', 's2a', 's2c'].
downloads (Optional[str or list]): The file or list of keys to download. If not given, keys will be taken
from ``search_dict`` or ``self.search_dict``.
outdir (Optional[str | Path]): The output directory.
outdir_brdf (Optional[Path]): The output directory for the BRDF-adjusted files.
search_wildcards (Optional[list]): A list of search wildcards.
search_dict (Optional[dict]): A keyword search dictionary to override ``self.search_dict``.
n_jobs (Optional[int]): The number of files to download in parallel.
verbose (Optional[int]): The verbosity level.
Returns:
``dict`` of ``dicts``
where sub-dictionaries contain a ``namedtuple`` of the downloaded file and tag
"""
if not search_dict:
if not self.search_dict:
logger.exception(' A keyword search dictionary must be provided, either from `self.list_gcp` or the `search_dict` argument.')
else:
search_dict = self.search_dict
poutdir = Path(outdir)
if outdir != '.':
poutdir.mkdir(parents=True, exist_ok=True)
if not downloads:
downloads = list(search_dict.keys())
if not isinstance(downloads, list):
downloads = [downloads]
if sensor in ['s2', 's2a', 's2b', 's2c']:
gcp_str = 'gsutil cp -r gs://gcp-public-data-sentinel-2'
else:
gcp_str = 'gsutil cp -r gs://gcp-public-data-landsat'
downloaded = {}
null_items = []
for search_key in downloads:
download_list = self.search_dict[search_key]
if search_wildcards:
download_list_ = []
for swild in search_wildcards:
download_list_ += fnmatch.filter(download_list, '*{}'.format(swild))
download_list = download_list_
download_list_names = [Path(dfn).name for dfn in download_list]
logger.info(' The download contains {:d} items: {}'.format(len(download_list_names), ','.join(download_list_names)))
# Separate each scene
if sensor.lower() in ['l5', 'l7', 'l8']:
# list of file ids
id_list = ['_'.join(fn.split('_')[:-1]) for fn in download_list_names if fn.endswith('_MTL.txt')]
# list of lists where each sub-list is unique
download_list_unique = [[fn for fn in download_list if sid in Path(fn).name] for sid in id_list]
else:
id_list = list(set(['_'.join(fn.split('_')[:-1]) for fn in download_list_names]))
download_list_unique = [download_list]
for scene_id, sub_download_list in zip(id_list, download_list_unique):
logger.info(' Checking scene {} ...'.format(scene_id))
downloaded_sub = {}
# Check if the file has been downloaded
if sensor.lower() in ['l5', 'l7', 'l8']:
if not scene_id.lower().startswith(self.sensor_collections[sensor.lower()]):
logger.exception(' The scene id {SCENE_ID} does not match the sensor {SENSOR}.'.format(SCENE_ID=scene_id,
SENSOR=sensor))
raise NameError
# Path of BRDF stack
out_brdf = outdir_brdf.joinpath(scene_id + '.tif')
else:
fn = sub_download_list[0]
fname = Path(fn).name
if fname.lower().endswith('.jp2'):
fbase = Path(fn).parent.parent.name
key = Path(fn).name.split('.')[0].split('_')[-1]
down_file = str(poutdir.joinpath(fbase + '_' + key + '.jp2'))
brdfp = '_'.join(Path(down_file).name.split('_')[:-1])
out_brdf = outdir_brdf.joinpath(brdfp + '_MTD.tif')
else:
out_brdf = None
if out_brdf:
if out_brdf.is_file() or \
Path(str(out_brdf).replace('.tif', '.nc')).is_file() or \
Path(str(out_brdf).replace('.tif', '.nodata')).is_file():
logger.warning(f' The output BRDF file, {str(out_brdf)}, already exists.')
_clean_and_update(None, None, None, check_angles=False, check_downloads=False)
continue
else:
logger.warning(f' Continuing with the download for {str(out_brdf)}.')
# Move the metadata file to the front of the
# list to avoid unnecessary downloads.
if sensor.lower() in ['l5', 'l7', 'l8']:
meta_index = [i for i in range(0, len(sub_download_list)) if sub_download_list[i].endswith('_MTL.txt')][0]
sub_download_list.insert(0, sub_download_list.pop(meta_index))
else:
# The Sentinel 2 metadata files come in their own list
pass
download_list_names = [Path(dfn).name for dfn in sub_download_list]
results = []
with concurrent.futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
futures = [executor.submit(_download_workers,
gcp_str,
poutdir,
outdir,
fname,
fn,
null_items,
verbose) for fname, fn in zip(download_list_names, sub_download_list)]
for f in concurrent.futures.as_completed(futures):
results.append(f.result())
for key, finfo_ in results:
if finfo_:
downloaded_sub[key] = finfo_
if downloaded_sub:
if len(downloaded_sub) < len(sub_download_list):
downloaded_names = [Path(v.name).name for v in list(downloaded_sub.values())]
missing_items = ','.join(list(set(download_list_names).difference(downloaded_names)))
logger.warning(' Only {:d} files out of {:d} were downloaded.'.format(len(downloaded_sub), len(sub_download_list)))
logger.warning(' {} are missing.'.format(missing_items))
downloaded[search_key] = downloaded_sub
return downloaded
def download_aws(self,
landsat_id,
band_list,
outdir='.'):
"""
Downloads Landsat 8 data from Amazon AWS
Args:
landsat_id (str): The Landsat id to download.
band_list (list): The Landsat bands to download.
outdir (Optional[str]): The output directory.
Examples:
>>> from geowombat.util import GeoDownloads
>>>
>>> dl = GeoDownloads()
>>> dl.download_aws('LC08_L1TP_224077_20200518_20200518_01_RT', ['b2', 'b3', 'b4'])
"""
if not REQUESTS_INSTALLED:
logger.exception('Requests must be installed.')
if not isinstance(outdir, Path):
outdir = Path(outdir)
parts = landsat_id.split('_')
path_row = parts[2]
path = int(path_row[:3])
row = int(path_row[3:])
def _download_file(in_file, out_file):
response = requests.get(in_file)
with open(out_file, 'wb') as f:
f.write(response.content)
mtl_id = '{landsat_id}_MTL.txt'.format(landsat_id=landsat_id)
url = '{aws_l8_public}/{path:03d}/{row:03d}/{landsat_id}/{mtl_id}'.format(aws_l8_public=self.aws_l8_public,
path=path,
row=row,
landsat_id=landsat_id,
mtl_id=mtl_id)
mtl_out = outdir / mtl_id
_download_file(url, str(mtl_out))
angle_id = '{landsat_id}_ANG.txt'.format(landsat_id=landsat_id)
url = '{aws_l8_public}/{path:03d}/{row:03d}/{landsat_id}/{angle_id}'.format(aws_l8_public=self.aws_l8_public,
path=path,
row=row,
landsat_id=landsat_id,
angle_id=angle_id)
angle_out = outdir / angle_id
_download_file(url, str(angle_out))
for band in band_list:
band_id = '{landsat_id}_{band}.TIF'.format(landsat_id=landsat_id,
band=band.upper())
url = '{aws_l8_public}/{path:03d}/{row:03d}/{landsat_id}/{band_id}'.format(aws_l8_public=self.aws_l8_public,
path=path,
row=row,
landsat_id=landsat_id,
band_id=band_id)
band_out = outdir / band_id
_download_file(url, str(band_out))
# def download_landsat_range(self, sensors, bands, path_range, row_range, date_range, **kwargs):
#
# """
# Downloads Landsat data from iterables
#
# Args:
# sensors (str): A list of sensors to download.
# bands (str): A list of bands to download.
# path_range (iterable): A list of paths.
# row_range (iterable): A list of rows.
# date_range (iterable): A list of ``datetime`` objects or a list of strings as yyyymmdd.
# kwargs (Optional[dict]): Keyword arguments to pass to ``download``.
#
# Examples:
# >>> from geowombat.util import GeoDownloads
# >>>
# >>> dl = GeoDownloads()
# >>> dl.download_landsat_range(['lc08'], ['b4'], [42], [34], ['20170616', '20170620'])
# """
#
# if (len(date_range) == 2) and not isinstance(date_range[0], datetime):
# start_date = date_range[0]
# end_date = date_range[1]
#
# sdt = datetime.strptime(start_date, '%Y%m%d')
# edt = datetime.strptime(end_date, '%Y%m%d')
#
# date_range = pd.date_range(start=sdt, end=edt).to_pydatetime().tolist()
#
# for sensor in sensors:
# for band in bands:
# for path in path_range:
# for row in row_range:
# for dt in date_range:
# str_date = '{:d}{:02d}{:02d}'.format(dt.year, dt.month, dt.day)
#
# # TODO: check if L1TP is used for all sensors
# # TODO: fixed DATE2
# filename = '{SENSOR}_L1TP_{PATH:03d}{ROW:03d}_{DATE}_{DATE2}_01_T1_{BAND}.TIF'.format(
# SENSOR=sensor.upper(),
# PATH=path,
# ROW=row,
# DATE=str_date,
# DATE2=None,
# BAND=band)
#
# self.download(filename, **kwargs)
class CloudPathMixin(object):
@staticmethod
def get_landsat_urls(scene_id, bands=None, cloud='gcp'):
"""
Gets Google Cloud Platform COG urls for Landsat
Args:
scene_id (str): The Landsat scene id.
bands (Optional[list]): The list of band names.
cloud (Optional[str]): The cloud storage to get the URL from. For now, only 'gcp' is supported.
Returns:
``tuple`` of band URLs and metadata URL as strings
Example:
>>> import os
>>> import geowombat as gw
>>> from geowombat.util import GeoDownloads
>>>
>>> os.environ['CURL_CA_BUNDLE'] = '/etc/ssl/certs/ca-certificates.crt'
>>>
>>> gdl = GeoDownloads()
>>>
>>> scene_urls, meta_url = gdl.get_landsat_urls('LC08_L1TP_042034_20171225_20180103_01_T1',
>>> bands=['blue', 'green', 'red'])
>>>
>>> with gw.open(scene_urls) as src:
>>> print(src)
"""
gcp_base = 'https://storage.googleapis.com/gcp-public-data-landsat'
sensor_collection, level, path_row, date_acquire, date_other, collection, tier = scene_id.split('_')
path = path_row[:3]
row = path_row[3:]
if bands:
sensor = f'{sensor_collection[0].lower()}{sensor_collection[3]}'
# Landsat 7 has the thermal band
sensor = 'l7th' if sensor == 'l7' else sensor
wavelengths = get_sensor_info(key='wavelength', sensor=sensor)
band_pos = [getattr(wavelengths, b) for b in bands]
else:
band_pos = [1]
lid = f'{sensor_collection}/01/{path}/{row}/{scene_id}'
scene_urls = [f'{gcp_base}/{lid}/{scene_id}_B{band_pos}.TIF' for band_pos in band_pos]
meta_url = f'{gcp_base}/{lid}/{scene_id}_MTL.txt'
return scene_urls, meta_url
@staticmethod
def get_sentinel2_urls(safe_id, bands=None, cloud='gcp'):
"""
Gets Google Cloud Platform COG urls for Sentinel 2
Args:
safe_id (str): The Sentinel 2 SAFE id.
bands (Optional[list]): The list of band names.
cloud (Optional[str]): The cloud storage to get the URL from. For now, only 'gcp' is supported.
Returns:
``tuple`` of band URLs and metadata URL as strings
Example:
>>> import os
>>> import geowombat as gw
>>> from geowombat.util import GeoDownloads
>>>
>>> os.environ['CURL_CA_BUNDLE'] = '/etc/ssl/certs/ca-certificates.crt'
>>>
>>> gdl = GeoDownloads()
>>>
>>> safe_id = 'S2A_MSIL1C_20180109T135101_N0206_R024_T21HUD_20180109T171608.SAFE/GRANULE/L1C_T21HUD_A013320_20180109T135310'
>>>
>>> scene_urls, meta_url = gdl.get_sentinel2_urls(safe_id,
>>> bands=['blue', 'green', 'red', 'nir'])
>>>
>>> with gw.open(scene_urls) as src:
>>> print(src)
"""
gcp_base = 'https://storage.googleapis.com/gcp-public-data-sentinel-2'
sensor, level, date, __, __, mgrs, __ = safe_id.split('/')[0].split('_')
utm = mgrs[1:3]
zone = mgrs[3]
id_ = mgrs[4:]
if bands:
sensor = sensor.lower()
wavelengths = get_sensor_info(key='wavelength', sensor=sensor)
band_pos = [getattr(wavelengths, b) for b in bands]
else:
band_pos = [1]
lid = f'{utm}/{zone}/{id_}/{safe_id}/IMG_DATA/{mgrs}_{date}'
scene_urls = [f'{gcp_base}/tiles/{lid}_B{band_pos:02d}.jp2' for band_pos in band_pos]
meta_url = f'{utm}/{zone}/{id_}/{safe_id}/MTD_TL.xml'
return scene_urls, meta_url
class GeoDownloads(CloudPathMixin, DownloadMixin):
def __init__(self):
self._gcp_search_dict = None
self.search_dict = None
self.gcp_public = 'https://storage.googleapis.com/gcp-public-data'
self.aws_l8_public = 'https://landsat-pds.s3.amazonaws.com/c1/L8'
self.landsat_parts = ['lt05', 'le07', 'lc08']
self.sentinel_parts = ['s2a', 's2b']
s2_dict = dict(coastal=1,
blue=2,
green=3,
red=4,
nir1=5,
nir2=6,
nir3=7,
nir=8,
rededge=8,
water=9,
cirrus=10,
swir1=11,
swir2=12)
self.gcp_dict = dict(l5='LT05/01',
l7='LE07/01',
l8='LC08/01',
s2='tiles',
s2a='tiles',
s2b='tiles',
s2c='tiles')
self.sensor_collections = dict(l5='lt05',
l7='le07',
l8='lc08')
self.orbit_dates = dict(l5=OrbitDates(start=datetime.strptime('1984-3-1', '%Y-%m-%d'),
end=datetime.strptime('2013-6-5', '%Y-%m-%d')),
l7=OrbitDates(start=datetime.strptime('1999-4-15', '%Y-%m-%d'),
end=datetime.strptime('2100-1-1', '%Y-%m-%d')),
l8=OrbitDates(start=datetime.strptime('2013-2-11', '%Y-%m-%d'),
end=datetime.strptime('2100-1-1', '%Y-%m-%d')),
s2a=OrbitDates(start=datetime.strptime('2015-6-23', '%Y-%m-%d'),
end=datetime.strptime('2100-1-1', '%Y-%m-%d')),
s2b=OrbitDates(start=datetime.strptime('2017-3-7', '%Y-%m-%d'),
end=datetime.strptime('2100-1-1', '%Y-%m-%d')))
self.associations = dict(l5=dict(blue=1,
green=2,
red=3,
nir=4,
swir1=5,
thermal=6,
swir2=7),
l7=dict(blue=1,
green=2,
red=3,
nir=4,
swir1=5,
thermal=6,
swir2=7,
pan=8),
l8=dict(coastal=1,
blue=2,
green=3,
red=4,
nir=5,
swir1=6,
swir2=7,
pan=8,
cirrus=9,
tirs1=10,
tirs2=11),
s2=s2_dict,
s2a=s2_dict,
s2b=s2_dict,
s2c=s2_dict)
def download_cube(self,
sensors,
date_range,
bounds,
bands,
bands_out=None,
crs=None,
out_bounds=None,
outdir='.',
resampling='bilinear',
ref_res=None,
l57_angles_path=None,
l8_angles_path=None,
subsample=1,
write_format='gtiff',
write_angle_files=False,
mask_qa=False,
lqa_mask_items=None,
chunks=512,
cloud_heights=None,
sr_method='srem',
earthdata_username=None,
earthdata_key_file=None,
earthdata_code_file=None,
srtm_outdir=None,
n_jobs=1,
num_workers=1,
num_threads=1,
**kwargs):
"""
Downloads a cube of Landsat and/or Sentinel 2 imagery
Args:
sensors (str or list): The sensors, or sensor, to download.
date_range (list): The date range, given as [date1, date2], where the date format is yyyy-mm.
bounds (GeoDataFrame, list, or tuple): The geometry bounds (in WGS84 lat/lon) that define the cube extent
to download. If given as a ``GeoDataFrame``, only the first ``DataFrame`` record will be used.
If given as a ``tuple`` or a ``list``, the order should be (left, bottom, right, top).
bands (str or list): The bands to download.
E.g.:
Sentinel s2cloudless bands:
bands = ['coastal', 'blue', 'red', 'nir1', 'nir', 'rededge', 'water', 'cirrus', 'swir1', 'swir2']
bands_out (Optional[list]): The bands to write to file. This might be useful after downloading all bands to
mask clouds, but are only interested in subset of those bands.
crs (Optional[str or object]): The output CRS. If ``bounds`` is a ``GeoDataFrame``, the CRS is taken
from the object.
out_bounds (Optional[list or tuple]): The output bounds in ``crs``. If not given, the bounds are
taken from ``bounds``.
outdir (Optional[str]): The output directory.
ref_res (Optional[tuple]): A reference cell resolution.
resampling (Optional[str]): The resampling method.
l57_angles_path (str): The path to the Landsat 5 and 7 angles bin.
l8_angles_path (str): The path to the Landsat 8 angles bin.
subsample (Optional[int]): The sub-sample factor when calculating the angles.
write_format (Optional[str]): The data format to write. Choices are ['gtiff', 'netcdf'].
write_angle_files (Optional[bool]): Whether to write the angles to file.
mask_qa (Optional[bool]): Whether to mask data with the QA file.
lqa_mask_items (Optional[list]): A list of QA mask items for Landsat.
chunks (Optional[int]): The chunk size to read at.
cloud_heights (Optional[list]): The cloud heights, in kilometers.
sr_method (Optional[str]): The surface reflectance correction method. Choices are ['srem', '6s'].
earthdata_username (Optional[str]): The EarthData username.
earthdata_key_file (Optional[str]): The EarthData secret key file.
earthdata_code_file (Optional[str]): The EarthData secret passcode file.
srtm_outdir (Optional[str]): The output SRTM directory.
n_jobs (Optional[int]): The number of parallel download workers for ``joblib``.
num_workers (Optional[int]): The number of parallel workers for ``dask.compute``.
num_threads (Optional[int]): The number of GDAL warp threads.
kwargs (Optional[dict]): Keyword arguments passed to ``to_raster``.
Examples:
>>> from geowombat.util import GeoDownloads
>>> gdl = GeoDownloads()
>>>
>>> # Download a Landsat 7 panchromatic cube
>>> gdl.download_cube(['l7'],
>>> ['2010-01-01', '2010-02-01'],
>>> (-91.57, 40.37, -91.46, 40.42),
>>> ['pan'],
>>> crs="+proj=aea +lat_1=-5 +lat_2=-42 +lat_0=-32 +lon_0=-60 +x_0=0 +y_0=0 +ellps=aust_SA +units=m +no_defs")
>>>
>>> # Download a Landsat 7, 8 and Sentinel 2 cube of the visible spectrum
>>> gdl.download_cube(['l7', 'l8', 's2a'],
>>> ['2017-01-01', '2018-01-01'],
>>> (-91.57, 40.37, -91.46, 40.42),
>>> ['blue', 'green', 'red'],
>>> crs={'init': 'epsg:102033'},
>>> readxsize=1024,
>>> readysize=1024,
>>> n_workers=1,
>>> n_threads=8)
"""
if write_format not in ['gtiff', 'netcdf']:
logger.warning(f' Did not recognize {write_format}. Setting the output data format as gtiff.')
write_format = 'gtiff'
if not lqa_mask_items:
lqa_mask_items = ['fill',
'saturated',
'cloudconf',
'shadowconf',
'cirrusconf']
if isinstance(sensors, str):
sensors = [sensors]
angle_kwargs = kwargs.copy()
angle_kwargs['nodata'] = -32768
nodataval = kwargs['nodata'] if 'nodata' in kwargs else 65535
angle_infos = {}
rt = RadTransforms()
br = BRDF()
la = LinearAdjustments()
dos = DOS()
sixs = SixS()
main_path = Path(outdir)
outdir_tmp = main_path.joinpath('tmp')
outdir_brdf = main_path.joinpath('brdf')
main_path.mkdir(parents=True, exist_ok=True)
outdir_tmp.mkdir(parents=True, exist_ok=True)
outdir_brdf.mkdir(parents=True, exist_ok=True)
# Logging file
# status = Path(outdir).joinpath('status.txt')
#
# if not status.is_file():
#
# with open(str(status), mode='w') as tx:
# pass
# Get bounds from geometry
if isinstance(bounds, tuple) or isinstance(bounds, list):
bounds = Polygon([(bounds[0], bounds[3]), # upper left
(bounds[2], bounds[3]), # upper right
(bounds[2], bounds[1]), # lower right
(bounds[0], bounds[1]), # lower left
(bounds[0], bounds[3])]) # upper left
bounds = gpd.GeoDataFrame([0],
geometry=[bounds],
crs={'init': 'epsg:4326'})
bounds_object = bounds.geometry.values[0]
if not out_bounds:
# Project the bounds
out_bounds = bounds.to_crs(crs).bounds.values[0].tolist()
# Get WRS file
data_bin = os.path.realpath(os.path.dirname(__file__))
data_dir = Path(data_bin).joinpath('../data')
shp_dict = {}
if ('l5' in sensors) or ('l7' in sensors) or ('l8' in sensors):
path_tar = Path(data_dir).joinpath('wrs2.tar.gz')
path_shp = Path(data_dir).joinpath('wrs2_descending.shp')
wrs = os.path.realpath(path_shp.as_posix())
if not path_shp.is_file():
with tarfile.open(os.path.realpath(path_tar.as_posix()), mode='r:gz') as tf:
tf.extractall(data_dir.as_posix())
df_wrs = gpd.read_file(wrs)
df_wrs = df_wrs[df_wrs.geometry.intersects(bounds_object)]
if df_wrs.empty:
logger.warning(' No WRS-2 footprints intersect the geometry bounds.')
return
shp_dict['wrs'] = df_wrs
if ('s2a' in sensors) or ('s2b' in sensors) or ('s2c' in sensors):
path_tar = Path(data_dir).joinpath('mgrs.tar.gz')
path_shp = Path(data_dir).joinpath('sentinel2_grid.shp')
mgrs = os.path.realpath(path_shp.as_posix())
if not path_shp.is_file():
with tarfile.open(os.path.realpath(path_tar.as_posix()), mode='r:gz') as tf:
tf.extractall(data_dir.as_posix())
df_mgrs = gpd.read_file(mgrs)
df_mgrs = df_mgrs[df_mgrs.geometry.intersects(bounds_object)]
if df_mgrs.empty:
logger.warning(' No MGRS tiles intersect the geometry bounds.')
return
shp_dict['mgrs'] = df_mgrs
dt1 = datetime.strptime(date_range[0], '%Y-%m')
dt2 = datetime.strptime(date_range[1], '%Y-%m')
months = list(range(1, 13))
year_months = {}
if dt1.month <= dt2.month:
month_range = months[months.index(dt1.month):months.index(dt2.month) + 1]
else:
month_range = months[months.index(dt1.month):] + months[:months.index(dt2.month) + 1]
if dt1.year == dt2.year:
year_months[dt1.year] = month_range
else:
for y in range(dt1.year, dt2.year + 1):
if y == dt1.year:
year_months[y] = list(range(dt1.month, 13))
elif y == dt2.year:
year_months[y] = list(range(1, dt2.month + 1))
else:
year_months[y] = months
year = dt1.year
while True:
if year > dt2.year:
break
for m in year_months[year]:
yearmonth_query = '{:d}{:02d}'.format(year, m)
target_date = datetime.strptime(yearmonth_query, '%Y%m')
for sensor in sensors:
# Avoid unnecessary GCP queries
if (target_date < self.orbit_dates[sensor.lower()].start) or \
(target_date > self.orbit_dates[sensor.lower()].end):
continue
band_associations = self.associations[sensor]
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
locations = ['{}/{}/{}'.format(dfrow.Name[:2], dfrow.Name[2], dfrow.Name[3:])
for dfi, dfrow in shp_dict['mgrs'].iterrows()]
else:
locations = ['{:03d}/{:03d}'.format(int(dfrow.PATH), int(dfrow.ROW))
for dfi, dfrow in shp_dict['wrs'].iterrows()]
for location in locations:
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
query = '{LOCATION}/{LEVEL}*{YM}*.SAFE/GRANULE/*'.format(LOCATION=location,
LEVEL=sensor.upper(),
YM=yearmonth_query)
else:
query = '{LOCATION}/*{PATHROW}_{YM}*_T1'.format(LOCATION=location,
PATHROW=location.replace('/', ''),
YM=yearmonth_query)
# Query and list available files on the GCP
self.list_gcp(sensor, query)
self.search_dict = self.get_gcp_results
if not self.search_dict:
logger.warning(
' No results found for {SENSOR} at location {LOC}, year {YEAR:d}, month {MONTH:d}.'.format(
SENSOR=sensor,
LOC=location,
YEAR=year,
MONTH=m))
continue
# Download data
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
load_bands = [f'B{band_associations[bd]:02d}' if bd != 'rededge' else
f'B{band_associations[bd]:01d}A' for bd in bands]
search_wildcards = ['MTD_TL.xml'] + [bd + '.jp2' for bd in load_bands]
file_info = self.download_gcp(sensor,
outdir=outdir_tmp,
outdir_brdf=outdir_brdf,
search_wildcards=search_wildcards,
n_jobs=n_jobs,
verbose=1)
# Reorganize the dictionary to combine bands and metadata
new_dict_ = {}
for finfo_key, finfo_dict in file_info.items():
sub_dict_ = {}
if 'meta' in finfo_dict:
key = finfo_dict['meta'].name
sub_dict_['meta'] = finfo_dict['meta']
for finfo_key_, finfo_dict_ in file_info.items():
if 'meta' not in finfo_dict_:
for bdkey_, bdinfo_ in finfo_dict_.items():
if '_'.join(bdinfo_.name.split('_')[:-1]) in key:
sub_dict_[bdkey_] = bdinfo_
new_dict_[finfo_key] = sub_dict_
file_info = new_dict_
else:
del_keys = [k for k, v in self.search_dict.items() if 'gap_mask' in k]
for dk in del_keys:
del self.search_dict[dk]
load_bands = sorted(['B{:d}'.format(band_associations[bd]) for bd in bands])
search_wildcards = ['ANG.txt', 'MTL.txt', 'BQA.TIF'] + [bd + '.TIF' for bd in load_bands]
file_info = self.download_gcp(sensor,
outdir=outdir_tmp,
outdir_brdf=outdir_brdf,
search_wildcards=search_wildcards,
n_jobs=n_jobs,
verbose=1)
logger.info(' Finished downloading files for yyyymm query, {}.'.format(yearmonth_query))
# Create pixel angle files
# TODO: this can be run in parallel
for finfo_key, finfo_dict in file_info.items():
# Incomplete dictionary because file was checked, existed, and cleaned
if 'meta' not in finfo_dict:
logger.warning(' The metadata does not exist.')
_clean_and_update(None, finfo_dict, None, check_angles=False)
continue
brdfp = '_'.join(Path(finfo_dict['meta'].name).name.split('_')[:-1])
out_brdf = outdir_brdf.joinpath(brdfp + '.tif')
out_angles = outdir_brdf.joinpath(brdfp + '_angles.tif')
if sensor in ['s2', 's2a', 's2b', 's2c']:
outdir_angles = outdir_tmp.joinpath('angles_{}'.format(Path(finfo_dict['meta'].name).name.replace('_MTD_TL.xml', '')))
else:
outdir_angles = outdir_tmp.joinpath('angles_{}'.format(Path(finfo_dict['meta'].name).name.replace('_MTL.txt', '')))
if not Path(finfo_dict['meta'].name).is_file():
logger.warning(' The metadata does not exist.')
_clean_and_update(outdir_angles, finfo_dict, finfo_dict['meta'].name, check_angles=False)
continue
if out_brdf.is_file():
logger.warning(' The output BRDF file, {}, already exists.'.format(brdfp))
_clean_and_update(outdir_angles, finfo_dict, finfo_dict['meta'].name, check_angles=False)
continue
if load_bands[0] not in finfo_dict:
logger.warning(' The download for {} was incomplete.'.format(brdfp))
_clean_and_update(outdir_angles, finfo_dict, finfo_dict['meta'].name, check_angles=False)
continue
outdir_angles.mkdir(parents=True, exist_ok=True)
ref_file = finfo_dict[load_bands[0]].name
logger.info(' Processing angles for {} ...'.format(brdfp))
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
meta = rt.get_sentinel_coefficients(finfo_dict['meta'].name)
angle_info = sentinel_pixel_angles(finfo_dict['meta'].name,
ref_file,
str(outdir_angles),
nodata=-32768,
overwrite=False,
verbose=1)
if ' '.join(bands) == 'coastal blue green red nir1 nir2 nir3 nir rededge water cirrus swir1 swir2':
rad_sensor = 's2af' if angle_info.sensor == 's2a' else 's2bf'
elif ' '.join(bands) == 'coastal blue red nir1 nir rededge water cirrus swir1 swir2':
rad_sensor = 's2acloudless' if angle_info.sensor == 's2a' else 's2bcloudless'
elif ' '.join(bands) == 'blue green red nir1 nir2 nir3 nir rededge swir1 swir2':
rad_sensor = angle_info.sensor
elif ' '.join(bands) == 'blue green red nir swir1 swir2':
rad_sensor = 's2al7' if angle_info.sensor == 's2a' else 's2bl7'
elif ' '.join(bands) == 'nir1 nir2 nir3 rededge swir1 swir2':
rad_sensor = 's2a20' if angle_info.sensor == 's2a' else 's2b20'
elif ' '.join(bands) == 'blue green red nir':
rad_sensor = 's2a10' if angle_info.sensor == 's2a' else 's2b10'
else:
rad_sensor = angle_info.sensor
bandpass_sensor = angle_info.sensor
else:
meta = rt.get_landsat_coefficients(finfo_dict['meta'].name)
angle_info = landsat_pixel_angles(finfo_dict['angle'].name,
ref_file,
str(outdir_angles),
meta.sensor,
l57_angles_path=l57_angles_path,
l8_angles_path=l8_angles_path,
subsample=subsample,
resampling='bilinear',
num_threads=num_workers,
verbose=1)
if (len(bands) == 1) and (bands[0] == 'pan'):
rad_sensor = sensor + bands[0]
else:
if (len(bands) == 6) and (meta.sensor == 'l8'):
rad_sensor = 'l8l7'
elif (len(bands) == 7) and (meta.sensor == 'l8') and ('pan' in bands):
rad_sensor = 'l8l7mspan'
elif (len(bands) == 7) and (meta.sensor == 'l7') and ('pan' in bands):
rad_sensor = 'l7mspan'
else:
rad_sensor = meta.sensor
bandpass_sensor = sensor
if sensor in ['s2', 's2a', 's2b', 's2c']:
logger.info(f' Translating jp2 files to gtiff for {brdfp} ...')
load_bands_names = []
# Convert to GeoTiffs to avoid CRS issue with jp2 format
for bd in load_bands:
# Check if the file exists to avoid duplicate GCP filenames
if Path(finfo_dict[bd].name).is_file():
warp(finfo_dict[bd].name,
finfo_dict[bd].name.replace('.jp2', '.tif'),
overwrite=True,
delete_input=True,
multithread=True,
warpMemoryLimit=256,
outputBounds=out_bounds,
xRes=ref_res[0],
yRes=ref_res[1],
resampleAlg=RESAMPLING_DICT[resampling],
creationOptions=['TILED=YES',
'COMPRESS=LZW',
'BLOCKXSIZE={CHUNKS:d}'.format(CHUNKS=chunks),
'BLOCKYSIZE={CHUNKS:d}'.format(CHUNKS=chunks)])
load_bands_names.append(finfo_dict[bd].name.replace('.jp2', '.tif'))
else:
# Get band names from user
try:
load_bands_names = [finfo_dict[bd].name for bd in load_bands]
except:
logger.exception(' Could not get all band name associations.')
raise NameError
logger.info(f' Applying BRDF and SR correction for {brdfp} ...')
with gw.config.update(sensor=rad_sensor,
ref_bounds=out_bounds,
ref_crs=crs,
ref_res=ref_res if ref_res else load_bands_names[-1],
ignore_warnings=True,
nasa_earthdata_user=earthdata_username,
nasa_earthdata_key=earthdata_key_file,
nasa_earthdata_code=earthdata_code_file):
valid_data = True
# Ensure there is data
with gw.open(load_bands_names[0],
band_names=[1],
chunks=chunks,
num_threads=num_threads) as data:
if data.sel(band=1).min().data.compute(num_workers=num_workers) > 10000:
valid_data = False
if valid_data:
if data.sel(band=1).max().data.compute(num_workers=num_workers) == 0:
valid_data = False
if valid_data:
with gw.open(angle_info.sza,
chunks=chunks,
resampling='bilinear') as sza, \
gw.open(angle_info.vza,
chunks=chunks,
resampling='bilinear') as vza, \
gw.open(angle_info.saa,
chunks=chunks,
resampling='bilinear') as saa, \
gw.open(angle_info.vaa,
chunks=chunks,
resampling='bilinear') as vaa, \
gw.open(load_bands_names,
band_names=bands,
stack_dim='band',
chunks=chunks,
resampling=resampling,
num_threads=num_threads) as data:
attrs = data.attrs.copy()
if mask_qa:
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
if S2CLOUDLESS_INSTALLED:
cloud_detector = S2PixelCloudDetector(threshold=0.4,
average_over=1,
dilation_size=5,
all_bands=False)
# Get the S2Cloudless bands
data_cloudless = data.sel(band=['coastal', 'blue', 'red', 'nir1', 'nir', 'rededge', 'water', 'cirrus', 'swir1', 'swir2'])
# Scale from 0-10000 to 0-1 and reshape
X = (data_cloudless * 0.0001).clip(0, 1).data\
.compute(num_workers=num_workers)\
.transpose(1, 2, 0)[np.newaxis, :, :, :]
# Predict clouds
# Potential classes? Currently, only clear and clouds are returned.
# clear=0, clouds=1, shadow=2, snow=3, cirrus=4, water=5
mask = ndarray_to_xarray(data,
cloud_detector.get_cloud_masks(X),
['mask'])
else:
if bands_out:
# If there are extra bands, remove them because they
# are not supported in the BRDF kernels.
data = _assign_attrs(data, attrs, bands_out)
logger.warning(' S2Cloudless is not installed, so skipping Sentinel cloud masking.')
if sr_method == 'srem':
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
# The S-2 data are in TOAR (0-10000)
toar_scaled = (data * 0.0001)\
.astype('float64')\
.clip(0, 1)\
.assign_attrs(**attrs)
# Convert TOAR to surface reflectance
sr = rt.toar_to_sr(toar_scaled,
sza, saa, vza, vaa,
rad_sensor,
method='srem',
dst_nodata=nodataval)
else:
# Convert DN to surface reflectance
sr = rt.dn_to_sr(data,
sza, saa, vza, vaa,
method='srem',
sensor=rad_sensor,
meta=meta,
src_nodata=nodataval,
dst_nodata=nodataval)
else:
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
# The S-2 data are in TOAR (0-10000)
data = (data * 0.0001)\
.astype('float64')\
.assign_attrs(**attrs)
data_values = 'toar'
else:
data_values = 'dn'
if isinstance(earthdata_username, str) and \
isinstance(earthdata_key_file, str) and \
isinstance(earthdata_code_file, str):
altitude = sixs.get_mean_altitude(data,
srtm_outdir,
n_jobs=n_jobs)
altitude *= 0.0001
else:
altitude = 0.0
# Resample to 500m x 500m
data_coarse = data.sel(band=['blue', 'swir2']).gw\
.transform_crs(dst_res=500.0,
resampling='med')
aot = dos.get_aot(data_coarse,
meta.sza,
meta,
data_values=data_values,
dn_interp=data,
angle_factor=1.0,
interp_method='fast',
aot_fallback=0.3,
h2o=2.0,
o3=0.3, # global average total-column ozone (~0.3 atm-cm, i.e. ~300 Dobson units)
altitude=altitude,
w=151,
n_jobs=n_jobs)
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
sr = rt.toar_to_sr(data,
meta.sza,
None,
None,
None,
meta=meta,
src_nodata=nodataval,
dst_nodata=nodataval,
angle_factor=1.0,
method='6s',
interp_method='fast',
h2o=2.0,
o3=0.3,
aot=aot,
altitude=altitude,
n_jobs=n_jobs)
else:
sr = rt.dn_to_sr(data,
meta.sza,
None,
None,
None,
meta=meta,
src_nodata=nodataval,
dst_nodata=nodataval,
angle_factor=1.0,
method='6s',
interp_method='fast',
h2o=2.0,
o3=0.3,
aot=aot,
altitude=altitude,
n_jobs=n_jobs)
# BRDF normalization
sr_brdf = br.norm_brdf(sr,
sza, saa, vza, vaa,
sensor=rad_sensor,
wavelengths=data.band.values.tolist(),
out_range=10000.0,
src_nodata=nodataval,
dst_nodata=nodataval)
if bandpass_sensor.lower() in ['l5', 'l7', 's2', 's2a', 's2b', 's2c']:
# Linearly adjust to Landsat 8
sr_brdf = la.bandpass(sr_brdf,
bandpass_sensor.lower(),
to='l8',
scale_factor=0.0001,
src_nodata=nodataval,
dst_nodata=nodataval)
if mask_qa:
if sensor.lower() in ['s2', 's2a', 's2b', 's2c']:
if S2CLOUDLESS_INSTALLED:
wavel_sub = sr_brdf.gw.set_nodata(nodataval,
nodataval,
out_range=(0, 1),
dtype='float64')
# Estimate the cloud shadows
mask = estimate_cloud_shadows(wavel_sub,
mask,
sza,
saa,
vza,
vaa,
heights=cloud_heights,
num_workers=num_workers)
# Update the bands with the mask
sr_brdf = xr.where((mask.sel(band='mask') == 0) &
(sr_brdf != nodataval),
sr_brdf.clip(0, 10000),
nodataval).astype('uint16')
sr_brdf = _assign_attrs(sr_brdf, attrs, bands_out)
if write_format == 'gtiff':
sr_brdf.gw.to_raster(str(out_brdf), **kwargs)
else:
sr_brdf.gw.to_netcdf(str(out_brdf), zlib=True, complevel=5)
else:
with gw.open(finfo_dict['qa'].name,
band_names=['qa']) as qa:
if sensor.lower() == 'l8':
qa_sensor = 'l8-c1'
else:
qa_sensor = 'l-c1'
mask = QAMasker(qa,
qa_sensor,
mask_items=lqa_mask_items,
confidence_level='maybe').to_mask()
# Mask non-clear pixels
sr_brdf = xr.where(mask.sel(band='mask') < 2,
sr_brdf.clip(0, 10000),
nodataval).astype('uint16')
sr_brdf = _assign_attrs(sr_brdf, attrs, bands_out)
if write_format == 'gtiff':
sr_brdf.gw.to_raster(str(out_brdf), **kwargs)
else:
sr_brdf.gw.to_netcdf(str(out_brdf), zlib=True, complevel=5)
else:
# Set 'no data' values
sr_brdf = sr_brdf.gw.set_nodata(nodataval,
nodataval,
out_range=(0, 10000),
dtype='uint16')
sr_brdf = _assign_attrs(sr_brdf, attrs, bands_out)
if write_format == 'gtiff':
sr_brdf.gw.to_raster(str(out_brdf), **kwargs)
else:
sr_brdf.gw.to_netcdf(str(out_brdf).replace('.tif', '.nc'),
zlib=True,
complevel=5)
if write_angle_files:
angle_stack = xr.concat((sza, saa), dim='band')\
.astype('int16')\
.assign_coords(band=['sza', 'saa'])\
.assign_attrs(**sza.attrs.copy())
if write_format == 'gtiff':
angle_stack.gw.to_raster(str(out_angles), **kwargs)
else:
angle_stack.gw.to_netcdf(str(out_angles).replace('.tif', '.nc'),
zlib=True,
complevel=5)
else:
logger.warning(' Not enough data for {} to store on disk.'.format(str(out_brdf)))
# Write an empty file for tracking
with open(str(out_brdf).replace('.tif', '.nodata'), 'w') as tx:
tx.writelines([])
angle_infos[finfo_key] = angle_info
_clean_and_update(outdir_angles,
finfo_dict,
finfo_dict['meta'].name,
load_bands_names=load_bands_names)
year += 1
def list_gcp(self, sensor, query):
"""
Lists files from Google Cloud Platform
Args:
sensor (str): The sensor to query. Choices are ['l5', 'l7', 'l8', 's2', 's2a', 's2b', 's2c'].
query (str): The query string.
Examples:
>>> dl = GeoDownloads()
>>>
>>> # Query from a known directory
>>> dl.list_gcp('landsat', 'LC08/01/042/034/LC08_L1TP_042034_20161104_20170219_01_T1/')
>>>
>>> # Query a date for Landsat 5
>>> dl.list_gcp('l5', '042/034/*2016*')
>>>
>>> # Query a date for Landsat 7
>>> dl.list_gcp('l7', '042/034/*2016*')
>>>
>>> # Query a date for Landsat 8
>>> dl.list_gcp('l8', '042/034/*2016*')
>>>
>>> # Query Sentinel-2
>>> dl.list_gcp('s2a', '21/H/UD/*2019*.SAFE/GRANULE/*')
Returns:
``dict``
"""
if sensor not in ['l5', 'l7', 'l8', 's2', 's2a', 's2b', 's2c']:
logger.exception(" The sensor must be 'l5', 'l7', 'l8', 's2', 's2a', 's2b', or 's2c'.")
raise NameError
if sensor in ['s2', 's2a', 's2b', 's2c']:
gcp_str = "gsutil ls -r gs://gcp-public-data-sentinel-2"
else:
gcp_str = "gsutil ls -r gs://gcp-public-data-landsat"
gsutil_str = gcp_str + "/" + self.gcp_dict[sensor] + "/" + query
try:
proc = subprocess.run(gsutil_str.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except:
logger.exception('gsutil must be installed.')
raise
output = proc.stdout
gcp_search_list = [outp for outp in output.decode('utf-8').split('\n') if '$folder$' not in outp]
self._gcp_search_dict = {}
if gcp_search_list:
# Check for length-1 lists with empty strings
if gcp_search_list[0]:
if sensor in ['s2', 's2a', 's2b', 's2c']:
self._gcp_search_dict = self._prepare_gcp_dict(gcp_search_list, 'gs://gcp-public-data-sentinel-2/')
else:
self._gcp_search_dict = self._prepare_gcp_dict(gcp_search_list, 'gs://gcp-public-data-landsat/')
@property
def get_gcp_results(self):
return self._gcp_search_dict.copy()
@staticmethod
def _prepare_gcp_dict(search_list, gcp_str):
"""
Prepares a list of GCP keys into a dictionary
Args:
search_list (list)
gcp_str (str): The GCP bucket prefix stripped from each key.
Returns:
``dict``
"""
df = pd.DataFrame(data=search_list, columns=['url'])
df['mask'] = df.url.str.strip().str.endswith('/:')
mask_idx = np.where(df['mask'].values)[0]
mask_range = mask_idx.shape[0] - 1 if mask_idx.shape[0] > 1 else 1
url_dict = {}
for mi in range(0, mask_range):
m1 = mask_idx[mi]
if mask_range > 1:
m2 = mask_idx[mi + 1] - 1
else:
m2 = len(search_list)
key = search_list[m1].replace(gcp_str, '').replace('/:', '')
values = search_list[m1:m2]
values = [value for value in values if value]
url_dict[key] = [value for value in values if not value.endswith('/:')]
return url_dict
```
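The download loop above leans on `list_gcp`, which shells out to `gsutil`, and on the `get_gcp_results` property, which returns the listing grouped by granule directory. A minimal sketch of that flow, assuming `gsutil` is installed and on the `PATH`; the import location, path/row and date wildcard below are illustrative, not taken from the package:

```python
from geowombat.util import GeoDownloads  # assumed import location

dl = GeoDownloads()

# List Landsat 8 scenes for WRS path/row 042/034 acquired in 2016,
# using the same query form as the docstring examples above.
dl.list_gcp('l8', '042/034/*2016*')

# ``get_gcp_results`` returns a copy of the grouped listing:
# {granule_key: [file URLs, with the '/:' directory markers removed], ...}
for granule, urls in dl.get_gcp_results.items():
    print(granule, len(urls))
```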
#### File: jgrss/geowombat/setup.py
```python
import setuptools
from pathlib import Path
from distutils.core import setup
from distutils.extension import Extension
import re
from collections import defaultdict
import subprocess
try:
from Cython.Build import cythonize
except:
raise ImportError('Cython must be installed to build GeoWombat.')
try:
import numpy as np
except:
raise ImportError('NumPy must be installed to build GeoWombat.')
# Parse the version from the module.
# Source: https://github.com/mapbox/rasterio/blob/master/setup.py
with open('geowombat/version.py') as f:
for line in f:
if line.find("__version__") >= 0:
version = line.split("=")[1].strip()
version = version.strip('"')
version = version.strip("'")
continue
pkg_name = 'geowombat'
maintainer = ''
maintainer_email = ''
description = 'GeoWombat: Utilities for geospatial data'
git_url = 'https://github.com/jgrss/geowombat'
download_url = '{GIT}/archive/{PKG}-{VERSION}'.format(GIT=git_url, PKG=pkg_name, VERSION=version)
keywords = ['raster', 'satellite']
extras = 'extra-requirements.txt'
with open('README.md') as f:
long_description = f.read()
with open('LICENSE.txt') as f:
license_file = f.read()
with open('requirements.txt') as f:
required_packages = f.readlines()
# Attempt to get the GDAL binary version
try:
process = subprocess.Popen(['gdalinfo', '--version'], stdout=subprocess.PIPE, stderr=None)
gdal_version = str(process.communicate()[0]).split(',')[0].split(' ')[1].strip()
except:
gdal_version = None
if gdal_version:
required_packages.append('GDAL=={GDAL_VERSION}\n'.format(GDAL_VERSION=gdal_version))
def get_extra_requires(path, add_all=True):
with open(path) as fp:
extra_deps = defaultdict(set)
for k in fp:
if k.strip() and not k.startswith('#'):
tags = set()
if ':' in k:
k, v = k.split(':')
tags.update(vv.strip() for vv in v.split(','))
tags.add(re.split('[<=>]', k)[0])
for t in tags:
extra_deps[t].add(k)
# add tag `all` at the end
if add_all:
extra_deps['all'] = set(vv for v in extra_deps.values() for vv in v)
return extra_deps
def get_packages():
return setuptools.find_packages()
def get_package_data():
return {'': ['*.md', '*.txt'],
'data': ['*.png'],
'geowombat': ['config.ini',
'data/*.tif',
'data/*.TIF',
'data/*.gpkg',
'data/*.tar.gz',
'moving/*.so',
'bin/*.tar.gz']}
def get_extensions():
extensions = [Extension('*',
sources=['geowombat/moving/_moving.pyx'],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'])]
if Path('geowombat/moving/_test.pyx').is_file():
extensions += [Extension('*',
sources=['geowombat/moving/_test.pyx'],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'])]
if Path('geowombat/radiometry/_fusion.pyx').is_file():
extensions += [Extension('*',
sources=['geowombat/radiometry/_fusion.pyx'],
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'])]
return extensions
def setup_package():
include_dirs = [np.get_include()]
metadata = dict(name=pkg_name,
maintainer=maintainer,
maintainer_email=maintainer_email,
description=description,
license=license_file,
version=version,
long_description=long_description,
packages=get_packages(),
package_data=get_package_data(),
ext_modules=cythonize(get_extensions()),
zip_safe=False,
keywords=' '.join(keywords),
url=git_url,
download_url=download_url,
install_requires=required_packages,
extras_require=get_extra_requires(extras),
include_dirs=include_dirs,
classifiers=['Intended Audience :: Science/Research',
'License :: MIT',
'Topic :: Scientific :: Remote Sensing',
'Programming Language :: Cython',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'])
setup(**metadata)
if __name__ == '__main__':
setup_package()
```
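`get_extra_requires` above builds the setuptools `extras_require` mapping from a plain-text file: each non-comment line is a requirement, optionally followed by `:` and a comma-separated list of tags, and the bare package name is always added as a tag of its own, with an `all` tag collecting everything. A sketch of the expected layout, reusing the function defined above; the packages and tags are illustrative, not the project's actual extras:

```python
# extra-requirements.txt (illustrative contents):
#
#     zarr>=2.4.0: io
#     s2cloudless: cloudmask
#     ray
#
# Each tag, plus the bare package name, becomes an extras key:
extras = get_extra_requires('extra-requirements.txt')
# extras ≈ {'zarr': {'zarr>=2.4.0'}, 'io': {'zarr>=2.4.0'},
#           's2cloudless': {'s2cloudless'}, 'cloudmask': {'s2cloudless'},
#           'ray': {'ray'},
#           'all': the union of every requirement above}
```

With the `all` tag in place, something like `pip install geowombat[all]` would pull in every optional dependency at once.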
|
{
"source": "jgrss/sacfei",
"score": 2
}
|
#### File: sacfei/sacfei/utils.py
```python
from __future__ import division
from future.utils import viewitems
from builtins import int, zip
import concurrent.futures
import os
import itertools
from ._adaptive_threshold import threshold as athreshold
from .pool import pooler
from ._moving_window import moving_window
# from mpglue.raster_tools import create_raster
# from mpglue import moving_window
import numpy as np
import cv2
# SciPy
from scipy.ndimage.measurements import label as nd_label
from scipy.ndimage.measurements import mean as nd_mean
import scipy.stats as sci_stats
from scipy.stats import mode as sci_mode
from sklearn.preprocessing import StandardScaler
# Scikit-image
from skimage.exposure import rescale_intensity
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects, skeletonize
from skimage.morphology import thin as sk_thin
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from skimage.measure import label as sk_label
import pymorph
from mahotas import thin as mthin
from mahotas.morph import hitmiss as mhitmiss
# from tqdm import tqdm
# from joblib import Parallel, delayed
def local_straightness(arr, kernel_filter, w, sigma_color, sigma_space):
"""
https://ieeexplore-ieee-org.ezproxy.library.uq.edu.au/document/1334256
https://docs.opencv.org/master/d4/d70/tutorial_anisotropic_image_segmentation_by_a_gst.html
Example:
>>> conv_kernels = set_kernel_pairs(methods=['compass'])
>>> kernel_filter = conv_kernels['compass']['kernels']
>>> local_straightness(array, kernel_filter, 3, 1, 1)
"""
diff_x = cv2.filter2D(np.float32(arr),
cv2.CV_32F,
kernel_filter[1],
borderType=cv2.BORDER_CONSTANT)
diff_y = cv2.filter2D(np.float32(arr),
cv2.CV_32F,
kernel_filter[0],
borderType=cv2.BORDER_CONSTANT)
diff_xy = diff_x * diff_y
diff_xx = diff_x * diff_x
diff_yy = diff_y * diff_y
c11 = cv2.boxFilter(np.float32(diff_xx), cv2.CV_32F, (w, w))
c22 = cv2.boxFilter(np.float32(diff_yy), cv2.CV_32F, (w, w))
c12 = cv2.boxFilter(np.float32(diff_xy), cv2.CV_32F, (w, w))
# c11 = cv2.bilateralFilter(np.float32(diff_xx), w, sigma_color, sigma_space)
# c22 = cv2.bilateralFilter(np.float32(diff_yy), w, sigma_color, sigma_space)
# c12 = cv2.bilateralFilter(np.float32(diff_xy), w, sigma_color, sigma_space)
gamma_max = (c11 + c22 + np.sqrt((c11 - c22)**2 + 4*c12**2)) / 2.0
gamma_min = (c11 + c22 - np.sqrt((c11 - c22)**2 + 4*c12**2)) / 2.0
s = 1.0 - (gamma_min / gamma_max)
return s
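# Note on the math above: c11, c22 and c12 are the entries of the local
# structure tensor, so gamma_max and gamma_min are its eigenvalues,
#     gamma = (c11 + c22 +/- sqrt((c11 - c22)**2 + 4*c12**2)) / 2,
# and s = 1 - gamma_min / gamma_max approaches 1 for line-like (straight)
# structure and 0 for isotropic texture.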
def logistic(x, **params):
return sci_stats.logistic.cdf(x, **params)
def sigmoid(x, a, b):
return 1.0 / (1.0 + np.exp(-b * (x - a)))
def log_transform(egm, scale=1e-6, logistic_alpha=1.6, logistic_beta=0.5):
"""
Transforms an EGM to probabilities
Args:
egm (2d array)
scale (Optional[float]): The scaling factor
logistic_alpha (Optional[float])
logistic_beta (Optional[float])
Returns:
Probabilities (2d array)
"""
# Mask
egm[egm == 0] = np.nan
log_min = np.nanpercentile(np.log(egm * scale), 2)
egm[np.isnan(egm)] = 0
# Log transform
egm_proba = np.where(egm > 0, np.log(egm * scale), log_min)
# Scale and clip
r, c = egm_proba.shape
zegm = np.where(egm_proba.ravel() > log_min)[0]
scaler = StandardScaler().fit(egm_proba.ravel()[zegm][:, np.newaxis])
egm_proba = scaler.transform(egm_proba.ravel()[:, np.newaxis]).reshape(r, c)
egm_proba = rescale_intensity(egm_proba, in_range=(-3, 3), out_range=(-3, 3))
# CDF
return logistic(egm_proba,
loc=logistic_alpha,
scale=logistic_beta)
def bayes(prior_a, prior_b, likelihood):
"""
Bayes rule
Args:
prior_a (float): The class prior probability.
prior_b (float): The class prior probability.
likelihood (float)
"""
posterior = (likelihood * prior_a) / (likelihood * prior_a + prior_b * (1.0 - prior_a))
posterior[np.isnan(posterior)] = 0
return posterior
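# Example (a sketch of the rule as implemented above): with a prior of
# prior_a = 0.3 for the class of interest, prior_b = 0.7 for the
# alternative, and an edge likelihood of 0.8,
#     posterior = (0.8 * 0.3) / (0.8 * 0.3 + 0.7 * (1 - 0.3)) ≈ 0.33
# >>> bayes(np.array([0.3]), np.array([0.7]), np.array([0.8]))
# array([0.32876712])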
class Params(object):
def __init__(self, **kwargs):
for k, v in viewitems(kwargs):
setattr(self, k, v)
def mopen(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_OPEN,
se,
iterations=iters)
def mclose(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_CLOSE,
se,
iterations=iters)
def merode(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_ERODE,
se,
iterations=iters)
def mdilate(array2morph, se, iters=1):
return cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_DILATE,
se,
iterations=iters)
def closerec(array2morph, se, r=3, iters=5):
"""
Close by reconstruction
Args:
array2morph (2d array)
se (str)
r (Optional[int])
iters (Optional[int])
"""
if se == 'disk':
se = np.uint8(pymorph.sedisk(r=r))
elif se == 'cross':
se = np.uint8(pymorph.secross(r=r))
evi2_dist = np.float32(cv2.distanceTransform(np.uint8(np.where(array2morph >= 20, 1, 0)), cv2.DIST_L2, 3))
seed = np.uint8(np.where(evi2_dist >= 2,
cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_OPEN,
se,
iterations=1),
0))
im_result = seed.copy()
for iter in range(0, iters):
im_dilated = cv2.morphologyEx(np.uint8(im_result),
cv2.MORPH_DILATE,
se,
iterations=1)
im_rec = np.minimum(im_dilated, array2morph)
im_result = im_rec.copy()
if np.allclose(seed, im_rec):
break
return im_result
def openrec(array2morph, se, iters=5):
"""
Open by reconstruction
Args:
array2morph (2d array)
se (2d array)
iters (Optional[int])
"""
evi2_dist = np.float32(cv2.distanceTransform(np.uint8(np.where(array2morph >= 20, 1, 0)), cv2.DIST_L2, 3))
seed = np.uint8(np.where(evi2_dist >= 2,
cv2.morphologyEx(np.uint8(array2morph),
cv2.MORPH_OPEN,
se,
iterations=1),
0))
im_result = seed.copy()
for iter in range(0, iters):
im_dilated = merode(im_result, se, iters=1)
im_rec = np.minimum(im_dilated, array2morph)
im_result = im_rec.copy()
if np.allclose(seed, im_rec):
break
return im_result
def set_kernel_pairs(methods=None):
"""
Creates 2d convolution kernels
Args:
methods (Optional[str list]): Choices are ['compass', 'kirsch', 'prewitt', 'roberts', 'scharr', 'sobel'].
Returns:
Dictionary of kernel filters keyed by method name
"""
returned_filters = dict()
if methods:
returned_filters['custom'] = dict(kernels=methods,
compass=True)
methods = ['compass', 'kirsch', 'prewitt', 'roberts', 'sobel']
# Prewitt compass
compass_filters = np.array([[[-1, -1, -1],
[1, -2, 1],
[1, 1, 1]],
[[-1, -1, 1],
[-1, -2, 1],
[1, 1, 1]],
[[-1, 1, 1],
[-1, -2, 1],
[-1, 1, 1]],
[[1, 1, 1],
[-1, -2, 1],
[-1, -1, 1]],
[[1, 1, 1],
[1, -2, 1],
[-1, -1, -1]],
[[1, 1, 1],
[1, -2, -1],
[1, -1, -1]],
[[1, 1, -1],
[1, -2, -1],
[1, 1, -1]]], dtype='float32')
# Sobel
sobel_filters = np.array([[[1, 2, 0],
[2, 0, -2],
[0, -2, -1]],
[[-1, -2, 0],
[-2, 0, 2],
[0, 2, 1]],
[[0, 2, 1],
[-2, 0, 2],
[-1, -2, 0]],
[[0, -2, -1],
[2, 0, -2],
[1, 2, 0]],
[[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]],
[[1, 0, -1],
[2, 0, -2],
[1, 0, -1]],
[[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]],
[[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]]], dtype='float32')
# Scharr
scharr_filters = np.array([[[10, 3, 0],
[3, 0, -3],
[0, -3, -10]],
[[-10, -3, 0],
[-3, 0, 3],
[0, 3, 10]],
[[0, 3, 10],
[-3, 0, 3],
[-10, -3, 0]],
[[0, -3, -10],
[3, 0, -3],
[10, 3, 0]],
[[-10, 0, 10],
[-3, 0, 3],
[-10, 0, 10]],
[[10, 0, -10],
[3, 0, -3],
[10, 0, -10]],
[[-10, -3, -10],
[0, 0, 0],
[10, 3, 10]],
[[10, 3, 10],
[0, 0, 0],
[-10, -3, -10]]], dtype='float32')
# Roberts cross
roberts_filters = np.array([[[0, -1],
[1, 0]],
[[0, 1],
[-1, 0]],
[[-1, 0],
[0, 1]],
[[1, 0],
[0, -1]]], dtype='float32')
# Prewitt
prewitt_filters = np.array([[[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]],
[[-1, -1, -1],
[0, 0, 0],
[1, 1, 1]],
[[1, 1, 0],
[1, 0, -1],
[0, -1, -1]],
[[-1, -1, 0],
[-1, 0, 1],
[0, 1, 1]],
[[1, 0, -1],
[1, 0, -1],
[1, 0, -1]],
[[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]],
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]],
[[0, -1, -1],
[1, 0, -1],
[1, 1, 0]]], dtype='float32')
# Kirsch compass
kirsch_filters = np.array([[[5, 5, 5],
[-3, 0, -3],
[-3, -3, -3]],
[[5, 5, -3],
[5, 0, -3],
[-3, -3, -3]],
[[5, -3, -3],
[5, 0, -3],
[5, -3, -3]],
[[-3, -3, -3],
[5, 0, -3],
[5, 5, -3]],
[[-3, -3, -3],
[-3, 0, -3],
[5, 5, 5]],
[[-3, -3, -3],
[-3, 0, 5],
[-3, 5, 5]],
[[-3, -3, 5],
[-3, 0, 5],
[-3, -3, 5]]], dtype='float32')
if 'compass' in methods:
returned_filters['compass'] = dict(kernels=compass_filters,
compass=True)
if 'kirsch' in methods:
returned_filters['kirsch'] = dict(kernels=kirsch_filters,
compass=True)
if 'prewitt' in methods:
returned_filters['prewitt'] = dict(kernels=prewitt_filters,
compass=False)
if 'roberts' in methods:
returned_filters['roberts'] = dict(kernels=roberts_filters,
compass=False)
if 'scharr' in methods:
returned_filters['scharr'] = dict(kernels=scharr_filters,
compass=False)
if 'sobel' in methods:
returned_filters['sobel'] = dict(kernels=sobel_filters,
compass=False)
return returned_filters
def find_circles(intensity_array, kernel_size):
"""
Finds circles
Args:
intensity_array (2d array)
kernel_size (int)
"""
kernel_radius = int(kernel_size / 2.0)
kernel_circle = np.uint8(pymorph.sedisk(r=kernel_radius,
dim=2,
metric='euclidean',
flat=True,
h=0) * 1)
kernel_square = np.uint8(pymorph.sebox(r=kernel_radius) * 1)
circles = cv2.filter2D(np.float32(intensity_array),
cv2.CV_32F,
kernel_circle,
borderType=cv2.BORDER_CONSTANT)
squares = cv2.filter2D(np.float32(intensity_array),
cv2.CV_32F,
kernel_square,
borderType=cv2.BORDER_CONSTANT)
diff = circles - squares
local_max_coords = peak_local_max(diff,
min_distance=kernel_size,
indices=True)
local_max = np.zeros(intensity_array.shape, dtype='uint8')
for local_coord in local_max_coords:
local_coord[0] -= kernel_radius
local_coord[1] -= kernel_radius
local_max[local_coord[0]:local_coord[0]+kernel_size,
local_coord[1]:local_coord[1]+kernel_size] = kernel_circle
se = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], dtype='uint8')
return cv2.morphologyEx(local_max,
cv2.MORPH_GRADIENT,
se)
def _get_magnitude(image2convolve, kernel_filter):
"""
Calculates the Edge Gradient Magnitude from x and y derivatives
Args:
image2convolve (2d array)
kernel_filter (tuple)
Returns:
EGM as 2d array
"""
return cv2.magnitude(cv2.filter2D(np.float32(image2convolve),
cv2.CV_32F,
kernel_filter[1],
borderType=cv2.BORDER_CONSTANT),
cv2.filter2D(np.float32(image2convolve),
cv2.CV_32F,
kernel_filter[0],
borderType=cv2.BORDER_CONSTANT))
def get_magnitude(im, kernels=None, pad=15):
"""
Gets the Edge Gradient Magnitude (EGM) over multiple edge kernels
Args:
im (2d array)
kernels (Optional[list])
pad (Optional[int])
Returns:
Gradient edge magnitude as a 2d array (the maximum EGM within each kernel set, averaged over all kernel sets).
"""
n_rows, n_cols = im.shape
# Pad image edges.
if pad > 0:
im = np.float32(cv2.copyMakeBorder(im, pad, pad, pad, pad, cv2.BORDER_REFLECT))
# The convolution kernel pairs
conv_kernels = set_kernel_pairs(methods=kernels)
# Mean EGM
# mag_p = np.zeros((len(conv_kernels), im.shape[0], im.shape[1]), dtype='float32')
mag_p = np.zeros(im.shape, dtype='float32')
for kernel_name, kernel_dict in viewitems(conv_kernels):
kernel_filters = kernel_dict['kernels']
mag_c = np.zeros(im.shape, dtype='float32')
if kernel_dict['compass']:
if isinstance(kernel_filters, list):
kiter = len(kernel_filters)
# Get the maximum EGM over all kernel pairs.
for ki in range(0, kiter):
for kw in range(0, 2):
# Image convolution
temp_egm = cv2.filter2D(np.float32(im),
cv2.CV_32F,
np.array(kernel_filters[ki], dtype='float32')[kw],
borderType=cv2.BORDER_CONSTANT)
mag_c = np.maximum(mag_c, temp_egm)
else:
# Get the maximum EGM over all kernels.
for ki in range(0, kernel_filters.shape[0]):
# Image convolution
temp_egm = cv2.filter2D(np.float32(im),
cv2.CV_32F,
kernel_filters[ki],
borderType=cv2.BORDER_CONSTANT)
mag_c = np.maximum(mag_c, temp_egm)
else:
if isinstance(kernel_filters, list):
kiter = len(kernel_filters)
# Get the maximum EGM over all kernel pairs.
for ki in range(0, kiter):
# EGM
temp_egm = _get_magnitude(im, np.array(kernel_filters[ki], dtype='float32'))
mag_c = np.maximum(mag_c, temp_egm)
else:
kiter = kernel_filters.shape[0]
# Get the maximum EGM over all kernel pairs.
for ki in range(0, kiter, 2):
# EGM
temp_egm = _get_magnitude(im, kernel_filters[ki:ki+2])
mag_c = np.maximum(mag_c, temp_egm)
mag_p += mag_c
if pad > 0:
# mag_p = mag_p.mean(axis=0)[pad:n_rows+pad, pad:n_cols+pad] * mag_p.max(axis=0)[pad:n_rows+pad, pad:n_cols+pad]
mag_p = mag_p[pad:n_rows+pad, pad:n_cols+pad] / len(conv_kernels)
else:
# mag_p = mag_p.mean(axis=0) * mag_p.max(axis=0)
mag_p = mag_p / len(conv_kernels)
mag_p[np.isnan(mag_p) | np.isinf(mag_p)] = 0.0
return mag_p
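# Example (a sketch): compute the EGM of a 0-1 float32 image using the
# default kernel sets, reflecting a 15-pixel border before convolution and
# trimming it from the result (``im01`` is an illustrative 0-1 image).
# >>> egm = get_magnitude(np.float32(im01), pad=15)
# >>> egm.shape == im01.shape
# True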
def get_mag_egm(ts_array, ts_r, ts_c, kernels):
# EGM holder
mag_egm = np.zeros((ts_array.shape[0], ts_r, ts_c), dtype='float32')
se = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], dtype='uint8')
# count = np.zeros((ts_r, ts_c), dtype='uint8')
# Get the EGM from each day.
for ti in range(0, ts_array.shape[0]):
mask = mdilate(np.where(ts_array[ti] == 0, 1, 0), se, iters=10)
# count[mask == 0] += 1
# Get the EGM over all 'kernels'.
magg_ = get_magnitude(ts_array[ti],
kernels=kernels,
pad=0)
# magg_[mask == 1] = 0
magg_[mask == 1] = np.nan
mag_egm[ti] = magg_
# Get the mean EGM over all layers
# mag_egm_mean = mag_egm.sum(axis=0) / np.float32(count)
mag_egm_mean = np.nanmean(mag_egm, axis=0)
mag_egm_med = np.nanmedian(mag_egm, axis=0)
mag_egm_cv = np.nanstd(mag_egm, axis=0) / mag_egm_med
mag_egm_cv = ((mag_egm_cv + mag_egm_med) / 2.0) * 10000.0
return mag_egm_mean, mag_egm_cv
def get_mag_dist(ts_array, ts_r, ts_c, cvm):
# EGM holder
mag_dist = np.zeros((ts_r, ts_c), dtype='float32')
# Get the edge distance from each day.
for ti in range(0, ts_array.shape[0]-3):
mag_dist_ = moving_window(ts_array[ti:ti+3],
statistic='distance',
window_size=3,
weights=cvm)
mag_dist += mag_dist_
return mag_dist / float(ts_array.shape[0]-3)
def _do_clahe(image2adjust, clip_perc, grid_tile):
"""
Contrast Limited Adaptive Histogram Equalization (CLAHE)
Args:
image2adjust (2d array)
clip_perc (float)
grid_tile (int)
Returns:
CLAHE adjusted 2d array
"""
clahe = cv2.createCLAHE(clipLimit=clip_perc, tileGridSize=grid_tile)
return clahe.apply(image2adjust)
def local_hist_eq(image2adjust, clip_percentages=None, grid_tiles=None, method='mean'):
"""
Computes multi-scale Contrast Limited Adaptive Histogram Equalization (CLAHE)
Args:
image2adjust (ndarray): The edge gradient magnitude array to adjust. Should be uint8 data type.
clip_percentages (Optional[float list]): A list of clip percentages for CLAHE. Default is [1.].
grid_tiles (Optional[tuple list]): A list of grid tuples for CLAHE. Default is [(16, 16)].
method (Optional[str]): The aggregation method.
Returns:
Adjusted image as 2d array.
"""
if not clip_percentages:
clip_percentages = [1.]
if grid_tiles:
grid_tiles = [(gk, gk) for gk in grid_tiles]
else:
grid_tiles = [(16, 16)]
rws, cls = image2adjust.shape
if method == 'mean':
temp_arr_eq = np.zeros((rws, cls), dtype='uint64')
elif method == 'median' or method == 'min':
temp_arr_eq = np.zeros((len(clip_percentages) * len(grid_tiles), rws, cls), dtype='uint64')
counter = 0
# Iterate over each clip percentage.
for clip_perc in clip_percentages:
# Iterate over each grid tile.
for grid_tile in grid_tiles:
# Compute CLAHE and add it to the output array.
if method == 'mean':
temp_arr_eq += _do_clahe(image2adjust, clip_perc, grid_tile)
# temp_arr_eq += rescale_intensity(exposure.equalize_adapthist(image2adjust,
# kernel_size=grid_tile[0],
# clip_limit=clip_perc),
# in_range=(0., 1.), out_range=(0, 255))
elif method == 'median' or method == 'min':
temp_arr_eq[counter] = _do_clahe(image2adjust, clip_perc, grid_tile)
counter += 1
# Return the mean CLAHE-adjusted edge gradient magnitude
if method == 'mean':
return np.float32(temp_arr_eq / float(len(clip_percentages) * len(grid_tiles))) / 255.0
# return np.uint8(np.divide(temp_arr_eq, float(len(clip_percentages) * len(grid_tiles))) / 255.)
elif method == 'median':
return np.float32(np.median(temp_arr_eq, axis=0) / 255.0)
elif method == 'min':
return np.float32(temp_arr_eq.min(axis=0) / 255.0)
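# Example (a sketch): a multi-scale CLAHE adjustment of a uint8 EGM array
# (``egm255`` is an illustrative 0-255 array), averaging two clip limits
# over two tile sizes; the integers in ``grid_tiles`` are expanded to
# (g, g) grids above, and the result is float32 scaled to 0-1.
# >>> egm_eq = local_hist_eq(egm255,
# ...                        clip_percentages=[1., 2.],
# ...                        grid_tiles=[8, 16],
# ...                        method='mean')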
def locate_endpoints(edge_image, locations='all'):
"""
Locates edge endpoints
Args:
edge_image (2d array)
locations (Optional[str]): Choices are ['all', 'small', 'broken'].
Returns:
Image endpoints, where endpoints = 1.
"""
# Setup the endpoint structuring elements for
# hit or miss morphology.
if locations == 'all':
endpoints = [np.array([[0, 0, 0], [0, 1, 0], [2, 1, 2]], dtype='uint8'),
np.array([[0, 0, 0], [0, 1, 2], [0, 2, 1]], dtype='uint8'),
np.array([[0, 0, 2], [0, 1, 1], [0, 0, 2]], dtype='uint8'),
np.array([[0, 2, 1], [0, 1, 2], [0, 0, 0]], dtype='uint8'),
np.array([[2, 1, 2], [0, 1, 0], [0, 0, 0]], dtype='uint8'),
np.array([[1, 2, 0], [2, 1, 0], [0, 0, 0]], dtype='uint8'),
np.array([[2, 0, 0], [1, 1, 0], [2, 0, 0]], dtype='uint8'),
np.array([[0, 0, 0], [2, 1, 0], [1, 2, 0]], dtype='uint8'),
np.array([[0, 0, 0], [0, 1, 0], [1, 2, 1]], dtype='uint8'),
np.array([[0, 0, 1], [0, 1, 2], [0, 0, 1]], dtype='uint8'),
np.array([[1, 2, 1], [0, 1, 0], [0, 0, 0]], dtype='uint8')]
elif locations == 'small':
endpoints = [np.array([[0, 0, 0], [0, 1, 0], [1, 1, 1]], dtype='uint8'),
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 1]], dtype='uint8'),
np.array([[0, 0, 1], [0, 1, 1], [0, 0, 1]], dtype='uint8'),
np.array([[0, 1, 1], [0, 1, 1], [0, 0, 0]], dtype='uint8'),
np.array([[1, 1, 1], [0, 1, 0], [0, 0, 0]], dtype='uint8'),
np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]], dtype='uint8'),
np.array([[1, 0, 0], [1, 1, 0], [1, 0, 0]], dtype='uint8'),
np.array([[0, 0, 0], [1, 1, 0], [1, 1, 0]], dtype='uint8'),
np.array([[0, 0, 0], [0, 1, 0], [1, 1, 1]], dtype='uint8'),
np.array([[0, 0, 1], [0, 1, 1], [0, 0, 1]], dtype='uint8'),
np.array([[1, 1, 1], [0, 1, 0], [0, 0, 0]], dtype='uint8'),
np.array([[1, 0, 0], [1, 1, 0], [1, 0, 0]], dtype='uint8')]
elif locations == 'broken':
endpoints = [np.array([[0, 0, 0], [0, 1, 0], [1, 0, 1]], dtype='uint8'),
np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1]], dtype='uint8'),
np.array([[1, 0, 1], [0, 1, 0], [0, 0, 0]], dtype='uint8'),
np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0]], dtype='uint8')]
end_points = np.zeros(edge_image.shape, dtype='uint8')
# Find the endpoints.
for endpoint in endpoints:
end_points += mhitmiss(np.uint8(edge_image), endpoint)
end_points[end_points > 1] = 1
return end_points
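# Note on the structuring elements above: mahotas' hitmiss interprets 0 as
# required background, 1 as required foreground, and 2 as "don't care", so
# the 2-coded positions in the 'all' templates match a line end regardless
# of the pixel values found there.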
def _locate_islands(edge_image):
"""
Locates single pixel islands
Args:
edge_image (2d array)
Returns:
Image endpoint islands, where islands = 1.
"""
# Setup the endpoint structuring elements for
# hit or miss morphology.
endpoint = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype='uint8')
end_points = np.zeros(edge_image.shape, dtype='uint8')
end_points += mhitmiss(edge_image, endpoint)
end_points[end_points > 1] = 1
return end_points
def _trim_endpoints(edge_image,
iterations,
locations='all',
filter=False,
filter_ws=15,
filter_pct=.1,
skeleton=False):
"""
Trims unconnected lines, starting from endpoints
Args:
edge_image (2d array)
iterations (int)
locations (str)
filter (bool)
filter_ws (int)
filter_pct (float)
skeleton (bool)
"""
if filter:
edge_image_sum = moving_window(edge_image, statistic='sum', window_size=filter_ws)
for iter in range(0, iterations):
# Locate the endpoints
ep = locate_endpoints(edge_image, locations=locations)
# Filter high density areas.
if filter:
ep[edge_image_sum >= int((filter_ws * filter_ws) * filter_pct)] = 0
# Remove the endpoints from the edge image.
edge_image[ep == 1] = 0
# Fill small gaps after the first iteration.
if iter == 0:
edge_image = moving_window(edge_image, statistic='fill', window_size=3, n_neighbors=2)
# Remove remaining single pixels.
ep = _locate_islands(edge_image)
edge_image[ep == 1] = 0
if skeleton:
return _do_skeleton(edge_image)
else:
return edge_image
def _link_edge_endpoints(cr, max_gap, mag_image, **kwargs):
"""
Links edge endpoints
Args:
cr (2d array)
max_gap (int)
mag_image (2d array)
"""
# Link endpoints
cr = moving_window(np.uint8(cr*1),
statistic='link',
window_size=max_gap,
endpoint_array=locate_endpoints(np.uint8(cr*1)),
gradient_array=mag_image,
**kwargs)
# Fill broken links
# __--__ to
# ______
cr = _trim_endpoints(cr, 1, locations='broken')
cr = moving_window(cr, statistic='fill', window_size=3)
# A little cleanup before linking.
cr = _trim_endpoints(cr, 1, locations='all', filter=True, filter_ws=15, filter_pct=.1)
# Link endpoints.
return moving_window(cr * 1,
statistic='link',
window_size=max_gap,
endpoint_array=locate_endpoints(cr * 1),
gradient_array=mag_image,
**kwargs)
def canny_morphology(value_array, egm_array, l1, l2, k_size, l_egm, link_window):
"""
Args:
value_array (2d array): Float32 0-1
egm_array (2d array): Float32 0-1
l1 (int): Canny lower threshold.
l2 (int): Canny upper threshold.
k_size (int): Canny aperture size.
l_egm (float): The EGM lower threshold.
link_window (int): The link window size.
"""
canny_edge = cv2.Canny(np.uint8(value_array * 255.),
l1,
l2,
apertureSize=k_size,
L2gradient=True)
# canny_edge = moving_window(egm_array,
# window_size=3,
# weights=egd,
# statistic='suppression')
canny_edge[canny_edge > 0] = 1
canny_edge = _trim_endpoints(canny_edge, 1, locations='broken')
# Remove small edge objects.
# canny_edge = nd_label(canny_edge)[0]
canny_edge = sk_label(np.uint8(canny_edge), connectivity=2)
# canny_edge = np.uint64(remove_small_objects(canny_edge, min_size=5, connectivity=1))
# Remove objects with low EGM.
props = regionprops(canny_edge, intensity_image=egm_array)
canny_edge = np.float32(canny_edge)
for prop in props:
canny_edge[canny_edge == prop.label] = prop.mean_intensity
canny_edge[canny_edge <= l_egm] = 0
canny_edge[canny_edge > 0] = 1
# Link endpoints
canny_edge = _trim_endpoints(np.uint8(canny_edge), 1, locations='broken')
canny_edge = moving_window(np.uint8(canny_edge),
statistic='link',
window_size=link_window,
endpoint_array=locate_endpoints(np.uint8(canny_edge)),
gradient_array=egm_array,
smallest_allowed_gap=5)
# Remove small objects.
# canny_edge = nd_label(np.uint8(canny_edge))[0]
canny_edge = sk_label(np.uint8(canny_edge), connectivity=2)
canny_edge = np.uint64(remove_small_objects(canny_edge, min_size=10, connectivity=1))
# props = regionprops(canny_edge, intensity_image=egm_array)
# canny_edge = np.float32(canny_edge)
canny_edge[canny_edge > 0] = 1
return _trim_endpoints(canny_edge, 1)
# for prop in props:
#
# if (prop.eccentricity < .4) and (prop.area < 100):
# canny_edge[canny_edge == prop.label] = 0
#
# # if ((prop.major_axis_length + .00001) / (prop.minor_axis_length + .00001) < 2) and (prop.area < 100):
# # canny_edge[canny_edge == prop.label] = 0
#
# canny_edge[canny_edge > 0] = 1
# cannycv_r = cv2.threshold(np.uint8(canny_edge), 0, 1, cv2.THRESH_BINARY_INV)[1]
#
# dist = cv2.distanceTransform(np.uint8(cannycv_r), cv2.DIST_L2, 3)
#
# canny_edge = moving_window(dist, statistic='seg-dist', window_size=3)
#
# canny_edge = moving_window(np.uint8(canny_edge),
# statistic='link',
# window_size=link_window,
# endpoint_array=locate_endpoints(np.uint8(canny_edge)),
# gradient_array=egm_array,
# smallest_allowed_gap=5)
return canny_edge
def _do_skeleton(cr):
"""
Computes the morphological skeleton
Args:
cr (2d array)
Returns:
Image skeleton as 2d array
"""
# Fill holes to keep straighter skeleton lines.
return np.uint8(skeletonize(moving_window(np.uint8(cr), statistic='fill', window_size=3)))
def morphological_cleanup(cr,
min_line_size,
theta_45_iters=0,
theta_90_iters=0,
theta_180_iters=0,
pre_thin=False,
endpoint_iterations=0,
skeleton=False,
link_ends=False,
egm_array=None,
extend_endpoints=False,
max_gap=25,
min_egm=25,
smallest_allowed_gap=3,
medium_allowed_gap=7,
link_iters=1,
link_window_size=7,
extend_iters=1,
value_array=None):
"""
A function to morphologically clean binary edges
Args:
cr (2d array)
min_line_size (int)
theta_45_iters (Optional[int])
theta_90_iters (Optional[int])
theta_180_iters (Optional[int])
pre_thin (Optional[bool])
endpoint_iterations (Optional[int])
skeleton (Optional[bool])
link_ends (Optional[bool])
egm_array (Optional[2d array]): Edge gradient magnitude
extend_endpoints (Optional[bool])
max_gap (Optional[int])
min_egm (Optional[int])
smallest_allowed_gap (Optional[int])
medium_allowed_gap (Optional[int])
link_iters (Optional[int])
link_window_size (Optional[int])
extend_iters (Optional[int])
value_array (Optional[2d array])
Returns:
Morphologically cleaned edges as 2d array
"""
if isinstance(value_array, np.ndarray):
low_value_edge_idx = np.where((cr == 1) & (value_array < 0.2))
if pre_thin:
# Thin edges with 1 iteration
cr = pymorph.thin(pymorph.binary(cr), n=1, Iab=pymorph.endpoints())
# Remove small edge objects.
# cr = nd_label(cr)[0]
cr = sk_label(np.uint8(cr), connectivity=2)
cr = np.uint64(remove_small_objects(cr, min_size=min_line_size, connectivity=1))
cr[cr > 0] = 1
# Extend endpoints along
# the same gradient
# orientation.
if extend_endpoints:
# The edge gradient direction
egd_array = moving_window(egm_array,
window_size=link_window_size,
statistic='edge-direction')
for iter in range(0, extend_iters):
cr = moving_window(cr,
statistic='extend-endpoints',
window_size=3,
endpoint_array=locate_endpoints(cr),
gradient_array=egm_array*255.,
weights=egd_array)
# Thin edges
if (theta_180_iters > 0) and (theta_90_iters > 0) and (theta_45_iters > 0):
# cr = np.uint8(pymorph.thin(pymorph.binary(np.uint8(cr)), theta=180, n=theta_180_iters))
# cr2 = np.uint8(pymorph.thin(pymorph.binary(np.uint8(cr)), theta=90, n=theta_90_iters))
# cr3 = np.uint8(pymorph.thin(pymorph.binary(np.uint8(cr)), n=theta_45_iters))
#
# cr[(cr2 == 1) | (cr3 == 1)] = 1
cr = sk_thin(np.uint8(cr), max_iter=1)
else:
if theta_180_iters > 0:
cr = np.uint8(pymorph.thin(pymorph.binary(np.uint8(cr)), theta=180, n=theta_180_iters))
if theta_90_iters > 0:
cr = np.uint8(pymorph.thin(pymorph.binary(np.uint8(cr)), theta=90, n=theta_90_iters))
if theta_45_iters > 0:
cr = np.uint8(mthin(np.uint8(cr), max_iter=theta_45_iters))
# Remove small objects again after
# thinning and trimming.
if min_line_size > 0:
# cr, __ = nd_label(cr)
cr = sk_label(np.uint8(cr), connectivity=2)
cr = np.uint64(remove_small_objects(cr, min_size=min_line_size, connectivity=1))
cr[cr > 0] = 1
# if skeleton:
# crc = _do_skeleton(cr.copy())
# Link endpoints with small gaps.
if link_ends:
for link_iter in range(0, link_iters):
cr = _link_edge_endpoints(cr,
max_gap,
egm_array,
min_egm=min_egm,
smallest_allowed_gap=smallest_allowed_gap,
medium_allowed_gap=medium_allowed_gap)
cr = _trim_endpoints(cr, 1)
# import matplotlib.pyplot as plt
# cr = _do_skeleton(cr)
# plt.subplot(121)
# plt.imshow(crc)
# plt.subplot(122)
# plt.imshow(cr)
# plt.show()
# import sys
# sys.exit()
# Compute the morphological skeleton.
# The skeleton is morphological thinning with
# infinite iterations.
if skeleton:
cr = _do_skeleton(cr)
# Trim endpoints with ``endpoint_iterations`` iterations.
if endpoint_iterations > 0:
cr = _trim_endpoints(cr, endpoint_iterations, skeleton=True)
# Fill small holes
if isinstance(value_array, np.ndarray):
cr[low_value_edge_idx] = 1
cr = moving_window(cr, statistic='fill', window_size=3, n_neighbors=2)
# Fill broken links
# __--__ to
# ______
cr = _trim_endpoints(cr, 1, locations='broken')
return moving_window(cr, statistic='fill', window_size=3)
def init_distance(egm_array, threshold):
"""
Initializes a euclidean distance transform array
Args:
egm_array (2d array)
threshold (float or int)
"""
# Threshold the EGM into a binary edge/no edge array.
binary_array = np.uint8(np.where(egm_array < threshold, 1, 0))
# Get the euclidean distance from edge pixels.
dist = np.float32(cv2.distanceTransform(binary_array, cv2.DIST_L2, 3))
dist[dist < 0] = 0
dist /= dist.max()
return dist
def init_level_set(egm_array, threshold):
"""
Initializes a level set array
Args:
egm_array (2d array)
threshold (float or int)
"""
# Threshold the EGM into a binary edge/no edge array.
binary_array = np.uint8(np.where(egm_array < threshold, 1, 0))
# Get the euclidean distance from edge pixels.
dist = np.float32(cv2.distanceTransform(binary_array, cv2.DIST_L2, 3))
dist = np.where((binary_array == 1) & (dist > 1), dist, 0)
binary_array_r = np.uint8(cv2.threshold(binary_array, 0, 1, cv2.THRESH_BINARY_INV)[1])
dist_r = cv2.distanceTransform(binary_array_r, cv2.DIST_L2, 3)
return np.where(dist == 0, dist_r * -1., dist)
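# Example (a sketch of the sign convention above): thresholding the EGM at
# ``threshold`` marks edge pixels; non-edge pixels more than one pixel from
# an edge keep their positive distance, edge pixels take the negative of
# their distance to the nearest non-edge pixel, and the remaining one-pixel
# band around edges is left at zero.
# >>> phi = init_level_set(egm, threshold=30)
# >>> interior = phi > 0    # candidate object interiors
# >>> edge_band = phi <= 0  # edges and their immediate neighbours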
def multiscale_threshold(egm_array,
min_object_size,
windows=None,
link_ends=False,
theta_180_iters=1,
theta_90_iters=1,
theta_45_iters=1,
skeleton=False,
endpoint_iterations=1,
method='wmean',
ignore_thresh=15.0,
inverse_dist=True,
n_jobs=-1):
"""
Computes multi-scale adaptive threshold and morphological "cleaning"
Args:
egm_array (ndarray):
min_object_size (int):
windows (Optional[int list]):
link_ends (Optional[bool]):
theta_180_iters (Optional[int]):
theta_90_iters (Optional[int]):
theta_45_iters (Optional[int]):
skeleton (Optional[bool]):
endpoint_iterations (Optional[int]):
method (Optional[str]): Choices are ['gaussian', 'mean-c', 'median', 'wmean', 'bernson', 'niblack', 'sauvola', 'bradley', 'otsu', '60']. Default is 'wmean'.
ignore_thresh (Optional[float])
inverse_dist (Optional[bool])
n_jobs (Optional[int])
Returns:
Binary edges as 2d array
"""
if not isinstance(windows, list):
windows = [11, 21, 31, 41, 51, 61, 71]
# Get the image shape.
im_rows, im_cols = egm_array.shape
# Setup the output binary edge array holder.
thresholded_edges = np.zeros((im_rows, im_cols), dtype='uint8')
wp = 64
egm_array = cv2.copyMakeBorder(egm_array, wp, wp, wp, wp, cv2.BORDER_REFLECT)
for w in windows:
# Threshold the array with the current window size.
if method == 'gaussian':
# The gaussian threshold is a weighted sum of the window,
# where the weights are a gaussian window.
binary_adaptive_m = cv2.adaptiveThreshold(egm_array, 1,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, w, 15.)
elif method == 'mean-c':
binary_adaptive_m = cv2.adaptiveThreshold(egm_array, 1,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, w, 15.)
elif method == 'median':
binary_adaptive_m = threshold_local(egm_array, w, method=method)
elif method == 'wmean':
dist_transform = np.float64(init_distance(egm_array, 30))
dist_transform = np.float64(closerec(np.uint8(dist_transform*255.0), 'disk', r=3, iters=5))
dist_transform /= dist_transform.max()
binary_adaptive_m = athreshold(np.ascontiguousarray(egm_array, dtype='float64'),
w,
ignore_thresh=ignore_thresh,
rt=-25.0,
n_jobs=n_jobs,
method=method,
inverse_dist=inverse_dist,
edge_direction_array=None,
edge_distance_array=dist_transform)
elif method == 'bernson':
binary_adaptive_m = athreshold(np.ascontiguousarray(egm_array, dtype='float64'),
w,
ignore_thresh=15.,
rt=-10.,
n_jobs=n_jobs,
method=method)
elif method == 'niblack':
binary_adaptive_m = athreshold(np.ascontiguousarray(egm_array, dtype='float64'),
w,
ignore_thresh=15.,
rt=-10.,
k=-.01,
n_jobs=n_jobs,
method=method)
elif method == 'sauvola':
binary_adaptive_m = athreshold(np.ascontiguousarray(egm_array, dtype='float64'),
w,
ignore_thresh=15.,
rt=-10.,
k=-.01,
n_jobs=n_jobs,
method=method)
elif method == 'bradley':
binary_adaptive_m = athreshold(np.ascontiguousarray(egm_array, dtype='float64'),
w,
ignore_thresh=15.,
rt=1.,
n_jobs=n_jobs,
method=method)
elif method == 'otsu':
binary_adaptive_m = athreshold(np.ascontiguousarray(egm_array, dtype='float64'),
w,
ignore_thresh=15.,
rt=1.,
n_jobs=n_jobs,
method=method)
elif method == '60':
func = lambda arr: np.percentile(arr, 60)
binary_adaptive_m = threshold_local(egm_array, w, 'generic', param=func)
else:
raise ValueError('The method was not recognized.')
# Cleanup the binary edges with image morphology.
thresholded_edges += morphological_cleanup(binary_adaptive_m[wp:-wp, wp:-wp],
min_object_size,
theta_180_iters=theta_180_iters,
theta_90_iters=theta_90_iters,
theta_45_iters=theta_45_iters,
skeleton=skeleton,
endpoint_iterations=endpoint_iterations,
link_ends=link_ends,
egm_array=egm_array)
thresholded_edges[thresholded_edges > 1] = 1
return thresholded_edges
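# Example (a sketch): threshold a uint8 EGM (``egm255``, illustrative) at
# two window sizes with the default 'wmean' method and merge the
# morphologically cleaned binaries into a single edge mask.
# >>> edges = multiscale_threshold(egm255,
# ...                              min_object_size=5,
# ...                              windows=[25, 51],
# ...                              method='wmean',
# ...                              n_jobs=4)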
# def _remove_interior_islands(prop,
# min_area_int_,
# mean_threshold_,
# boundary_mean_,
# prop_area_weight_,
# bbox_pad,
# arows,
# acols,
# segments_g,
# original_binary_edge_g,
# se_cross):
def _remove_interior_islands(*args):
"""
Gets indices to remove interior island objects
"""
prop, min_area_int_, mean_threshold_, boundary_mean_, prop_area_weight_, bbox_pad, arows, acols, segments_g, original_binary_edge_g, se_cross = list(itertools.chain(*args))
# mean_threshold_ = 0.2 # The minimum EVI2 threshold allowed for objects
# boundary_mean_ = 0.25 # The maximum EVI2 threshold allowed for boundaries
# min_area_int_ = 222 # The minimum pixel count allowed for interior objects
# Get the bounding box of the current segment.
min_row, min_col, max_row, max_col = prop.bbox
# Expand the box.
min_row = min_row - bbox_pad if (min_row - bbox_pad) > 0 else 0
max_row = max_row + bbox_pad if (max_row + bbox_pad) < (arows - 1) else arows - 1
min_col = min_col - bbox_pad if (min_col - bbox_pad) > 0 else 0
max_col = max_col + bbox_pad if (max_col + bbox_pad) < (acols - 1) else acols - 1
# Get a subset of the current object.
labels_sub = segments_g[min_row:max_row, min_col:max_col]
# Get a subset of the pre-cleaned edges
if isinstance(original_binary_edge_g, np.ndarray):
binary_sub = original_binary_edge_g[min_row:max_row, min_col:max_col]
# Get the count of pre-cleaned
# edges in the object.
binary_edge_count = ((binary_sub == 1) & (labels_sub == prop.label)).sum()
# Don't include objects half covered by pre-cleaned edges.
if binary_edge_count >= int(prop.area * prop_area_weight_):
idx = list(np.where(labels_sub == prop.label))
idx[0] = idx[0] + min_row
idx[1] = idx[1] + min_col
return list(idx[0]), list(idx[1])
# Don't include objects with low EVI2.
if hasattr(prop, 'mean_intensity'):
if prop.mean_intensity < mean_threshold_:
idx = list(np.where(labels_sub == prop.label))
idx[0] = idx[0] + min_row
idx[1] = idx[1] + min_col
return list(idx[0]), list(idx[1])
# Get the current object.
labels_sub_center = np.uint8(np.where(labels_sub == prop.label, 1, 0))
# Get the boundary labels.
label_boundary = cv2.morphologyEx(labels_sub_center,
cv2.MORPH_DILATE,
se_cross,
iterations=2) - labels_sub_center
boundary_idx = np.where(label_boundary == 1)
# Check if the current object is completely
# surrounded by 1-2 other objects.
if np.any(boundary_idx):
boundary_values = labels_sub[boundary_idx]
# The parcel should be surrounded
# by other vegetation.
if boundary_values.mean() >= boundary_mean_:
unique_boundary_values = list(np.unique(boundary_values))
if (0 in unique_boundary_values) and (0 < len(unique_boundary_values) <= 2) and (prop.area < min_area_int_):
idx = list(np.where(labels_sub_center == 1))
idx[0] = idx[0] + min_row
idx[1] = idx[1] + min_col
return list(idx[0]), list(idx[1])
else:
return list(), list()
else:
return list(), list()
else:
return list(), list()
# def _clean_objects(prop,
# min_area_,
# min_area_int_,
# mean_threshold_,
# boundary_mean_,
# bbox_pad,
# arows,
# acols,
# segments_g,
# morphed_sep,
# morphed,
# se_cross,
# se_square):
def _clean_objects(*args):
"""
Area:
15m:
0.1 ha / [(15m x 15m) x 0.0001] = 5 pixels
5 ha / [(15m x 15m) x 0.0001] = 222 pixels
10 ha / [(15m x 15m) x 0.0001] = 444 pixels
20 ha / [(15m x 15m) x 0.0001] = 888 pixels
5,000 ha / [(15m x 15m) x 0.0001] = 222,222 pixels
10,000 ha / [(15m x 15m) x 0.0001] = 444,444 pixels
20,000 ha / [(15m x 15m) x 0.0001] = 888,888 pixels
"""
prop, min_area_, min_area_int_, mean_threshold_, boundary_mean_, bbox_pad, arows, acols, segments_g, morphed_sep, morphed, se_cross, se_square = list(itertools.chain(*args))
el_ = []
# mean_threshold_ = 0.2 # The minimum EVI2 threshold allowed for objects
# boundary_mean_ = 0.25 # The maximum EVI2 threshold allowed for boundaries
# min_area_ = 5 # The minimum pixel count allowed for any object
# max_area_ = 250000 # The maximum pixel count allowed for any object
# min_area_int_ = 222 # The minimum pixel count allowed for interior objects
# if prop.area > 10000:
# return el_, el_, el_, el_, el_
if hasattr(prop, 'mean_intensity'):
if prop.mean_intensity < mean_threshold_:
return el_, el_, el_, el_, el_
# Get the bounding box of the current segment.
min_row, min_col, max_row, max_col = prop.bbox
# Expand the box.
min_row = min_row - bbox_pad if (min_row - bbox_pad) > 0 else 0
max_row = max_row + bbox_pad if (max_row + bbox_pad) < (arows - 1) else arows - 1
min_col = min_col - bbox_pad if (min_col - bbox_pad) > 0 else 0
max_col = max_col + bbox_pad if (max_col + bbox_pad) < (acols - 1) else acols - 1
# Get a subset of the current object.
labels_sub = segments_g[min_row:max_row, min_col:max_col]
morphed_sep_sub = morphed_sep[min_row:max_row, min_col:max_col]
morphed_sub = morphed[min_row:max_row, min_col:max_col]
# Get the current object.
labels_sub_center = np.uint8(np.where(labels_sub == prop.label, 1, 0))
# Get the boundary labels.
label_boundary = cv2.morphologyEx(labels_sub_center,
cv2.MORPH_DILATE,
se_cross,
iterations=2) - labels_sub_center
boundary_idx = np.where(label_boundary == 1)
# Check if the current object is completely
# surrounded by 1-3 other objects.
if np.any(boundary_idx):
boundary_values = labels_sub[boundary_idx]
# The parcel should be surrounded
# by other vegetation.
if boundary_values.mean() >= boundary_mean_:
unique_boundary_values = list(np.unique(boundary_values))
if (0 in unique_boundary_values) and (0 < len(unique_boundary_values) <= 2) and (prop.area < min_area_int_):
return el_, el_, el_, el_, el_
# Morphological closing by reconstruction
closerec_sub = pymorph.closerec(pymorph.binary(labels_sub_center))
closerec_sub = merode(closerec_sub, se_cross)
closerec_sub = mopen(closerec_sub, se_square)
if (closerec_sub == 1).sum() < min_area_:
return el_, el_, el_, el_, el_
else:
idxs = list(np.where((morphed_sep_sub == 0) & (closerec_sub == 1)))
idxs[0] = idxs[0] + min_row
idxs[1] = idxs[1] + min_col
# Decrease the gaps between land cover
closerec_sub = cv2.morphologyEx(np.uint8(closerec_sub),
cv2.MORPH_DILATE,
se_cross,
iterations=2)
idx = list(np.where((morphed_sub == 0) & (closerec_sub == 1)))
idx[0] = idx[0] + min_row
idx[1] = idx[1] + min_col
return list(idxs[0]), list(idxs[1]), \
list(np.zeros(len(idx[0]), dtype='uint64') + prop.label), \
list(idx[0]), list(idx[1])
def clean_objects(segments,
intensity_array=None,
original_binary_edge=None,
binary=True,
min_object_area=5,
min_interior_count=222,
mean_threshold=0.2,
boundary_mean=0.25,
prop_area_weight=0.9,
bbox_pad=10,
chunk_size=100000,
n_jobs=1):
"""
Cleans objects with morphological operations
Args:
segments (2d array): The segmented objects array to be cleaned.
intensity_array (2d array): The intensity values.
original_binary_edge (2d array): The original edges as binary.
binary (Optional[bool]): Whether the input segments are binary (True) or labelled (False). Default is True.
min_object_area (Optional[int]): The minimum object area.
min_interior_count (Optional[int]): The minimum pixel count of interior pixels.
mean_threshold (float): The vegetation index mean threshold.
boundary_mean (float): The vegetation index boundary threshold.
prop_area_weight (float): The object property area weighting.
bbox_pad (Optional[int]): The `regionprops bbox` padding. Default is 10.
chunk_size (Optional[int]): The chunk size for multiprocessing. Default is 100,000.
n_jobs (Optional[int]):
Returns:
Segments dilated, Segments with separate boundaries
"""
# global morphed, segments_g, morphed_sep, se_cross, se_square, original_binary_edge_g
# segments_g = segments
# original_binary_edge_g = original_binary_edge
arows, acols = segments.shape
if binary:
segments = nd_label(segments)[0]
# segments = sk_label(np.uint8(segments), connectivity=1)
morphed_sep = np.zeros((arows, acols), dtype='uint8')
morphed = np.zeros((arows, acols), dtype='uint64')
se_cross = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], dtype='uint8')
se_square = np.array([[1, 1],
[1, 1]], dtype='uint8')
props = regionprops(segments,
intensity_image=intensity_array)
# Clean parcels
for chi in range(0, len(props), chunk_size):
data_gen = ((prop_,
min_object_area,
min_interior_count,
mean_threshold,
boundary_mean,
bbox_pad,
arows,
acols,
segments,
morphed_sep,
morphed,
se_cross,
se_square) for prop_ in props[chi:chi+chunk_size])
cleaned_parcels = []
with concurrent.futures.ThreadPoolExecutor(n_jobs) as executor:
for res in executor.map(_clean_objects, data_gen):
cleaned_parcels.append(res)
# cleaned_parcels = Parallel(backend='multiprocessing',
# n_jobs=n_jobs)(delayed(_clean_objects)(prop_,
# min_object_area,
# min_interior_count,
# mean_threshold,
# boundary_mean,
# bbox_pad,
# arows,
# acols,
# segments,
# morphed_sep,
# morphed,
# se_cross,
# se_square) for prop_ in props[chi:chi+chunk_size])
rowidx_sep_list, colidx_sep_list, labels_list, rowidx_list, colidx_list = zip(*cleaned_parcels)
labels_list = np.array(list(itertools.chain.from_iterable(labels_list)), dtype='uint64')
# piece together the parcels
if np.any(labels_list):
# Objects with separate boundaries
rowidx_sep_list = np.array(list(itertools.chain.from_iterable(rowidx_sep_list)), dtype='uint64')
colidx_sep_list = np.array(list(itertools.chain.from_iterable(colidx_sep_list)), dtype='uint64')
morphed_sep[(rowidx_sep_list, colidx_sep_list)] = 1
# Objects with dilated boundaries
rowidx_list = np.array(list(itertools.chain.from_iterable(rowidx_list)), dtype='uint64')
colidx_list = np.array(list(itertools.chain.from_iterable(colidx_list)), dtype='uint64')
morphed[(rowidx_list, colidx_list)] = labels_list
# One last check for interior islands
props = regionprops(morphed,
intensity_image=intensity_array)
# segments_g = morphed
for chi in range(0, len(props), chunk_size):
data_gen = ((prop_,
min_interior_count,
mean_threshold,
boundary_mean,
prop_area_weight,
bbox_pad,
arows,
acols,
morphed,
original_binary_edge,
se_cross) for prop_ in props[chi:chi+chunk_size])
cleaned_parcels = []
with concurrent.futures.ThreadPoolExecutor(n_jobs) as executor:
for res in executor.map(_remove_interior_islands, data_gen):
cleaned_parcels.append(res)
# cleaned_parcels = Parallel(backend='multiprocessing',
# n_jobs=n_jobs)(delayed(_remove_interior_islands)(prop_,
# min_interior_count,
# mean_threshold,
# boundary_mean,
# prop_area_weight,
# bbox_pad,
# arows,
# acols,
# morphed,
# original_binary_edge,
# se_cross) for prop_ in props[chi:chi+chunk_size])
rowidx_list, colidx_list = zip(*cleaned_parcels)
rowidx_list = np.array(list(itertools.chain.from_iterable(rowidx_list)), dtype='uint64')
colidx_list = np.array(list(itertools.chain.from_iterable(colidx_list)), dtype='uint64')
# piece together the parcels
if np.any(rowidx_list):
morphed[(rowidx_list, colidx_list)] = 0
morphed_sep[morphed == 0] = 0
return morphed, morphed_sep
def invert_size_check(im_edges, min_size_object, binary=True):
"""
Inverts binary edges and checks sizes
Args:
im_edges (2d array): Edge array, where edges = 1.
min_size_object (int): The minimum object size (in pixels) to be retained.
binary (Optional[bool]): Whether to recode the output labelled edges to binary. Default is True.
Returns:
Image objects as 2d array
"""
# Invert edges to objects
im_objects = cv2.threshold(np.uint8(im_edges), 0, 1, cv2.THRESH_BINARY_INV)[1]
# Remove potential field objects smaller
# than size threshold.
im_objects = nd_label(im_objects)[0]
# im_objects = sk_label(np.uint8(im_objects), connectivity=1)
im_objects = np.uint64(remove_small_objects(im_objects,
min_size=min_size_object,
connectivity=1))
if binary:
im_objects[im_objects > 0] = 1
return im_objects
def _intersect_objects(prop):
lc_values = list()
area_values = list()
orient_values = list()
solidity_values = list()
eccentricity_values = list()
if prop.label == 0:
return lc_values, area_values, orient_values, solidity_values, eccentricity_values, list(), list()
# Get the bounding box of the current segment.
min_row, min_col, max_row, max_col = prop.bbox
# Get the label sub-array
# labels_sub = lc_objects_g[min_row:max_row, min_col:max_col]
# Get the indices of the current object.
# labels_sub_idx = list(np.where(labels_sub == prop.label))
labels_sub_idx = (prop.coords[:, 0], prop.coords[:, 1])
labels_sub_idx_object = (prop.coords[:, 0] - min_row, prop.coords[:, 1] - min_col)
n_samples = len(labels_sub_idx[0])
# labels_sub_idx[0] = labels_sub_idx[0] + min_row
# labels_sub_idx[1] = labels_sub_idx[1] + min_col
##################################
# LAND COVER CLASS ID INTERSECTION
##################################
if get_class_id_g:
# Get the land cover
# class for the object.
lc_array_sub = lc_array_g[min_row:max_row, min_col:max_col]
# Get the majority land cover class
lc_mode_object = sci_mode(lc_array_sub[tuple(labels_sub_idx_object)])
lc_mode = int(lc_mode_object.mode)
lc_count = int(lc_mode_object.count)
# Check that the land cover count
# is above the required threshold.
# The pixel count needed
# to meet the threshold
pix_count = int(prop.area * object_fraction_g)
# There must be at least
# `object_fraction_g` of the
# target class in the object.
if lc_count >= pix_count:
# Return the majority class
lc_values = list(np.zeros(n_samples, dtype='uint8') + lc_mode)
else:
# Return empty
lc_values = list(np.zeros(n_samples, dtype='uint8'))
# Get the current object.
# labels_sub_center = np.uint8(np.where(labels_sub == idx, 1, 0))
##########################
# OBJECT AREA INTERSECTION
##########################
if get_object_area_g:
object_area = round(prop.area * pixel_ha, 2)
area_values = list(np.zeros(n_samples, dtype='float32') + object_area)
########################
# OBJECT ID INTERSECTION
########################
if get_orientation_g:
orient_values = list(np.zeros(n_samples, dtype='float32') + prop.orientation)
if get_solidity_g:
solidity_values = list(np.zeros(n_samples, dtype='float32') + prop.solidity)
if get_eccentricity_g:
eccentricity_values = list(np.zeros(n_samples, dtype='float32') + prop.eccentricity)
# Return the object value
return lc_values, \
area_values, \
orient_values, \
solidity_values, \
eccentricity_values, \
list(labels_sub_idx[0]), \
list(labels_sub_idx[1])
def intersect_objects(lc_objects,
lc_objects_sep=None,
lc_array=None,
var_array=None,
objects_are_unique=False,
object_fraction=0.5,
get_object_area=True,
get_object_id=False,
get_class_id=False,
get_orientation=False,
get_solidity=False,
get_eccentricity=False,
cell_size=30.0,
n_jobs=1,
chunk_size=100000):
""""
Intersects land cover objects with a thematic land cover map
Args:
lc_objects (2d array): The segmented objects.
lc_objects_sep (2d array): The eroded segmented objects.
lc_array (Optional[2d array]): The land cover array, needed if `get_class_id = True`. Default is None.
var_array (Optional[2d array]): The image variables array. Default is None.
objects_are_unique (Optional[bool]): Whether the land cover objects of `lc_objects` are unique.
Default is False.
object_fraction (Optional[float]): The fraction of an object in `lc_objects` to be considered for intersection.
Default is 0.5.
get_object_area (Optional[bool]): Whether to return the object area. Default is True.
get_object_id (Optional[bool]): Whether to return the object id from `lc_objects`. Default is False.
get_class_id (Optional[bool]): Whether to return the land cover class id from `lc_array`. Default is False.
get_orientation (Optional[bool]): Whether to return the object orientation. Default is False.
get_solidity (Optional[bool]): Whether to return the object solidity. Default is False.
get_eccentricity (Optional[bool]): Whether to return the object eccentricity. Default is False.
cell_size (Optional[float]): The cell size, used when `get_object_area = True`. Default is 30.
n_jobs (Optional[int]): The number of parallel jobs. Default is 1.
chunk_size (Optional[int]): The chunk size for Pool. Default is 100,000.
"""
global object_fraction_g, get_object_area_g, get_object_id_g, \
get_class_id_g, get_orientation_g, get_solidity_g, get_eccentricity_g, \
pixel_ha, lc_objects_g, lc_array_g
object_fraction_g = object_fraction
get_object_area_g = get_object_area
get_object_id_g = get_object_id
get_class_id_g = get_class_id
get_orientation_g = get_orientation
get_solidity_g = get_solidity
get_eccentricity_g = get_eccentricity
lc_objects_g = lc_objects
lc_array_g = lc_array
# Square meters to hectares
pixel_ha = (cell_size * cell_size) * 0.0001
out_array = np.zeros((5, lc_objects.shape[0], lc_objects.shape[1]), dtype='float32')
# Get unique object ids.
if not objects_are_unique:
lc_objects[lc_objects > 0] = 1
lc_objects, n_objects = nd_label(lc_objects)
# Get object properties.
# zo = prop.area
props_int = regionprops(lc_objects)
for chi in range(0, len(props_int), chunk_size):
with pooler(processes=n_jobs) as pool:
# Get object statistics
intersected_objects = pool.map(_intersect_objects,
props_int[chi:chi+chunk_size],
chunk_size)
lc_values_, area_values_, ori_values_, sol_values_, ecc_values_, rowidx_list, colidx_list = zip(*intersected_objects)
# Join the lists
lc_values_ = np.array(list(itertools.chain.from_iterable(lc_values_)), dtype='uint8')
area_values_ = np.array(list(itertools.chain.from_iterable(area_values_)), dtype='float32')
ori_values_ = np.array(list(itertools.chain.from_iterable(ori_values_)), dtype='float32')
sol_values_ = np.array(list(itertools.chain.from_iterable(sol_values_)), dtype='float32')
ecc_values_ = np.array(list(itertools.chain.from_iterable(ecc_values_)), dtype='float32')
rowidx_list = np.array(list(itertools.chain.from_iterable(rowidx_list)), dtype='uint64')
colidx_list = np.array(list(itertools.chain.from_iterable(colidx_list)), dtype='uint64')
# Piece together the parcels
# land cover
if lc_values_.shape[0] > 0:
out_array[0, rowidx_list, colidx_list] = lc_values_
# area
if area_values_.shape[0] > 0:
out_array[1, rowidx_list, colidx_list] = area_values_
# orientation
if ori_values_.shape[0] > 0:
out_array[2, rowidx_list, colidx_list] = ori_values_
# solidity
if sol_values_.shape[0] > 0:
out_array[3, rowidx_list, colidx_list] = sol_values_
# eccentricity
if ecc_values_.shape[0] > 0:
out_array[4, rowidx_list, colidx_list] = ecc_values_
if isinstance(lc_objects_sep, np.ndarray):
# Swap the land cover with the eroded objects
out_array[0] = np.where(lc_objects_sep > 0, out_array[0], 0)
if isinstance(var_array, np.ndarray):
# Give the objects unique labels
lc_objects_labs_ = sk_label(np.uint8(lc_objects_sep), connectivity=2)
lc_objects_labs_index = np.unique(lc_objects_labs_)
# Get the mean for each variable layer
for var_idx, var_layer in enumerate(var_array):
layer_means = nd_mean(var_layer, labels=lc_objects_labs_, index=lc_objects_labs_index)
new_layer = np.zeros(var_layer.shape, dtype='float32')
# Update the layer values
for object_index in lc_objects_labs_index:
new_layer[lc_objects_labs_ == object_index] = layer_means[object_index]
# Add the layer to the output array
out_array = np.vstack((out_array, new_layer[np.newaxis, :, :]))
return out_array
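# Usage sketch (illustrative only; `seg_binary` and `lc_map` are hypothetical
# inputs, not names from this module): intersect binary segments with a land
# cover map and return a (band x row x column) stack of object statistics.
#
#     stats = intersect_objects(np.uint8(seg_binary),
#                               lc_array=lc_map,
#                               get_class_id=True,
#                               get_object_area=True,
#                               cell_size=30.0,
#                               n_jobs=2)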
def smooth(ts_array,
iterations=1,
window_size=5,
sigma_color=1.0,
sigma_space=1.0):
"""
Spatially smooths a series of arrays
Args:
ts_array (3d array): The time series array to smooth [time dimensions x rows x columns].
iterations (Optional[int]): The number of smoothing iterations. Default is 1.
window_size (Optional[int]): The bi-lateral filter window size (in pixels). Default is 5.
sigma_color (Optional[float]): The bi-lateral filter color sigma. Default is 1.0.
sigma_space (Optional[float]): The bi-lateral filter space (distance) sigma. Default is 1.0.
Returns:
The smoothed 3d `ts_array`.
"""
for tsp in range(0, ts_array.shape[0]):
for iteration in range(0, iterations):
# Subtract the inhibition term.
# ts_array[tsp] = moving_window(ts_array[tsp],
# statistic='inhibition',
# window_size=5,
# inhibition_ws=3,
# iterations=1,
# inhibition_scale=.5,
# inhibition_operation=1)
# Add the inhibition term.
# ts_array[tsp] = moving_window(ts_array[tsp],
# statistic='inhibition',
# window_size=5,
# inhibition_ws=3,
# iterations=1,
# inhibition_scale=.5,
# inhibition_operation=2)
# Fill basins
ts_array[tsp] = moving_window(ts_array[tsp],
statistic='fill-basins',
window_size=3,
iterations=2)
# Fill peaks
ts_array[tsp] = moving_window(ts_array[tsp],
statistic='fill-peaks',
window_size=3,
iterations=2)
# Bilateral filter
ts_array[tsp] = cv2.bilateralFilter(ts_array[tsp],
window_size,
sigma_color,
sigma_space)
return ts_array
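# Usage sketch (illustrative): smooth a small float32 time series stack. The
# `moving_window` and `cv2` calls above come from the module imports, so this
# only illustrates the expected input shape and dtype.
#
#     ts = np.random.random((4, 128, 128)).astype('float32')
#     ts_smooth = smooth(ts, iterations=1, window_size=5)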
class PixelStats(object):
@staticmethod
def calc(array2proc, statistic, lower_percentile, upper_percentile):
if statistic == 'cv':
ts_stat = array2proc.std(axis=0) / np.median(array2proc, axis=0)
ts_stat = np.where(ts_stat > 1, 1, ts_stat)
elif statistic == 'max':
ts_stat = array2proc.max(axis=0)
elif statistic == 'mean':
ts_stat = array2proc.mean(axis=0)
elif statistic in ['25', '50', '75']:
ts_stat = np.percentile(array2proc, int(statistic), axis=0)
ts_stat = moving_window(ts_stat, statistic='fill-basins', window_size=3, iterations=2)
ts_stat = moving_window(ts_stat, statistic='fill-peaks', window_size=3, iterations=2)
ts_stat[np.isnan(ts_stat) | np.isinf(ts_stat)] = 0
return rescale_intensity(ts_stat,
in_range=(np.percentile(ts_stat, lower_percentile),
np.percentile(ts_stat, upper_percentile)),
out_range=(0, 1))
class ArrayWriter(object):
def write2file(self,
output_image,
write_mag=False,
write_egm=False,
write_edges=False,
write_objects=False,
write_cv=False):
out_bands = 0
if write_mag:
out_bands += 1
if write_egm:
out_bands += 1
if write_edges:
out_bands += 1
if write_objects:
out_bands += 1
if write_cv:
out_bands += 1
band_pos = 1
if os.path.isfile(output_image):
os.remove(output_image)
with create_raster(output_image,
None,
compress='deflate',
rows=self.parameter_info.n_rows,
cols=self.parameter_info.n_cols,
bands=out_bands,
projection=self.parameter_info.projection,
cellY=self.parameter_info.extent_dict['cell_y'],
cellX=self.parameter_info.extent_dict['cell_x'],
left=self.parameter_info.extent_dict['left'],
top=self.parameter_info.extent_dict['top'],
storage='float32') as out_rst:
if write_mag:
out_rst.write_array(self.mag[self.parameter_info.ipad:self.parameter_info.ipad+self.parameter_info.n_rows,
self.parameter_info.jpad:self.parameter_info.jpad+self.parameter_info.n_cols],
band=band_pos)
out_rst.close_band()
band_pos += 1
if write_egm:
out_rst.write_array(self.egm[self.parameter_info.ipad:self.parameter_info.ipad+self.parameter_info.n_rows,
self.parameter_info.jpad:self.parameter_info.jpad+self.parameter_info.n_cols],
band=band_pos)
out_rst.close_band()
band_pos += 1
if write_edges:
out_rst.write_array(self.im_edges, band=band_pos)
out_rst.close_band()
band_pos += 1
if write_objects:
out_rst.write_array(self.objects[self.parameter_info.ipad:self.parameter_info.ipad+self.parameter_info.n_rows,
self.parameter_info.jpad:self.parameter_info.jpad+self.parameter_info.n_cols],
band=band_pos)
out_rst.close_band()
band_pos += 1
if write_cv:
out_rst.write_array(self.ts_array.var(axis=0) / self.ts_array.mean(axis=0), band=band_pos)
out_rst.close_band()
out_rst.close_file()
del out_rst
def adjust_edges(original_edges,
edges2adjust,
lower_threshold=.1,
scale_factor=2.,
scale_range=(0, 1),
min_object_size=100,
iterations=1):
for iteration in range(0, iterations):
edges2adjust[edges2adjust > lower_threshold] = 1
edges2adjust = sk_label(np.uint8(edges2adjust), connectivity=2)
edges2adjust = np.uint64(remove_small_objects(edges2adjust, min_size=min_object_size, connectivity=1))
edges2adjust[edges2adjust > 0] = 1
edges2adjust = np.float32(np.where(edges2adjust == 1,
original_edges * scale_factor,
original_edges / scale_factor))
edges2adjust = rescale_intensity(edges2adjust, in_range=scale_range, out_range=(0, 1))
if iterations > 1:
original_edges = edges2adjust.copy()
scale_factor += .1
scale_range = scale_range[0], scale_range[1]+.1
return rescale_intensity((edges2adjust + (original_edges * .5)) / 1.5,
in_range=(0, 1),
out_range=(0, 1))
# import matplotlib.pyplot as plt
# plt.imshow(edges2adjust)
# plt.show()
# import sys
# sys.exit()
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Args:
source (2d array): Image to transform; the histogram is computed over the flattened array.
template (2d array): Template image; can have different dimensions from the source.
Reference:
https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x
Returns:
matched (2d array): The transformed output image.
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# Get the set of unique pixel values and
# their corresponding indices and counts
s_values, bin_idx, s_counts = np.unique(source,
return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template,
return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.float32(s_counts.cumsum())
s_quantiles /= s_quantiles[-1]
t_quantiles = np.float32(t_counts.cumsum())
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
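# Usage sketch (illustrative): match a synthetic source image to a template
# with a different shape; the output keeps the source shape.
#
#     rng = np.random.RandomState(42)
#     src = rng.normal(0.4, 0.1, (64, 64))
#     tmpl = rng.normal(0.6, 0.05, (32, 32))
#     matched = hist_match(src, tmpl)   # matched.shape == (64, 64)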
```
|
{
"source": "jgrss/satsmooth",
"score": 2
}
|
#### File: satsmooth/anc/anc_smoothers.py
```python
import os
import itertools
import multiprocessing as multi
from ._dl import gd
# from ._lowess_smooth import lowess_smooth
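# NOTE: the commented import above is required by ``AncSmoothers.lw``; uncomment
# it (and build the corresponding extension) before using the lowess smoother.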
from .. import remove_outliers, interp2d, LinterpMulti
from ..utils import nd_to_columns, columns_to_nd, scale_min_max
from .. import rolling_mean2d, peaks_valleys2d, group_peaks_valleys2d
# from ._bspline import bezier_curve
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.signal import savgol_filter
import scipy.sparse as sparse
from scipy.sparse.linalg import splu
from skimage.segmentation import active_contour
#from sklearn.gaussian_process import GaussianProcessRegressor
#from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
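# NOTE: the two commented scikit-learn imports above are required by
# ``_gpr_func``/``AncSmoothers.gpr``; uncomment them before using the GPR smoother.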
# from numba import jit, prange, set_num_threads
def _cspline_func(*args):
xd, xd_smooth, y_, s, optimize = list(itertools.chain(*args))
w = np.where(y_ > 0, 1, 0.1).astype('float64')
if optimize:
min_gcv = 1e9
eps = 0.00001
for s_ in s:
spl_ = UnivariateSpline(xd, y_, w=w, s=s_)
# Degrees of freedom (degree = 3)
dof = spl_.get_knots().shape[0] + 3 + 1
n = spl_.get_coeffs().shape[0]
# Weighted sum of squared errors
sse = spl_.get_residual()
# Roughness penalty
denom = n - dof*s_ + eps
gcv = (n / denom) / ((sse / denom) + eps)
if gcv < min_gcv:
min_gcv = gcv
spl = spl_
else:
spl = UnivariateSpline(xd, y_, w=w, s=s, k=3)
return spl(xd_smooth)
# @jit(parallel=True)
def cspline_func(xinfo, indices, yarray, s, optimize, n_jobs, chunksize):
data_gen = ((xinfo.xd, xinfo.xd_smooth, yarray[i], s, optimize) for i in range(0, yarray.shape[0]))
with multi.Pool(processes=n_jobs) as executor:
yout = [result[indices] for result in executor.imap(_cspline_func, data_gen, chunksize=chunksize)]
# yout = np.empty((yarray.shape[0], indices.shape[0]), dtype='float64')
#
# for i in range(0, yarray.shape[0]):
# yout[i] = _cspline_func(xinfo, yarray[i], s, optimize)[indices]
return np.array(yout)
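# Usage sketch (illustrative; `xinfo` here is a stand-in exposing only the two
# attributes used above, not the project's own xinfo object):
#
#     from types import SimpleNamespace
#     xd = np.linspace(0.0, 360.0, 46)
#     xinfo = SimpleNamespace(xd=xd, xd_smooth=np.linspace(0.0, 360.0, 361))
#     y = np.clip(np.sin(xd / 60.0)[np.newaxis, :], 0, 1)
#     indices = np.arange(xinfo.xd_smooth.shape[0])
#     ysm = cspline_func(xinfo, indices, y, s=0.1, optimize=False, n_jobs=1, chunksize=1)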
def speyediff(N, d, format='csc'):
"""
(utility function)
Construct a d-th order sparse difference matrix based on
an initial N x N identity matrix
Final matrix (N-d) x N
"""
assert not (d < 0), "d must be non-negative"
shape = (N-d, N)
diagonals = np.zeros(2*d + 1)
diagonals[d] = 1.
for i in range(d):
diff = diagonals[:-1] - diagonals[1:]
diagonals = diff
offsets = np.arange(d+1)
spmat = sparse.diags(diagonals, offsets, shape, format=format)
return spmat
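# Illustrative check (not part of the original module): for N=5, d=2 the
# resulting operator applies the second-order difference to a length-5 vector.
#
#     speyediff(5, 2).toarray()
#     # array([[ 1., -2.,  1.,  0.,  0.],
#     #        [ 0.,  1., -2.,  1.,  0.],
#     #        [ 0.,  0.,  1., -2.,  1.]])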
def _whittaker_func(*args):
coefmat, y_ = list(itertools.chain(*args))
return splu(coefmat).solve(y_)
def whittaker_func(yarray, s, order, n_jobs, chunksize):
"""
Source:
https://github.com/mhvwerts/whittaker-eilers-smoother/blob/master/whittaker_smooth.py
"""
m = yarray.shape[1]
E = sparse.eye(m, format='csc')
D = speyediff(m, order, format='csc')
coefmat = E + s * D.conj().T.dot(D)
data_gen = ((coefmat, yarray[i]) for i in range(0, yarray.shape[0]))
with multi.Pool(processes=n_jobs) as executor:
yout = [result for result in executor.imap(_whittaker_func, data_gen, chunksize=chunksize)]
return np.array(yout)
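# Usage sketch (illustrative): smooth a noisy (samples x time) array; larger `s`
# values trade fidelity for smoothness.
#
#     y = np.random.random((4, 100))
#     y_smooth = whittaker_func(y, s=10.0, order=2, n_jobs=1, chunksize=2)
#     # y_smooth.shape == (4, 100)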
def _gpr_func(*args):
X, y_ = list(itertools.chain(*args))
gp_kernel = ExpSineSquared(length_scale=1.0, periodicity=365.25/2.0, periodicity_bounds=(1e-2, 1e1)) + WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
gpr.fit(X, y_)
return gpr.predict(X, return_std=False)
def gpr_func(xinfo, indices, yarray, n_jobs, chunksize):
X = xinfo.xd_smooth[:, np.newaxis]
# yout = np.empty((yarray.shape[0], indices.shape[0]), dtype='float64')
data_gen = ((X, yarray[i]) for i in range(0, yarray.shape[0]))
with multi.Pool(processes=n_jobs) as executor:
yout = [result[indices] for result in executor.imap(_gpr_func, data_gen, chunksize=chunksize)]
# for i in range(0, yarray.shape[0]):
# yout[i] = _gpr_func(X, yarray[i])[indices]
return np.array(yout)
def _snake_contour(*args):
X, y, pad, gain = list(itertools.chain(*args))
data_idx = np.arange(y.shape[0])
n = X.shape[0]
ymax = int(y.max() / gain)
c = np.zeros((ymax, n), dtype='float64') - 1
ridx = (ymax-np.uint8(y / gain)).clip(0, ymax-1).astype('int64')
ridx[0] = ridx[1]
ridx[-1] = ridx[-2]
idx = (ridx, data_idx)
# Fill the array with the time series
c[idx] = y
min_mse = 1e9
for alpha, beta, gamma in itertools.product([0.001, 0.01, 0.1], [0.1, 0.5, 1.0], [0.1]):
snake = active_contour(c,
np.array(idx).T,
boundary_condition='fixed',
alpha=alpha,
beta=beta,
w_line=1,
w_edge=0,
gamma=gamma,
max_iterations=100,
convergence=1)
ysnake_ = ((ymax - snake[:, 0]) * gain).clip(0, 1)
roughness = scale_min_max(np.abs(np.diff(ysnake_, n=4).mean()), 1e-6, 1e-5, 0.01, 1)
weights = np.where(y > ysnake_, 1, 0.5)
weights[:pad] = 0
weights[-pad:] = 0
se = (ysnake_ - y)**2
mse = np.average(se, weights=weights) * roughness
if mse < min_mse:
min_mse = mse
ysnake = ysnake_.copy()
return ysnake[pad:-pad]
def snake_contour(X, yarray, pad=10, n_jobs=1, chunksize=10):
X = np.pad(X, (pad, pad), mode='linear_ramp')
yarray = np.pad(yarray, ((0, 0), (pad, pad)), mode='reflect')
data_gen = ((X, yarray[i], pad, 0.01) for i in range(0, yarray.shape[0]))
with multi.Pool(processes=n_jobs) as executor:
yout = [result for result in executor.imap(_snake_contour, data_gen, chunksize=chunksize)]
return np.array(yout)
class Fourier(object):
def __init__(self, period=365.25, poly_order=1, harmonic_order=1):
self.period = period
self.poly_order = poly_order
self.harmonic_order = harmonic_order
self.coef_matrix = None
def _matrix(self, dates):
w = 2.0 * np.pi / self.period
if self.poly_order == 0:
self.coef_matrix = np.ones(shape=(len(dates), 1), order='F')
else:
self.coef_matrix = np.zeros(shape=(len(dates), self.harmonic_order*2+self.poly_order),
order='F')
w12 = w * dates
for p_order in range(1, self.poly_order+1):
self.coef_matrix[:, p_order-1] = dates**p_order
if self.harmonic_order > 0:
for h_order in range(1, self.harmonic_order+1):
self.coef_matrix[:, h_order+self.poly_order-1] = np.cos(w12*h_order)
self.coef_matrix[:, h_order+1+self.poly_order-1] = np.sin(w12*h_order)
if self.poly_order > 0:
self.coef_matrix = np.c_[self.coef_matrix, np.ones(self.coef_matrix.shape[0], dtype='float64')[:, np.newaxis]]
def fit_predict(self, X, yarray, indices=None):
self._matrix(X)
if isinstance(indices, np.ndarray):
est = np.zeros((yarray.shape[0], indices.shape[0]), dtype='float64')
else:
est = np.zeros(yarray.shape, dtype='float64')
# popt coefficients -> n coeffs x n samples
popt = np.linalg.lstsq(self.coef_matrix, yarray.T, rcond=None)[0]
def _pred_func(a):
return (a*popt).sum(axis=0)
for t in range(0, self.coef_matrix[indices].shape[0]):
est[:, t] = _pred_func(self.coef_matrix[indices][t][:, np.newaxis])
return est
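# Usage sketch (illustrative): fit a first-order harmonic model on day-of-year
# ordinals and predict at every input date.
#
#     dates = np.arange(0.0, 365.0, 8.0)
#     y = 0.3 + 0.2 * np.sin(2.0 * np.pi * dates / 365.25)[np.newaxis, :]
#     clf = Fourier(period=365.25, poly_order=1, harmonic_order=1)
#     yhat = clf.fit_predict(dates, y, indices=np.arange(dates.shape[0]))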
class SmoothersMixin(object):
@property
def shape_in(self):
return self._shape_in
@shape_in.setter
def shape_in(self, shape):
self._shape_in = shape
@property
def shape_out(self):
if self.shape_is_3d:
return self.ndims_out, self.nrows, self.ncols
else:
return self.nsamples, self.ndims_out
@property
def shape_is_3d(self):
return True if len(self.shape_in) == 3 else False
@property
def ndims_in(self):
return self._ndims_in
@ndims_in.setter
def ndims_in(self, ndims):
self._ndims_in = ndims
@property
def ndims_out(self):
return self.indices.shape[0]
@property
def nrows(self):
return self._nrows
@nrows.setter
def nrows(self, nrows):
self._nrows = nrows
@property
def ncols(self):
return self._ncols
@ncols.setter
def ncols(self, ncols):
self._ncols = ncols
@property
def nsamples(self):
return self.nrows * self.ncols
def _reshape_inputs(self):
if self.shape_is_3d:
return nd_to_columns(self.data, *self.shape_in)
else:
return self.data
def _reshape_outputs(self, outputs):
if self.shape_is_3d:
return columns_to_nd(outputs, *self.shape_out)
else:
return outputs
def pre_remove_outliers(xinfo, yarray, n_jobs, **kwargs):
ytest = remove_outliers(np.ascontiguousarray(xinfo.xd, dtype='float64'),
np.ascontiguousarray(yarray, dtype='float64'),
**kwargs)
return interp2d(ytest, no_data_value=0.0, n_jobs=n_jobs)
def _dbl_pvs(y: np.ndarray) -> np.ndarray:
"""Detects peaks and valleys for the double logistic function
Args:
y (2d array): (samples x time)
"""
peak_valley_kwargs = dict(min_value=0.05,
min_dist=5,
min_sp_dist=0.1,
min_prop_sp_dist=0.001,
n_jobs=os.cpu_count())
def gaussian_func(x, sigma):
"""Gaussian function for window weights"""
return np.exp(-pow(x, 2) / (2.0 * pow(sigma, 2)))
# The peaks/valleys array holder
pvs = np.zeros((2, *y.shape), dtype='float64')
# Iterate over multiple window sizes
for k in [21, 28]:
# Smooth the curve with a weighted rolling mean
weights = gaussian_func(np.linspace(-1, 1, k), 0.5)
ymean = rolling_mean2d(np.pad(y.copy(), ((0, 0), (k, k)), mode='reflect'),
w=k, weights=weights, n_jobs=os.cpu_count())[:, k:-k]
# Estimate peak/valley locations
pvs += peaks_valleys2d(np.ascontiguousarray(ymean, dtype='float64'),
order=k,
**peak_valley_kwargs)[0]
pvs[pvs > 1] = 1
return group_peaks_valleys2d(np.int64(pvs), y.copy(),
w=21,
min_prop_sp_dist=peak_valley_kwargs['min_prop_sp_dist'],
n_jobs=os.cpu_count())
class AncSmoothers(SmoothersMixin):
def __init__(self,
xinfo,
data,
pad=50,
index_by_indices=False,
remove_outliers=True,
max_outlier_days1=120,
max_outlier_days2=120,
min_outlier_values=7,
dev_thresh1=0.2,
dev_thresh2=0.2,
n_jobs=1):
self.xinfo = xinfo
self.data = data.copy()
self.pad = pad
self.index_by_indices = index_by_indices
self.remove_outliers = remove_outliers
self.max_outlier_days1 = max_outlier_days1
self.max_outlier_days2 = max_outlier_days2
self.min_outlier_values = min_outlier_values
self.dev_thresh1 = dev_thresh1
self.dev_thresh2 = dev_thresh2
self.n_jobs = n_jobs
# set_num_threads(n_jobs)
self.shape_in = self.data.shape
if self.shape_is_3d:
self.ndims, self.nrows, self.ncols = self.shape_in
else:
nsamples, self.ndims = self.shape_in
self.nrows = int(nsamples / 2)
self.ncols = 2
if self.index_by_indices:
self.indices = np.ascontiguousarray(xinfo.skip_idx + xinfo.start_idx, dtype='uint64')
else:
self.indices = np.ascontiguousarray(np.arange(0, self.xinfo.xd_smooth.shape[0]), dtype='uint64')
self._preprocess()
def _preprocess(self):
if not self.remove_outliers:
self.data = interp2d(np.float64(self._reshape_inputs()), no_data_value=0.0, n_jobs=self.n_jobs)
else:
self.data = pre_remove_outliers(self.xinfo,
self._reshape_inputs(),
max_outlier_days1=self.max_outlier_days1,
max_outlier_days2=self.max_outlier_days2,
min_outlier_values=self.min_outlier_values,
dev_thresh1=self.dev_thresh1,
dev_thresh2=self.dev_thresh2,
n_jobs=self.n_jobs)
def csp(self, s=0.1, optimize=False, chunksize=10):
"""Cubic smoothing spline
"""
return self._reshape_outputs(cspline_func(self.xinfo,
self.indices,
self.data,
s,
optimize,
self.n_jobs,
chunksize)[:, self.indices])
# def bsp(self, window_size=50, mfactor=3, max_iter=1):
#
# return bezier_curve(self.data.copy(),
# window_size=window_size,
# mfactor=mfactor,
# max_iters=max_iter)
#
# # interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
# #
# # return self._reshape_outputs(interp.interpolate(bezier_curve(self.data.copy(),
# # window_size=window_size,
# # mfactor=mfactor,
# # max_iters=max_iter))[:, self.indices],
# # *self.shape_out)
def dbl(self,
lr=None,
max_iters=1000,
reltol=1e-08,
init_params=None,
beta1=0.9,
beta2=0.99,
chunksize=10):
"""Double logistic function
"""
# Interpolate and regrid
interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
y = interp.interpolate(self.data,
fill_no_data=True,
no_data_value=0,
n_jobs=self.n_jobs)
# Detect peaks/valleys
pv_array = _dbl_pvs(y)
if lr is None:
lr = np.array([0.01, 0.1, 1.0, 0.5, 1.0, 0.5, 0.001], dtype='float64')
if init_params is None:
init_params = np.ascontiguousarray([0.03, 0.6, 75, 20.0, 300, 20.0, 0.0001], dtype='float64')
return self._reshape_outputs(gd(ordinals=np.ascontiguousarray(self.xinfo.xd_smooth, dtype='int64'),
y=y,
pv_array=np.ascontiguousarray(pv_array, dtype='int64'),
lr=lr,
max_iters=max_iters,
reltol=reltol,
init_params=init_params,
constraints=np.array([[0.0, 0.2],
[0.2, 2.0],
[0.0, 185.0],
[5.0, 40.0],
[185.0, 367.0],
[5.0, 40.0],
[1e-8, 0.01]]),
beta1=beta1,
beta2=beta2,
n_jobs=self.n_jobs,
chunksize=chunksize)[:, self.indices])
def harm(self, period=365.25, poly_order=1, harmonic_order=1):
"""Linear harmonic regression
"""
interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
clf = Fourier(period=period,
poly_order=poly_order,
harmonic_order=harmonic_order)
ypred = clf.fit_predict(self.xinfo.xd_smooth[self.xinfo.start_idx:self.xinfo.end_idx],
interp.interpolate(self.data,
fill_no_data=True,
no_data_value=0,
n_jobs=self.n_jobs)[:, self.xinfo.start_idx:self.xinfo.end_idx],
indices=self.xinfo.skip_idx)
return self._reshape_outputs(ypred)
def sg(self, w=7, p=3):
"""Savitsky-Golay smoothing
"""
interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
return self._reshape_outputs(interp.interpolate(savgol_filter(self.data, w, p),
fill_no_data=True,
no_data_value=0,
n_jobs=self.n_jobs)[:, self.indices])
def wh(self, s=1.0, order=2, chunksize=10):
"""Whittaker smoothing
"""
interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
return self._reshape_outputs(interp.interpolate(whittaker_func(self.data, s, order, self.n_jobs, chunksize),
fill_no_data=True,
no_data_value=0,
n_jobs=self.n_jobs)[:, self.indices])
def lw(self, w=31, chunksize=10):
"""Lowess smoothing
"""
interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
return self._reshape_outputs(
interp.interpolate(lowess_smooth(ordinals=np.ascontiguousarray(self.xinfo.xd, dtype='int64'),
y=self.data,
w=w,
n_jobs=self.n_jobs,
chunksize=chunksize),
fill_no_data=True,
no_data_value=0,
n_jobs=self.n_jobs)[:, self.indices])
def gpr(self, chunksize=10):
interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
return self._reshape_outputs(gpr_func(self.xinfo,
self.indices,
interp.interpolate(self.data,
fill_no_data=True,
no_data_value=0,
n_jobs=self.n_jobs),
self.n_jobs,
chunksize))
def ac(self, pad=10, chunksize=10):
"""Active contour smoothing
"""
interp = LinterpMulti(self.xinfo.xd, self.xinfo.xd_smooth)
return self._reshape_outputs(rolling_mean2d(np.ascontiguousarray(interp.interpolate(snake_contour(self.xinfo.xd,
self.data,
pad=pad,
n_jobs=self.n_jobs,
chunksize=chunksize),
fill_no_data=True,
no_data_value=0,
n_jobs=self.n_jobs)[:,
self.indices], dtype='float64'),
w=21,
no_data_value=0,
weights=np.ones(21, dtype='float64'),
n_jobs=self.n_jobs))
```
#### File: satsmooth/utils/tfill.py
```python
from pathlib import Path
from datetime import datetime
from datetime import timedelta
from collections import namedtuple
import calendar
import concurrent.futures
from ..preprocessing import interp2d, fill_gaps
# from ..preprocessing import interp2d
# from ..preprocessing.fill_gaps_py import fill_gaps
from ..preprocessing.fill_gaps_py import fill_x
from . import nd_to_columns, columns_to_nd, prepare_x
import numpy as np
import cv2
import pandas as pd
from numba import jit
from tqdm.auto import trange, tqdm
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
ThreadPoolExecutor = concurrent.futures.ThreadPoolExecutor
@jit(nopython=True, nogil=True)
def calc_gap_lengths(ordinals, darray):
"""
Calculates the length of gaps
Args:
ordinals (1d array)
darray (2d array)
"""
nr, nc = darray.shape
gap_counts = np.zeros((nr, nc), dtype='int64')
for i in range(0, nr):
j = 0
while True:
if darray[i, j] == 0:
gap_total = 0
# Find all gaps
for g in range(j, nc):
if darray[i, g] == 0:
gap_total += ordinals[g]
else:
break
for q in range(j, g):
gap_counts[i, q] = gap_total
j = g + 1
else:
j += 1
if j >= nc:
break
return gap_counts
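# Illustrative sketch (not part of the original module): `ordinals` holds the
# day spacing to the next acquisition (0 for the last one) and zeros in `darray`
# mark gaps, so a two-step gap accumulates its two spacings.
#
#     ordinals = np.array([8, 8, 8, 8, 0], dtype='int64')
#     darray = np.array([[0.2, 0.0, 0.0, 0.4, 0.5]], dtype='float64')
#     calc_gap_lengths(ordinals, darray)
#     # array([[ 0, 16, 16,  0,  0]])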
def spatial_mean(yarray, w=5):
"""
Spatial mean over rows and columns
"""
wh = int(w / 2.0)
yarray = np.pad(yarray.copy(), ((0, 0), (w, w), (w, w)), mode='reflect')
ymean = np.zeros(yarray.shape, dtype='float64')
ymask = np.where(yarray > 0, 1, 0).astype('uint8')
ycount = np.zeros(yarray.shape, dtype='uint8')
offsets = np.linspace(-wh, wh, w).astype(int).tolist()
for yoff in offsets:
for xoff in offsets:
ymean += np.roll(yarray, (0, yoff, xoff), (0, 1, 2))
ycount += np.roll(ymask, (0, yoff, xoff), (0, 1, 2))
ymean /= ycount.astype('float64')
return ymean[:, w:-w, w:-w]
def tsmooth(yarray, q, method='linear', w=7):
"""
Temporal smoothing over time dimension
"""
return (pd.DataFrame(data=yarray.copy())
.interpolate(method=method, axis=1)
.rolling(w, center=True, axis=1)
.quantile(q)
.bfill(axis=1)
.ffill(axis=1)).values
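# Usage sketch (illustrative): build a lower/upper rolling-quantile envelope
# along the time axis.
#
#     y = np.random.random((3, 50))
#     lwr = tsmooth(y, 0.1, w=15)
#     upr = tsmooth(y, 0.9, w=15)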
def kmeans(yarray, n_classes=5, max_iters=20, n_attempts=20, n_jobs=1):
"""
Clusters data by k-means
Args:
yarray (2d array): The data to cluster.
n_classes (Optional[int]): The number of clusters.
max_iters (Optional[int]): The maximum number of iterations.
n_attempts (Optional[int]): The number of attempts.
n_jobs (Optional[int]): The number of concurrent threads for interpolation.
"""
# Define criteria = (type, max_iter, epsilon=1.0)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, max_iters, 1.0)
# Use randomly initialized cluster centers
flags = cv2.KMEANS_RANDOM_CENTERS
# Apply KMeans
labels = cv2.kmeans(np.float32(interp2d(yarray.copy(),
no_data_value=0.0,
n_jobs=n_jobs)),
n_classes,
None,
criteria,
n_attempts,
flags)[1].flatten()
return labels
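# Usage sketch (illustrative): cluster (samples x time) series into 5 groups;
# the returned labels have one entry per sample.
#
#     labels = kmeans(np.random.random((100, 40)), n_classes=5, n_jobs=1)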
class FillMixin(object):
@staticmethod
def get_gap_lengths(X, y):
ordinals = np.array([X[i+1].toordinal() - X[i].toordinal() for i in range(0, X.shape[0]-1)] + [0], dtype='int64')
return calc_gap_lengths(ordinals, y)
@staticmethod
def check_day_dist(dta, dtb, max_days):
"""
Checks if two dates fall within a day range
Args:
dta (object): The first ``datetime.datetime`` object.
dtb (object): The second ``datetime.datetime`` object.
max_days (int): The maximum number of days.
Returns:
``bool``
"""
# Get the maximum number of days in the current month
max_month_days = calendar.monthrange(dta.year, dtb.month)[1]
month_day = min(dtb.day, max_month_days)
dtc = datetime.strptime(f'{dta.year}-{dtb.month}-{month_day}', '%Y-%m-%d')
if abs(dta - dtc).days <= max_days:
return True
# Get the maximum number of days in the current month
max_month_days = calendar.monthrange(dta.year-1, dtb.month)[1]
month_day = min(dtb.day, max_month_days)
dtc = datetime.strptime(f'{dta.year-1}-{dtb.month}-{month_day}', '%Y-%m-%d')
if abs(dta - dtc).days <= max_days:
return True
# Get the maximum number of days in the current month
max_month_days = calendar.monthrange(dta.year+1, dtb.month)[1]
month_day = min(dtb.day, max_month_days)
dtc = datetime.strptime(f'{dta.year+1}-{dtb.month}-{month_day}', '%Y-%m-%d')
if abs(dta - dtc).days <= max_days:
return True
return False
def check_day_dist(dta, dtb, max_days):
"""
Checks if two dates fall within a day range
Args:
dta (object): The first ``datetime.datetime`` object.
dtb (object): The second ``datetime.datetime`` object.
max_days (int): The maximum number of days.
Returns:
``bool``
"""
# Get the maximum number of days in the current month
max_month_days = calendar.monthrange(dta.year, dtb.month)[1]
month_day = min(dtb.day, max_month_days)
dtc = datetime.strptime(f'{dta.year}-{dtb.month}-{month_day}', '%Y-%m-%d')
if abs(dta - dtc).days <= max_days:
return True
# Get the maximum number of days in the current month
max_month_days = calendar.monthrange(dta.year-1, dtb.month)[1]
month_day = min(dtb.day, max_month_days)
dtc = datetime.strptime(f'{dta.year-1}-{dtb.month}-{month_day}', '%Y-%m-%d')
if abs(dta - dtc).days <= max_days:
return True
# Get the maximum number of days in the current month
max_month_days = calendar.monthrange(dta.year+1, dtb.month)[1]
month_day = min(dtb.day, max_month_days)
dtc = datetime.strptime(f'{dta.year+1}-{dtb.month}-{month_day}', '%Y-%m-%d')
if abs(dta - dtc).days <= max_days:
return True
return False
def sort_by_date(xinfo, data, time_index, max_days=30, max_years=2):
"""
Sorts images by nearest date to reference
Args:
xinfo (object): The data object.
data (3d array): The data to slice and sort.
time_index (int): The current time reference.
max_days (Optional[int]): The maximum number of days difference.
max_years (Optional[int]): The maximum number of years difference.
Returns:
3d array
"""
# filter references by date
try:
target_date = xinfo.dates[time_index]
except:
return None, np.array([0])
ref_idx = np.array([i for i in range(0, xinfo.dates.shape[0]) if
(abs(target_date.year - xinfo.dates[i].year) <= max_years) and
check_day_dist(target_date, xinfo.dates[i], max_days)], dtype='int64')
if ref_idx.shape[0] == 0:
return None, np.array([0])
else:
slice_idx = ref_idx[np.argsort(np.abs(ref_idx - time_index))]
# Sort by nearest to reference ``time_index``
return xinfo.dates[slice_idx], data[slice_idx]
def fill_layer(xinfo,
y,
didx,
nrows,
ncols,
wmax,
wmin,
min_count,
dev_thresh,
max_days,
max_years,
nodata,
num_threads,
chunksize):
"""
Fills gaps
"""
Results = namedtuple('Results', 'index array')
# Sort the arrays in ascending order to the target date
gap_dates, gap_array = sort_by_date(xinfo,
y,
didx,
max_days=max_days,
max_years=max_years)
if (gap_array.shape[1] == 1) or (gap_array.max() == nodata):
return Results(index=didx, array=y[didx])
# The target date needs some data
if gap_array[0].max() > 0:
gdims, grows, gcols = gap_array.shape
# Get the cluster labels
cluster_labels = kmeans(nd_to_columns(gap_array, gdims, grows, gcols),
n_classes=5,
max_iters=20,
n_attempts=20,
n_jobs=num_threads).reshape(grows, gcols)
if len(y.shape) > 3:
gap_array = np.float64(gap_array)
else:
gap_array = np.float64(gap_array.reshape(gdims, 1, nrows, ncols))
prefill_wmax_half = int(wmax / 2.0)
gap_array = np.pad(gap_array, ((0, 0),
(0, 0),
(prefill_wmax_half, prefill_wmax_half),
(prefill_wmax_half, prefill_wmax_half)),
mode='reflect')
cluster_labels = np.pad(cluster_labels,
((prefill_wmax_half, prefill_wmax_half),
(prefill_wmax_half, prefill_wmax_half)),
mode='reflect')
# Fill gaps at time ``didx``
# y[didx] = np.squeeze(fill_gaps(gap_array,
# np.ascontiguousarray(cluster_labels, dtype='int64'),
# np.array([1], dtype='float64'), # not currently used
# wmax=self.wmax,
# wmin=self.wmin,
# nodata=self.nodata,
# min_count=self.min_count,
# dev_thresh=self.dev_thresh,
# n_jobs=self.num_threads,
# chunksize=self.chunksize))[prefill_wmax_half:-prefill_wmax_half,
# prefill_wmax_half:-prefill_wmax_half]
ygap = gap_array[0]
X, day_weights = fill_x(gap_dates, gap_array[1:], nodata)
if X.max() == nodata:
return Results(index=didx, array=y[didx])
return Results(index=didx, array=np.squeeze(fill_gaps(X,
ygap,
np.ascontiguousarray(cluster_labels, dtype='int64'),
np.ascontiguousarray(day_weights, dtype='float64'),
wmax=wmax,
wmin=wmin,
nodata=nodata,
min_count=min_count,
dev_thresh=dev_thresh,
n_jobs=num_threads,
chunksize=chunksize))[prefill_wmax_half:-prefill_wmax_half,
prefill_wmax_half:-prefill_wmax_half])
# return Results(index=didx, array=np.squeeze(fill_gaps(gap_array,
# np.ascontiguousarray(cluster_labels, dtype='int64'),
# w=wmax,
# nodata=nodata,
# dev_thresh=dev_thresh)))
else:
return Results(index=didx, array=y[didx])
class SFill(FillMixin):
def __init__(self,
start='2000-07-01',
end='2020-07-01',
rule='D',
skip='WMS',
wmax=25,
wmin=5,
nodata=0,
n_iters=1,
max_days=30,
max_years=0,
min_count=20,
dev_thresh=0.03,
num_threads=1,
chunksize=10):
self.start = start
self.end = end
self.rule = rule
self.skip = skip
self.wmax = wmax
self.wmin = wmin
self.nodata = nodata
self.n_iters = n_iters
self.max_days = max_days
self.max_years = max_years
self.min_count = min_count
self.dev_thresh = dev_thresh
self.num_threads = num_threads
self.chunksize = chunksize
def impute(self, X, y):
"""
Args:
X (1d array)
y (3d array)
"""
xinfo = prepare_x(X, self.start, self.end, rule=self.rule, skip=self.skip)
ndims, nrows, ncols = y.shape
y = np.nan_to_num(y, nan=self.nodata, posinf=self.nodata, neginf=self.nodata)
yout = y.copy()
with ThreadPoolExecutor(max_workers=int(self.num_threads / 2.0)) as executor:
for iter_ in range(0, self.n_iters):
# for didx in range(0, ndims):
#
# res = fill_layer(xinfo, y, didx,
# self.wmax,
# self.dev_thresh,
# self.max_days,
# self.max_years,
# self.nodata,
# int(self.num_threads / 2.0))
# CYTHON
data_gen = ((xinfo,
y,
didx,
nrows,
ncols,
self.wmax,
self.wmin,
self.min_count,
self.dev_thresh,
self.max_days,
self.max_years,
self.nodata,
2,
self.chunksize) for didx in range(0, ndims))
# STRIDES
# data_gen = ((xinfo,
# y,
# didx,
# nrows,
# ncols,
# self.wmax,
# self.dev_thresh,
# self.max_days,
# self.max_years,
# self.nodata,
# 2) for didx in range(0, ndims))
futures = [executor.submit(fill_layer, *args) for args in data_gen]
for f in tqdm(concurrent.futures.as_completed(futures), total=ndims):
results = f.result()
yout[results.index] = results.array
y = np.nan_to_num(yout.copy(), nan=self.nodata, posinf=self.nodata, neginf=self.nodata)
futures = None
return np.nan_to_num(yout, nan=self.nodata, posinf=self.nodata, neginf=self.nodata)
def sort_by_date(self, xinfo, data, time_index, max_days=30, max_years=2):
"""
Sorts images by nearest date to reference
Args:
xinfo (object): The data object.
data (3d array): The data to slice and sort.
time_index (int): The current time reference.
max_days (Optional[int]): The maximum number of days difference.
max_years (Optional[int]): The maximum number of years difference.
Returns:
3d array
"""
# filter references by date
try:
target_date = xinfo.dates[time_index]
except:
return None, np.array([0])
ref_idx = np.array([i for i in range(0, xinfo.dates.shape[0]) if
(abs(target_date.year - xinfo.dates[i].year) <= max_years) and
self.check_day_dist(target_date, xinfo.dates[i], max_days)], dtype='int64')
if ref_idx.shape[0] == 0:
return None, np.array([0])
else:
# dates = xinfo.dates[ref_idx[np.argsort(np.abs(ref_idx - time_index))]]
# days = [abs((dt - dates[0]).days) for dt in dates]
# Sort by nearest to reference ``time_index``
return data[ref_idx[np.argsort(np.abs(ref_idx - time_index))]]
class TFill(FillMixin):
"""
Args:
month (Optional[int]): The start and end month.
min_gap_length (Optional[int]): The minimum gap length (summed day spacing) required for a gap to be imputed.
Example:
>>> tf = TFill()
>>> tf.impute(X, y)
"""
def __init__(self,
start_year=2000,
end_year=2020,
month=7,
min_gap_length=30,
batch_size=5,
num_threads=1):
self.start_year = start_year
self.end_year = end_year
self.month = month
self.min_gap_length = min_gap_length
self.batch_size = batch_size
self.num_threads = num_threads
self.half_batch = int(self.batch_size / 2.0)
self.df = None
def impute(self, X, y):
"""
Args:
X (1d array)
y (3d array)
"""
ndims, nrows, ncols = y.shape
# 5x5 spatial average
# yfill = spatial_mean(y, w=5)
y = nd_to_columns(y, ndims, nrows, ncols)
yfill = y.copy()
self.df = pd.DataFrame(data=range(0, X.shape[0]), columns=['name'], index=X)
gap_lengths = self.get_gap_lengths(X, y)
for batch_year in trange(self.start_year, self.end_year+1):
#########################
# Center the batch window
#########################
# Time series start
if batch_year - self.half_batch < self.start_year:
batch_start = batch_year
batch_end = batch_year + self.batch_size
# Time series end
elif self.end_year - batch_year < self.batch_size:
batch_start = self.end_year - self.batch_size
batch_end = self.end_year
else:
batch_start = batch_year - self.half_batch
batch_end = batch_year - self.half_batch + self.batch_size
dates_batch = self.df.loc[f'{batch_start}-{self.month:02d}-01':f'{batch_end}-{self.month:02d}-01']\
.index.to_pydatetime()
if dates_batch.shape[0] > 0:
date_list = self.get_unique_dates(dates_batch)
if date_list.shape[0] > 2:
yh_med, yh_lwr, yh_upr = self.stack(date_list, y, batch_year, batch_year+self.batch_size)
yfill = self.fill(date_list, gap_lengths, y, yh_med, yh_lwr, yh_upr, yfill, batch_year)
return yfill
def fill(self, X_batch, gap_lengths, y, yh_med, yh_lwr, yh_upr, yfill, year_0):
# Get a 12-month slice
frame_slice = self.df.loc[f'{year_0}-{self.month:02d}-01':f'{year_0+1}-{self.month:02d}-01']
year_slice = frame_slice.index.to_pydatetime().tolist()
# Get all dates for the current year slice
date_slice_list = self._adjust_dates(year_0, year_slice, base_year=year_0)
# Get the indices that fit into the full array
idx_full = frame_slice.name.values
idx, put_indices = self.get_indices(X_batch, idx_full, date_slice_list)
# Impute with the rolling median
yfill[:, idx] = np.where(y[:, idx] == 0,
np.where((yh_med[:, put_indices] <= yh_upr[:, put_indices]) &
(yh_med[:, put_indices] >= yh_lwr[:, put_indices]) &
(gap_lengths[:, idx] > self.min_gap_length),
yh_med[:, put_indices], y[:, idx]),
y[:, idx])
return yfill
@staticmethod
def get_indices(X_batch, idx_full, date_slice_list):
put_indices = []
idx = []
for pix_idx, dta in zip(idx_full, date_slice_list):
pix_put_idx = np.where(X_batch == dta)[0]
if pix_put_idx.shape[0] > 0:
idx.append(pix_idx)
put_indices.append(int(pix_put_idx[0]))
return idx, put_indices
def stack(self, X_batch, y, year_0, year_n):
# Create the array holder
yh = np.zeros((y.shape[0], X_batch.shape[0]), dtype='float64')
# TODO: iterate in batches
for year in range(year_0, year_n+1):
# Get a 12-month slice
frame_slice = self.df.loc[f'{year}-{self.month:02d}-01':f'{year+1}-{self.month:02d}-01']
year_slice = frame_slice.index.to_pydatetime().tolist()
# Get all dates for the current year slice
date_slice_list = self._adjust_dates(year, year_slice, base_year=year_0, check_monotonic=True)
# Get the indices that fit into the full array
idx_full = frame_slice.name.values
idx, put_indices = self.get_indices(X_batch, idx_full, date_slice_list)
yh[:, put_indices] = np.where(yh[:, put_indices] == 0,
y[:, idx],
np.where(y[:, idx] > 0.01, y[:, idx], yh[:, put_indices]))
# Get the cluster labels
cluster_labels = kmeans(yh,
n_classes=5,
max_iters=20,
n_attempts=20,
n_jobs=self.num_threads)
yh[yh == 0] = np.nan
yh_impute = yh.copy()
yh_impute[np.isnan(yh_impute)] = 0
yh_lwr = np.zeros(yh_impute.shape, dtype='float64')
yh_upr = np.zeros(yh_impute.shape, dtype='float64')
imp = IterativeImputer(missing_values=np.nan,
sample_posterior=False,
n_nearest_features=10,
max_iter=10,
initial_strategy='mean',
min_value=0,
max_value=1)
# Impute for each cluster
for clab in np.unique(cluster_labels):
# Get the current cluster
clab_idx = np.where(cluster_labels == clab)[0]
yh_clab = yh[clab_idx]
yh_impute_clab = yh_impute[clab_idx]
# Get the columns with data
valid_idx = np.where(yh_impute_clab.max(axis=0) > 0)[0]
# Impute the current cluster
yh_impute_clab[:, valid_idx] = imp.fit_transform(yh_clab)
# MICE
# mice_iters = 5
# for __ in range(0, mice_iters):
# yh_impute[:, valid_idx] += imp.fit_transform(yh)
#
# yh_impute[:, valid_idx] /= float(mice_iters)
# yh_impute = tsmooth(yh, 0.5)
yh_lwr_clab = tsmooth(yh_clab, 0.1, w=15)
yh_upr_clab = tsmooth(yh_clab, 0.9, w=15)
# Update the cluster
yh_impute[clab_idx] = yh_impute_clab
yh_lwr[clab_idx] = yh_lwr_clab
yh_upr[clab_idx] = yh_upr_clab
return yh_impute, yh_lwr, yh_upr
def get_unique_dates(self, dates_batch):
"""
Args:
dates_batch (list): A list of datetime objects.
"""
year_0 = dates_batch[0].year
year_n = dates_batch[-1].year
date_list = []
for year in range(year_0, year_n):
date_list = self._adjust_dates(year, dates_batch, adj_date_list=date_list, base_year=year_0)
return np.sort(np.unique(date_list))
def _adjust_dates(self, year, dt_objs, adj_date_list=None, base_year=2000, check_monotonic=False):
if not adj_date_list:
adj_date_list = []
for dt in dt_objs:
# Add 6 months to set July as fake January for sorting
month = (dt + pd.DateOffset(months=12-self.month+1)).to_pydatetime().month
if dt.year in [year, year+1]:
if dt.day > calendar.monthrange(base_year+1, month)[1]:
adj_dt = datetime.strptime(f'{base_year+1}{month:02d}{calendar.monthrange(base_year+1, month)[1]:02d}', '%Y%m%d')
else:
adj_dt = datetime.strptime(f'{base_year+1}{month:02d}{dt.day:02d}', '%Y%m%d')
else:
adj_dt = None
if adj_dt is not None:
adj_date_list.append(adj_dt)
if check_monotonic:
mono_increase = [True] + [True if adj_date_list[i] > adj_date_list[i-1] else False for i in range(1, len(adj_date_list))]
if not all(mono_increase):
for mi, mono_bool in enumerate(mono_increase):
if not mono_bool:
new_dt = adj_date_list[mi] - timedelta(days=1)
adj_date_list[mi] = datetime.strptime(f'{base_year+1}{new_dt.month:02d}{new_dt.day:02d}', '%Y%m%d')
return adj_date_list
```
#### File: jgrss/satsmooth/setup.py
```python
import setuptools
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import platform
try:
from Cython.Distutils import build_ext
except:
from distutils.command import build_ext
import numpy as np
# Parse the version from the module.
# Source: https://github.com/mapbox/rasterio/blob/master/setup.py
with open('satsmooth/version.py') as f:
for line in f:
if line.find("__version__") >= 0:
version = line.split("=")[1].strip()
version = version.strip('"')
version = version.strip("'")
continue
pkg_name = 'satsmooth'
maintainer = '<NAME>'
maintainer_email = ''
description = 'Satellite n-dimensional signal smoothing'
git_url = 'http://github.com/jgrss/satsmooth.git'
with open('README.md') as f:
long_description = f.read()
with open('LICENSE.txt') as f:
license_file = f.read()
required_packages = ['cython', 'numpy']
compile_args = ['-fopenmp']
link_args = ['-fopenmp']
if platform.system().lower() == 'darwin':
compile_args.insert(0, '-Xpreprocessor')
link_args = ['-lomp']
def get_packages():
return setuptools.find_packages()
def get_package_data():
return {'': ['*.md', '*.txt'],
'satsmooth': ['detect/*.so',
'smooth/*.so',
'preprocessing/*.so',
'testing/*.so',
'utils/*.pxd']}
def get_extensions():
# extra_compile_args=['-O3', '-ffast-math', '-march=native', '-fopenmp']
return [Extension('*',
sources=['satsmooth/detect/_signal.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args),
Extension('*',
sources=['satsmooth/preprocessing/_linear_interp.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args),
Extension('*',
sources=['satsmooth/preprocessing/_linear_interp_regrid.pyx'],
language='c++'),
Extension('*',
sources=['satsmooth/preprocessing/_linear_interp_regrid_multi.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args,
language='c++'),
Extension('*',
sources=['satsmooth/preprocessing/_linear_interp_regrid_multi_indexing.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args),
Extension('*',
sources=['satsmooth/preprocessing/_fill_gaps.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args),
Extension('*',
sources=['satsmooth/preprocessing/_outlier_removal.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args),
Extension('*',
sources=['satsmooth/anc/_dl.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args),
Extension('*',
sources=['satsmooth/anc/_lowess_smooth.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args,
language='c++'),
Extension('*',
sources=['satsmooth/smooth/_adaptive_bilateral.pyx'],
language='c++'),
Extension('*',
sources=['satsmooth/smooth/_rolling1d.pyx']),
Extension('*',
sources=['satsmooth/smooth/_rolling2d.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args),
Extension('*',
sources=['satsmooth/smooth/_spatial_temporal.pyx'],
extra_compile_args=compile_args,
extra_link_args=link_args,
language='c++')]
def setup_package():
include_dirs = [np.get_include()]
metadata = dict(name=pkg_name,
maintainer=maintainer,
maintainer_email=maintainer_email,
description=description,
license=license_file,
version=version,
long_description=long_description,
packages=get_packages(),
package_data=get_package_data(),
ext_modules=cythonize(get_extensions()),
cmdclass=dict(build_ext=build_ext),
download_url=git_url,
install_requires=required_packages,
include_dirs=include_dirs)
setup(**metadata)
if __name__ == '__main__':
setup_package()
```
|
{
"source": "jgrss/tsaug",
"score": 4
}
|
#### File: tsaug/_augmenter/crop.py
```python
from typing import List, Optional, Tuple, Union
import numpy as np
from .base import _Augmenter, _default_seed
from .resize import Resize
class Crop(_Augmenter):
"""
Crop random sub-sequences from time series.
To guarantee all output series have the same length, if the crop size is not
deterministic, all crops must be resize to a fixed length.
Parameters
----------
size : int, tuple, list
The length of random crops.
- If int, all crops have the same length.
- If list, a crop from a series has a length sampled from this list
randomly.
- If 2-tuple, a crop from a series has a length sampled from this
interval randomly.
resize : int, optional
The length that all crops are resized to. Only necessary if the crop
size is not fixed.
repeats : int, optional
The number of times a series is augmented. If greater than one, a series
will be augmented that many times independently. This parameter can also
be set by operator `*`. Default: 1.
prob : float, optional
The probability that a series is augmented. It must be in (0.0, 1.0]. If
multiple output is expected, this value must be 1.0, so that all output
have the same length. This parameter can also be set by operator `@`.
Default: 1.0.
seed : int, optional
The random seed. Default: None.
"""
def __init__(
self,
size: Union[int, Tuple[int, int], List[int]],
resize: Optional[int] = None,
repeats: int = 1,
prob: float = 1.0,
seed: Optional[int] = _default_seed,
):
self.size = size
self.resize = resize
super().__init__(repeats=repeats, prob=prob, seed=seed)
@classmethod
def _get_param_name(cls) -> Tuple[str, ...]:
return ("size", "resize")
@property
def size(self) -> Union[int, Tuple[int, int], List[int]]:
return self._size
@size.setter
def size(self, n: Union[int, Tuple[int, int], List[int]]) -> None:
SIZE_ERROR_MSG = (
"Parameter `size` must be a positive integer, "
"a 2-tuple of positive integers representing an interval, "
"or a list of positive integers."
)
if not isinstance(n, int):
if isinstance(n, list):
if len(n) == 0:
raise ValueError(SIZE_ERROR_MSG)
if not all([isinstance(nn, int) for nn in n]):
raise TypeError(SIZE_ERROR_MSG)
if not all([nn > 0 for nn in n]):
raise ValueError(SIZE_ERROR_MSG)
elif isinstance(n, tuple):
if len(n) != 2:
raise ValueError(SIZE_ERROR_MSG)
if (not isinstance(n[0], int)) or (not isinstance(n[1], int)):
raise TypeError(SIZE_ERROR_MSG)
if n[0] >= n[1]:
raise ValueError(SIZE_ERROR_MSG)
if (n[0] <= 0) or (n[1] <= 0):
raise ValueError(SIZE_ERROR_MSG)
else:
raise TypeError(SIZE_ERROR_MSG)
elif n <= 0:
raise ValueError(SIZE_ERROR_MSG)
self._size = n
@property
def resize(self) -> Optional[int]:
return self._resize
@resize.setter
def resize(self, s: Optional[int]) -> None:
if (s is not None) and (not isinstance(s, int)):
raise TypeError("Parameter `resize` must be a positive integer.")
if (s is not None) and (s <= 0):
raise ValueError("Parameter `resize` must be a positive integer.")
self._resize = s
def _augmented_series_length(self, T: int) -> int:
if isinstance(self.size, int):
size = [self.size]
elif isinstance(self.size, tuple):
size = list(range(self.size[0], self.size[1]))
else:
size = self.size
if self.resize is not None:
resize = self.resize
else:
if len(size) > 1:
raise ValueError(
"Parameter `resize` must be specified if parameter `size` "
"is not a single value."
)
else:
resize = size[0]
return resize
def _augment(
self, X: np.ndarray, Y: Optional[np.ndarray]
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Overwrite the memory-expensive base method.
"""
N, T, C = X.shape
rand = np.random.RandomState(self.seed)
if self.prob != 1.0:
# it implies N == 1 and self.repeats == 1
if rand.uniform() > self.prob:
if Y is None:
return X.copy(), None
else:
return X.copy(), Y.copy()
if isinstance(self.size, int):
size = [self.size]
elif isinstance(self.size, tuple):
size = list(range(self.size[0], self.size[1]))
else:
size = self.size
if self.resize is not None:
resize = self.resize
else:
if len(size) > 1:
raise ValueError(
"Parameter `resize` must be specified if parameter `size` "
"is not a single value."
)
else:
resize = size[0]
X_aug = np.zeros((N * self.repeats, resize, C))
if Y is None:
Y_aug = None
else:
L = Y.shape[2]
Y_aug = np.zeros((N * self.repeats, resize, L))
crop_size = rand.choice(size, size=N * self.repeats)
resizer = Resize(resize)
for s in np.unique(crop_size):
n = (crop_size == s).sum()
crop_start = rand.choice(T - s + 1, size=n)
X_aug[crop_size == s, :, :] = resizer.augment(
X[
np.repeat(
np.repeat(np.arange(N), self.repeats)[crop_size == s],
s,
)
.reshape(n, s)
.astype(int),
(
crop_start.reshape(n, 1) + np.arange(s).reshape(1, s)
).astype(int),
:,
].reshape((n, s, C))
)
if (Y is not None) and (Y_aug is not None):
Y_aug[crop_size == s, :, :] = resizer.augment(
Y[
np.repeat(
np.repeat(np.arange(N), self.repeats)[
crop_size == s
],
s,
)
.reshape(n, s)
.astype(int),
(
crop_start.reshape(n, 1)
+ np.arange(s).reshape(1, s)
).astype(int),
:,
].reshape((n, s, L))
)
return X_aug, Y_aug
def _augment_core(
self, X: np.ndarray, Y: Optional[np.ndarray]
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"Method _augment is overwritten, therefore this method is not needed."
pass
```
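Not part of the file above: a minimal usage sketch of the `Crop` augmenter whose `_augment` implementation closes above. The constructor signature is assumed from the `size`/`resize` properties and `_get_param_name`; shapes and values are illustrative only.
```python
import numpy as np
from tsaug import Crop

X = np.random.uniform(size=(2, 100, 3))      # (N, T, C)

# crop random windows of length 40, then resize each window to 60 time points
augmenter = Crop(size=40, resize=60)
X_aug = augmenter.augment(X)                 # expected shape (2, 60, 3)
```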
#### File: tsaug/_augmenter/quantize.py
```python
from typing import List, Optional, Tuple, Union
import numpy as np
from .base import _Augmenter, _default_seed
class Quantize(_Augmenter):
"""
Quantize time series to a level set.
Values in a time series are rounded to the nearest level in the level set.
Parameters
----------
n_levels : int, tuple, or list, optional
        The number of levels in a level set.
- If int, all series (all channels if `per_channel` is True) are
quantized to a level set of this size.
- If list, a series (a channel if `per_channel` is True) is quantized
to a level set whose size is sampled from this list randomly.
- If 2-tuple, a series (a channel if `per_channel` is True) is quantized
to a level set whose size is sampled from this interval randomly.
Default: 10.
how : str, optional
The method that a level set is defined.
- If 'uniform', a level set is defined by uniformly discretizing the
range of this channel in this series.
- If 'quantile', a level set is defined by the quantiles of values in
this channel in this series.
- If 'kmeans', a level set is defined by k-means clustering of values
in this channel in this series. Note that this method could be slow.
Default: 'uniform'.
per_channel : bool, optional
Whether to sample a level set size for each channel in a time series or
to use the same size for all channels in a time series. Only used if
the level set size is not deterministic. Default: False.
repeats : int, optional
The number of times a series is augmented. If greater than one, a series
        will be augmented that many times independently. This parameter can also
be set by operator `*`. Default: 1.
prob : float, optional
        The probability that a series is augmented. It must be in (0.0, 1.0]. This
parameter can also be set by operator `@`. Default: 1.0.
seed : int, optional
The random seed. Default: None.
"""
def __init__(
self,
n_levels: Union[int, Tuple[int, int], List[int]] = 10,
how: str = "uniform",
per_channel: bool = False,
repeats: int = 1,
prob: float = 1.0,
seed: Optional[int] = _default_seed,
):
self.n_levels = n_levels
self.how = how
self.per_channel = per_channel
super().__init__(repeats=repeats, prob=prob, seed=seed)
@classmethod
def _get_param_name(cls) -> Tuple[str, ...]:
return ("n_levels", "how", "per_channel")
@property
def n_levels(self) -> Union[int, Tuple[int, int], List[int]]:
return self._n_levels
@n_levels.setter
def n_levels(self, n: Union[int, Tuple[int, int], List[int]]) -> None:
N_LEVELS_ERROR_MSG = (
"Parameter `n_levels` must be a positive integer, "
"a 2-tuple of positive integers representing an interval, "
"or a list of positive integers."
)
if not isinstance(n, int):
if isinstance(n, list):
if len(n) == 0:
raise ValueError(N_LEVELS_ERROR_MSG)
if not all([isinstance(nn, int) for nn in n]):
raise TypeError(N_LEVELS_ERROR_MSG)
if not all([nn > 0 for nn in n]):
raise ValueError(N_LEVELS_ERROR_MSG)
elif isinstance(n, tuple):
if len(n) != 2:
raise ValueError(N_LEVELS_ERROR_MSG)
if (not isinstance(n[0], int)) or (not isinstance(n[1], int)):
raise TypeError(N_LEVELS_ERROR_MSG)
if n[0] >= n[1]:
raise ValueError(N_LEVELS_ERROR_MSG)
if (n[0] <= 0) or (n[1] <= 0):
raise ValueError(N_LEVELS_ERROR_MSG)
else:
raise TypeError(N_LEVELS_ERROR_MSG)
elif n <= 0:
raise ValueError(N_LEVELS_ERROR_MSG)
self._n_levels = n
@property
def how(self) -> str:
return self._how
@how.setter
def how(self, h: str) -> None:
HOW_ERROR_MSG = "Parameter `how` must be one of 'uniform', 'quantile', and 'kmeans'."
if not isinstance(h, str):
raise TypeError(HOW_ERROR_MSG)
if h not in ["uniform", "quantile", "kmeans"]:
raise ValueError(HOW_ERROR_MSG)
self._how = h
@property
def per_channel(self) -> bool:
return self._per_channel
@per_channel.setter
def per_channel(self, p: bool) -> None:
if not isinstance(p, bool):
raise TypeError("Paremeter `per_channel` must be boolean.")
self._per_channel = p
def _augment_core(
self, X: np.ndarray, Y: Optional[np.ndarray]
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
rand = np.random.RandomState(self.seed)
N, T, C = X.shape
if isinstance(self.n_levels, int):
n_levels = (np.ones((N, 1, C)) * self.n_levels).astype(int)
elif isinstance(self.n_levels, list):
if self.per_channel:
n_levels = rand.choice(self.n_levels, size=(N, 1, C)).astype(
int
)
else:
n_levels = rand.choice(self.n_levels, size=(N, 1, 1)).astype(
int
)
n_levels = np.repeat(n_levels, C, axis=2)
else:
if self.per_channel:
n_levels = rand.choice(
range(self.n_levels[0], self.n_levels[1]), size=(N, 1, C)
).astype(int)
else:
n_levels = rand.choice(
range(self.n_levels[0], self.n_levels[1]), size=(N, 1, 1)
).astype(int)
n_levels = np.repeat(n_levels, C, axis=2)
if self.how == "uniform":
series_min = X.min(axis=1, keepdims=True)
series_max = X.max(axis=1, keepdims=True)
series_range = series_max - series_min
series_range[series_range == 0] = 1
X_aug = (X - series_min) / series_range
X_aug = X_aug * n_levels
X_aug = X_aug.round()
X_aug = X_aug.clip(0, n_levels - 1)
X_aug = X_aug + 0.5
X_aug = X_aug / n_levels
X_aug = X_aug * series_range + series_min
elif self.how == "quantile":
n_levels = n_levels.flatten()
X_aug = X.copy()
X_aug = X_aug.swapaxes(1, 2).reshape((N * C, T))
for i in range(len(X_aug)):
bins = np.percentile(
                    X_aug[i, :], np.arange(n_levels[i] + 1) / n_levels[i] * 100
)
bins_center = np.percentile(
X_aug[i, :],
                    np.arange(0.5, n_levels[i]) / n_levels[i] * 100,
)
                # shift digitize's 1-based bin indices to 0-based center indices
                X_aug[i, :] = bins_center[
                    (np.digitize(X_aug[i, :], bins) - 1).clip(0, n_levels[i] - 1)
                ]
X_aug = X_aug.reshape(N, C, T).swapaxes(1, 2)
else:
try:
from sklearn.cluster import KMeans
except ImportError:
raise ImportError(
"To use kmeans quantization, sklearn>=0.22 must be installed."
)
n_levels = n_levels.flatten()
X_aug = X.copy()
X_aug = X.swapaxes(1, 2).reshape((N * C, T))
model = KMeans(n_clusters=2, n_jobs=-1, random_state=self.seed)
for i in range(len(X_aug)):
model.n_clusters = n_levels[i]
ind = model.fit_predict(X_aug[i].reshape(-1, 1))
X_aug[i, :] = model.cluster_centers_[ind, :].flatten()
X_aug = X_aug.reshape(N, C, T).swapaxes(1, 2)
if Y is not None:
Y_aug = Y.copy()
else:
Y_aug = None
return X_aug, Y_aug
```
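Not part of the source file above: a minimal usage sketch of `Quantize`, assuming `tsaug` is installed and exports the class as the test module later in this dump does. Shapes and parameter values are illustrative only.
```python
import numpy as np
from tsaug import Quantize

X = np.random.uniform(size=(2, 100, 3))      # (N, T, C)

# level-set size drawn from the interval [5, 10) independently per channel
augmenter = Quantize(n_levels=(5, 10), how="uniform", per_channel=True, seed=0)
X_aug = augmenter.augment(X)                 # same shape; values snapped to the sampled levels
```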
#### File: tsaug/_augmenter/reverse.py
```python
from typing import Optional, Tuple
import numpy as np
from .base import _Augmenter, _default_seed
class Reverse(_Augmenter):
"""
Reverse the time line of series.
Parameters
----------
repeats : int, optional
The number of times a series is augmented. If greater than one, a series
        will be augmented that many times independently. This parameter can also
be set by operator `*`. Default: 1.
prob : float, optional
        The probability that a series is augmented. It must be in (0.0, 1.0]. This
parameter can also be set by operator `@`. Default: 1.0.
seed : int, optional
The random seed. Default: None.
"""
def __init__(
self,
repeats: int = 1,
prob: float = 1.0,
seed: Optional[int] = _default_seed,
):
super().__init__(repeats=repeats, prob=prob, seed=seed)
@classmethod
def _get_param_name(cls) -> Tuple[str, ...]:
return tuple()
def _augment_core(
self, X: np.ndarray, Y: Optional[np.ndarray]
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
X_aug = X[:, ::-1, :].copy() # type: np.ndarray
if Y is None:
Y_aug = None # type: Optional[np.ndarray]
else:
Y_aug = Y[:, ::-1, :].copy()
return X_aug, Y_aug
```
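A quick sketch (not in the source file) of `Reverse` combined with the `*` and `@` operators described in its docstring; shapes are illustrative.
```python
import numpy as np
from tsaug import Reverse

X = np.random.uniform(size=(4, 50))   # (N, T)

# repeats=2 via `*`, prob=0.5 via `@`: each input yields two independent
# copies, each reversed with probability 0.5
augmenter = Reverse() * 2 @ 0.5
X_aug = augmenter.augment(X)          # expected shape (8, 50) given repeats=2
```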
#### File: tsaug/tests/test_pipe.py
```python
import numpy as np
import pytest
from tsaug import (
AddNoise,
Convolve,
Crop,
Drift,
Dropout,
Pool,
Quantize,
Resize,
Reverse,
TimeWarp,
)
rand = np.random.RandomState(123)
N = 10
T = 100
C = 3
L = 2
X1 = np.random.uniform(size=T)
X2 = np.random.uniform(size=(N, T))
X3 = np.random.uniform(size=(N, T, C))
Y1 = np.random.choice(2, size=T).astype(int)
Y2 = np.random.choice(2, size=(N, T)).astype(int)
Y3 = np.random.choice(2, size=(N, T, L)).astype(int)
def test_pipe():
augmenter = (
AddNoise() * 2 @ 0.5
+ (Crop(size=int(T / 2)) * 2 + Drift())
+ (Dropout() @ 0.5 + Pool())
+ Quantize() * 2
)
augmenter.augment(X1)
augmenter.augment(X1, Y1)
augmenter.augment(X2)
augmenter.augment(X2, Y2)
augmenter.augment(X2, Y3)
augmenter.augment(X3)
augmenter.augment(X3, Y2)
augmenter.augment(X3, Y3)
augmenter.summary()
assert len(augmenter) == 6
exchange = Resize(size=int(T / 2)) * 2 @ 0.5
augmenter[3] = exchange
assert augmenter[3] is exchange
exchange.resize = int(T / 3)
exchange.repeats = 3
exchange.prob = 0.4
assert isinstance(augmenter[3], Resize)
assert augmenter[3].resize == int(T / 3)
assert augmenter[3].repeats == 3
assert augmenter[3].prob == 0.4
```
|
{
"source": "jgruber99/Sampford_Sampling",
"score": 3
}
|
#### File: fairseq/modules/bert_layer_norm.py
```python
import torch
import torch.nn as nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""
Construct a layernorm module in the TF style used with BERT
(epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
```
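A short usage sketch (not in the source file) of the layer norm module above, assuming the class is in scope; tensor sizes are illustrative.
```python
import torch

hidden = torch.randn(2, 8, 768)                        # (batch, seq_len, hidden_size)
layer_norm = BertLayerNorm(hidden_size=768, eps=1e-12)
normalized = layer_norm(hidden)                        # same shape, normalized over the last dim
print(normalized.shape)                                # torch.Size([2, 8, 768])
```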
#### File: Sampford_Sampling/fairseq/search.py
```python
import math
import torch
import torch.nn.functional as F
from fairseq.gumbel import gumbel_like, gumbel_with_maximum
import numpy as np
from fairseq.cps_dp import sample
from fairseq.cps_dp import sampford_sample #changed by the A-Team
class Search(object):
def __init__(self, tgt_dict):
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.scores_buf = None
self.log_ps_buf = None
self.log_ps_t_buf = None
self.indices_buf = None
self.beams_buf = None
def _init_buffers(self, t):
if self.scores_buf is None:
self.scores_buf = t.new()
self.log_ps_buf = t.new()
self.log_ps_t_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
self.beams_buf = torch.LongTensor().to(device=t.device)
def step(self, step, lprobs, scores, beam_size):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
# class CPS(Search):
# def __init__(self, tgt_dict, sampling_topk=-1, sampling_temperature=1.0, nucleus_p=1.):
# super().__init__(tgt_dict)
# self.sampling_topk = sampling_topk
# assert self.sampling_topk == -1
# self.sampling_temperature = sampling_temperature
# self.log_threshold = np.log(nucleus_p) # threshold for nucleus sampling
# def _init_buffers(self, t):
# super()._init_buffers(t)
# self.remaining_subsetsum_product_probs = None
# self.subset_sum_product_probs = None
# self.dp = None
# self.p = None
# self.samples_idx = torch.LongTensor().to(device=t.device)
# self.log_inclusion_probs = torch.FloatTensor().to(device=t.device)
# def log_nucleus_multinomial_sample(self, logp, k):
# """
# This function filters elements that have more than a threshold probability mass
# @param logp: log-probability distribution (unnormalized is ok) over discrete random variable
# @param k: final sample size
# @return: nucleus samples indices and their log probabilities
# """
# assert self.log_threshold <= 0
# def log_softmax(x):
# c = x.max()
# logsumexp = np.log(np.exp(x - c).sum())
# return x - c - logsumexp
# logp = log_softmax(logp)
# sorted_inds = np.argsort(-logp)
# sorted_logits = logp[sorted_inds]
# cumulative_lprobs = np.logaddexp.accumulate(sorted_logits)
# sorted_indices_to_pick = cumulative_lprobs <= self.log_threshold
# inds = sorted_inds[sorted_indices_to_pick]
# if len(inds) < k: # we don't want to end up having less than k samples
# inds = sorted_inds[0:k]
# return inds, logp[inds]
# def cps_sample(self, logp, k, bsz, maxlen):
# """
# This function iterates through the batches and use cps sampling function in Cython to generate samples
# @param logp: log probabilities of candidates
# @param k: sample size
# @param bsz: batch size
# @param maxlen: maximum number of hypothesis which should be vocab size in NMT models
# @return: inclusion probabilities for all the candidates and samples indices
# """
# n = logp.size()[1]
# torch.zeros([bsz, k], dtype=torch.int64, out=self.samples_idx)
# torch.zeros([bsz, n], out=self.log_inclusion_probs)
# logp_np = logp.detach().cpu().numpy()
# logp_np = logp_np.astype(np.float64)
# samples = []
# incs = []
# for j in range(bsz):
# selected_inds, logits = self.log_nucleus_multinomial_sample(logp_np[j, :], k)
# sample_idx, sample_inc = sample(logits, selected_inds, k) #Returns k words sampled at a given timestep
# extended_inc = np.zeros(maxlen)
# extended_inc[sample_idx] = sample_inc
# samples.append(sample_idx)
# incs.append(extended_inc)
# sample_idx_np = np.asarray(samples)
# log_inclusion_probs_np = np.asarray(incs)
# self.samples_idx = torch.from_numpy(sample_idx_np).to(device=logp.device)
# self.log_inclusion_probs = torch.from_numpy(log_inclusion_probs_np).to(device=logp.device)
# _, indices = torch.sort(torch.gather(logp, -1, self.samples_idx), descending=True)
# return self.log_inclusion_probs, torch.gather(self.samples_idx, -1, indices)
# def step(self, step, lprobs, log_ps_t_copy, log_inc_probs, log_ps_t):
# bsz, beam_size, vocab_size = lprobs.size()
# self._init_buffers(lprobs)
# lprobs_t = lprobs.clone()
# if self.sampling_temperature != 1.0:
# lprobs_t = F.log_softmax(lprobs / self.sampling_temperature, -1)
# if step == 0:
# # at the first step all hypotheses are equally likely, so use
# # only the first beam
# lprobs_t = lprobs_t[:, ::beam_size, :].contiguous()
# else:
# # Gather cumulative
# lprobs_t.add_(log_ps_t[:, :, step - 1].unsqueeze(-1))
# maxlen = lprobs_t.view(bsz, -1).size(1)
# cand_scores, self.indices_buf = self.cps_sample(lprobs_t.view(bsz, -1),
# min(beam_size * 2, maxlen - 1),
# bsz, maxlen)
# cand_scores = cand_scores.to(dtype=torch.float32) # scores are inclusion probs
# if step != 0:
# cand_scores = cand_scores.view(bsz, beam_size, -1)
# cand_scores.add_(log_inc_probs[:, :, step - 1].unsqueeze(-1))
# torch.gather(
# lprobs_t.view(bsz, -1), -1, self.indices_buf, out=self.log_ps_t_buf
# )
# torch.gather(
# cand_scores.view(bsz, -1), -1, self.indices_buf, out=self.scores_buf
# )
# torch.floor_divide(self.indices_buf, vocab_size, out=self.beams_buf)
# self.indices_buf.fmod_(vocab_size)
# return self.log_ps_t_buf, self.scores_buf, self.log_ps_t_buf, self.indices_buf, self.beams_buf
class CPS(Search):
def __init__(self, tgt_dict, sampling_topk=-1, sampling_temperature=1.0, nucleus_p=1.):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
assert self.sampling_topk == -1
self.sampling_temperature = sampling_temperature
self.log_threshold = np.log(nucleus_p) # threshold for nucleus sampling
def _init_buffers(self, t):
super()._init_buffers(t)
self.remaining_subsetsum_product_probs = None
self.subset_sum_product_probs = None
self.dp = None
self.p = None
self.samples_idx = torch.LongTensor().to(device=t.device)
self.log_inclusion_probs = torch.FloatTensor().to(device=t.device)
def log_nucleus_multinomial_sample(self, logp, k):
"""
        This function keeps the top candidates whose cumulative probability mass is within the nucleus (top-p) threshold
@param logp: log-probability distribution (unnormalized is ok) over discrete random variable
@param k: final sample size
@return: nucleus samples indices and their log probabilities
"""
assert self.log_threshold <= 0
def log_softmax(x):
c = x.max()
logsumexp = np.log(np.exp(x - c).sum())
return x - c - logsumexp
logp = log_softmax(logp)
sorted_inds = np.argsort(-logp)
sorted_logits = logp[sorted_inds]
cumulative_lprobs = np.logaddexp.accumulate(sorted_logits)
sorted_indices_to_pick = cumulative_lprobs <= self.log_threshold
inds = sorted_inds[sorted_indices_to_pick]
if len(inds) < k: # we don't want to end up having less than k samples
inds = sorted_inds[0:k]
return inds, logp[inds]
def cps_sample(self, logp, k, bsz, maxlen):
"""
        This function iterates over the batch and uses the Cython sampling routine to generate samples
@param logp: log probabilities of candidates
@param k: sample size
@param bsz: batch size
        @param maxlen: maximum number of hypotheses, which should be the vocab size in NMT models
        @return: inclusion probabilities for all the candidates and the sampled indices
"""
n = logp.size()[1]
torch.zeros([bsz, k], dtype=torch.int64, out=self.samples_idx)
torch.zeros([bsz, n], out=self.log_inclusion_probs)
logp_np = logp.detach().cpu().numpy()
logp_np = logp_np.astype(np.float64)
samples = []
incs = []
for j in range(bsz):
selected_inds, logits = self.log_nucleus_multinomial_sample(logp_np[j, :], k)
sample_idx, sample_inc = sampford_sample(logits, selected_inds, k) #Returns k words sampled at a given timestep
extended_inc = np.zeros(maxlen)
extended_inc[sample_idx] = sample_inc
samples.append(sample_idx)
incs.append(extended_inc)
sample_idx_np = np.asarray(samples)
log_inclusion_probs_np = np.asarray(incs)
self.samples_idx = torch.from_numpy(sample_idx_np).to(device=logp.device)
self.log_inclusion_probs = torch.from_numpy(log_inclusion_probs_np).to(device=logp.device)
_, indices = torch.sort(torch.gather(logp, -1, self.samples_idx), descending=True)
return self.log_inclusion_probs, torch.gather(self.samples_idx, -1, indices)
def step(self, step, lprobs, log_ps_t_copy, log_inc_probs, log_ps_t):
bsz, beam_size, vocab_size = lprobs.size()
self._init_buffers(lprobs)
lprobs_t = lprobs.clone()
if self.sampling_temperature != 1.0:
lprobs_t = F.log_softmax(lprobs / self.sampling_temperature, -1)
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs_t = lprobs_t[:, ::beam_size, :].contiguous()
else:
# Gather cumulative
lprobs_t.add_(log_ps_t[:, :, step - 1].unsqueeze(-1))
maxlen = lprobs_t.view(bsz, -1).size(1)
cand_scores, self.indices_buf = self.cps_sample(lprobs_t.view(bsz, -1),
min(beam_size * 2, maxlen - 1),
bsz, maxlen)
cand_scores = cand_scores.to(dtype=torch.float32) # scores are inclusion probs
if step != 0:
cand_scores = cand_scores.view(bsz, beam_size, -1)
cand_scores.add_(log_inc_probs[:, :, step - 1].unsqueeze(-1))
torch.gather(
lprobs_t.view(bsz, -1), -1, self.indices_buf, out=self.log_ps_t_buf
)
torch.gather(
cand_scores.view(bsz, -1), -1, self.indices_buf, out=self.scores_buf
)
torch.floor_divide(self.indices_buf, vocab_size, out=self.beams_buf)
self.indices_buf.fmod_(vocab_size)
return self.log_ps_t_buf, self.scores_buf, self.log_ps_t_buf, self.indices_buf, self.beams_buf
class BeamSearch(Search):
def __init__(self, tgt_dict, naive_stochastic=False, stochastic=False, sampling_topk=-1, sampling_temperature=1.0):
super().__init__(tgt_dict)
self.stochastic = stochastic
self.naive_stochastic = naive_stochastic
self.sampling_topk = sampling_topk
assert self.sampling_topk == -1, "Sampling top-k for beam search not yet supported"
self.sampling_temperature = sampling_temperature
def step(self, step, lprobs, scores, log_ps, log_ps_t):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
lprobs_t = lprobs.clone()
if self.sampling_temperature != 1.0:
lprobs_t = F.log_softmax(lprobs / self.sampling_temperature, -1)
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs_t = lprobs_t[:, ::beam_size, :].contiguous()
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.stochastic or self.naive_stochastic:
cand_scores = gumbel_like(lprobs_t) + lprobs_t
else:
cand_scores = lprobs_t
else:
# make probs contain cumulative scores for each hypothesis
lprobs_t.add_(log_ps_t[:, :, step - 1].unsqueeze(-1))
lprobs.add_(log_ps[:, :, step - 1].unsqueeze(-1))
if self.stochastic:
assert self.sampling_topk == -1
cand_scores, _ = gumbel_with_maximum(lprobs_t, scores[:, :, step - 1], -1)
else:
cand_scores = lprobs_t
torch.topk(
cand_scores.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
cand_scores.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
# Gather cumulative
torch.gather(
lprobs.view(bsz, -1), -1, self.indices_buf, out=self.log_ps_buf
)
if self.stochastic or self.naive_stochastic:
torch.gather(
lprobs_t.view(bsz, -1), -1, self.indices_buf, out=self.log_ps_t_buf
)
else:
self.log_ps_t_buf = self.scores_buf
torch.floor_divide(self.indices_buf, vocab_size, out=self.beams_buf)
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.log_ps_buf, self.log_ps_t_buf, self.indices_buf, self.beams_buf
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
def step(self, step, lprobs, scores):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step == max_lens, :, self.eos] = 0
lprobs[step > max_lens, :, self.eos] = -math.inf
return self.beam.step(step, lprobs, scores)
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.diversity_buf = None
self.beam = BeamSearch(tgt_dict)
def step(self, step, lprobs, scores, log_ps, log_ps_t):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
'DiverseBeamSearch requires --beam to be divisible by the number of groups'
)
# initialize diversity penalty
if self.diversity_buf is None:
self.diversity_buf = lprobs.new()
torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g::self.num_groups, :]
scores_g = scores[:, g::self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(lprobs_g, self.diversity_strength, self.diversity_buf.unsqueeze(1))
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, _, _, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g, scores_g, scores_g)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
self.diversity_buf.scatter_add_(
1,
indices_buf,
self.diversity_buf.new_ones(indices_buf.size())
)
# interleave results from different groups
self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1)
self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1)
self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1)
return self.scores_buf, self.scores_buf, self.scores_buf, self.indices_buf, self.beams_buf
class Sampling(Search):
def __init__(self, tgt_dict, sampling_topk=-1, sampling_temperature=1.):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_temperature = sampling_temperature
def step(self, step, lprobs, scores, log_ps, log_ps_t):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
# we exclude the first two vocab items, one of which is pad
assert self.pad <= 1, 'sampling assumes the first two symbols can be ignored'
lprobs_nopad = lprobs[:, :, 2:]
# only sample from top-k candidates
if self.sampling_topk > 0:
lprobs_nopad, topk_indices = lprobs_nopad.topk(self.sampling_topk)
# sampling temperature
if self.sampling_temperature != 1.:
lprobs_nopad_t = F.log_softmax(lprobs_nopad / self.sampling_temperature, -1)
else:
lprobs_nopad_t = lprobs_nopad
# sample
probs_nopad_t = lprobs_nopad_t.exp()
if step == 0:
self.indices_buf = torch.multinomial(
probs_nopad_t.view(bsz, -1),
beam_size,
replacement=True,
out=self.indices_buf,
).view(bsz, beam_size)
else:
self.indices_buf = torch.multinomial(
probs_nopad_t.view(bsz * beam_size, -1),
1,
replacement=True,
out=self.indices_buf,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
lprobs_nopad = lprobs_nopad.expand(bsz, beam_size, -1)
lprobs_nopad_t = lprobs_nopad_t.expand(bsz, beam_size, -1)
# gather probs
torch.gather(
lprobs_nopad,
dim=2,
index=self.indices_buf.unsqueeze(-1),
out=self.log_ps_buf,
)
torch.gather(
lprobs_nopad_t,
dim=2,
index=self.indices_buf.unsqueeze(-1),
out=self.log_ps_t_buf,
)
self.log_ps_buf = self.log_ps_buf.view(bsz, -1)
self.log_ps_t_buf = self.log_ps_t_buf.view(bsz, -1)
# remap indices if using top-k sampling
if self.sampling_topk > 0:
self.indices_buf = torch.gather(
topk_indices.expand(bsz, beam_size, -1),
dim=2,
index=self.indices_buf.unsqueeze(-1),
).squeeze(2)
# remap indices since we excluded the first two vocab items
self.indices_buf.add_(2)
if step == 0:
self.beams_buf = self.indices_buf.new_zeros(bsz, beam_size)
else:
self.beams_buf = torch.arange(0, beam_size, out=self.beams_buf).repeat(bsz, 1)
# make log_ps cumulative
self.log_ps_buf.add_(
torch.gather(
log_ps[:, :, step - 1],
dim=1,
index=self.beams_buf,
)
)
# make log_ps_t cumulative
self.log_ps_t_buf.add_(
torch.gather(
log_ps_t[:, :, step - 1],
dim=1,
index=self.beams_buf,
)
)
# Scores buf is not used for sampling
self.scores_buf = self.log_ps_buf.clone()
return self.scores_buf, self.log_ps_buf, self.log_ps_t_buf, self.indices_buf, self.beams_buf
```
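A standalone NumPy sketch (not in the source file) of the log-space nucleus (top-p) filtering that `CPS.log_nucleus_multinomial_sample` performs before handing candidates to the Sampford/CPS sampler; the probabilities and threshold are illustrative.
```python
import numpy as np

def nucleus_filter(logits, log_threshold, k):
    # normalize to log-probabilities (log-softmax)
    logp = logits - logits.max()
    logp = logp - np.log(np.exp(logp).sum())
    order = np.argsort(-logp)                           # most likely first
    cumulative = np.logaddexp.accumulate(logp[order])   # log of cumulative mass
    keep = order[cumulative <= log_threshold]
    if len(keep) < k:                                   # never return fewer than k candidates
        keep = order[:k]
    return keep, logp[keep]

logits = np.log(np.array([0.5, 0.3, 0.1, 0.05, 0.05]))
inds, logps = nucleus_filter(logits, np.log(0.9), k=2)
print(inds)  # indices of the candidates inside the 0.9 nucleus
```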
|
{
"source": "jgruberf5/beacon-airflow",
"score": 2
}
|
#### File: beacon_export_plugin/operators/beacon_metric_exporter_operator.py
```python
import time
import datetime
import json
import os
import re
import subprocess
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.exceptions import AirflowException
from beacon_export_plugin.hooks.beacon_hook import BeaconHook
from requests.exceptions import HTTPError
from line_protocol_parser import parse_line
# schema test7
SCHEMA_SKEL = [
{'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'REQUIRED'},
{'name': 'account_id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'source_id', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'evt', 'type': 'RECORD', 'mode': 'NULLABLE',
'fields': [
{'name': 'version', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'sourceName', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'sourceDescription', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'fields', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': []},
{'name': 'tags', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': []},
{'name': 'timestamp', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}
]
}
]
WRITE_FILE_DELAY_SECS = 1
class F5BeaconMetricQueryExporterOperator(BaseOperator):
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
beacon_conn_id: str = 'f5_beacon_default',
destination_dir: str = '/home/airflow/gcs/data',
start_timestamp: int = 0,
stop_timestamp: int = 0,
metric_file: str = None,
schema_file: str = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.beacon_conn_id = beacon_conn_id
self.destination_dir = destination_dir
self.start_timestamp = start_timestamp
self.stop_timestamp = stop_timestamp
self.beacon_hook = BeaconHook(beacon_conn_id)
self.schema = SCHEMA_SKEL
self.mfn = metric_file
self.sfn = schema_file
self.tags = {}
self.fields = {}
def execute(self, context):
if not self.mfn:
self.mfn = self.get_metrics_fn(context['dag_run'].run_id)
if not self.sfn:
self.sfn = self.get_schema_fn(context['dag_run'].run_id)
conn = self.beacon_hook.get_conn()
account_id = 'primary'
if 'account_id' in conn.extra_dejson:
account_id = conn.extra_dejson['account_id']
self.log.info('Executing extract metrics from f5 Beacon account %s between %s:%s into: %s',
account_id, self.start_timestamp, self.stop_timestamp, self.destination_dir)
known_measurements = self.beacon_hook.get_measurements()
self.log.info('found %s measurement for f5 Beacon account %s',
known_measurements, account_id)
if os.path.exists(self.mfn):
os.unlink(self.mfn)
for measurement in known_measurements:
            self.get_measurement_records(
                account_id, measurement, context['dag_run'].run_id)
        self.save_schema()
def get_metrics_fn(self, run_id):
dest_dir = os.path.join(self.destination_dir, run_id)
os.makedirs(dest_dir, exist_ok=True)
return os.path.join(dest_dir, 'line_metrics.json')
def get_schema_fn(self, run_id):
dest_dir = os.path.join(self.destination_dir, run_id)
os.makedirs(dest_dir, exist_ok=True)
return os.path.join(dest_dir, 'line_schema.json')
def get_field(self, field_name):
field_name = self.format_col_name(field_name)
key = field_name.lower()
if key in self.fields:
return self.fields[key]
return None
def get_tag(self, tag_name):
tag_name = self.format_col_name(tag_name)
key = tag_name.lower()
if key in self.tags:
return self.tags[key]
return None
def get_measurement_records(self, account_id, measurement, run_id):
batch_size = 9000
have_records = True
start_timestamp = int(self.start_timestamp)
stop_timestamp = int(self.stop_timestamp) + 1
offset_seconds = 0
self.load_schema()
while have_records:
if offset_seconds > 0:
start_timestamp = offset_seconds
query = "SELECT * FROM \"%s\" WHERE time > %s000000000 and time < %s000000000 ORDER BY time LIMIT %s" % (
measurement, start_timestamp, stop_timestamp, batch_size)
self.log.info(
'submitting query: %s to f5 Beacon metric API.', query)
try:
line_data = self.beacon_hook.query_metric(
query, output_line=True)
records = line_data.split("\n")
number_of_records = len(records)
if number_of_records:
self.log.info('writing %d records from f5 Beacon metrics API to %s',
number_of_records, self.destination_dir)
offset_seconds = self.output_to_file(
records, run_id, account_id)
if number_of_records < batch_size:
have_records = False
except HTTPError as he:
self.log.error(he)
if batch_size > 1:
batch_size = int(batch_size*0.9)
else:
raise AirflowException(
                        'could not export f5 Beacon metric for measurement %s after reducing record limit to 1' % measurement)
self.save_schema()
def load_schema(self):
if os.path.exists(self.sfn):
with open(self.sfn, 'r') as sf:
try:
self.schema = json.load(sf)
if not self.schema:
self.schema = SCHEMA_SKEL
except json.JSONDecodeError:
self.schema = SCHEMA_SKEL
self.populate_cols_from_schema()
def populate_cols_from_schema(self):
# reduce tags and fields through dict
for col in self.schema:
if col['name'] == 'evt':
for event_cols in col['fields']:
if event_cols['name'] == 'fields':
for field_cols in event_cols['fields']:
key = field_cols['name'].lower()
self.fields[key] = field_cols['name']
if event_cols['name'] == 'tags':
for tag_cols in event_cols['fields']:
key = tag_cols['name'].lower()
self.tags[key] = tag_cols['name']
def save_schema(self):
with open(self.sfn, 'w+') as sf:
json.dump(self.schema, sf, indent=4,
separators=(',', ': '))
time.sleep(WRITE_FILE_DELAY_SECS)
def format_col_name(self, existing_tag):
# converting to Camel case or all lower case
components = existing_tag.split('_')
converted = components[0] + ''.join(x.title() for x in components[1:])
components = converted.split('-')
converted = components[0] + ''.join(x.title() for x in components[1:])
if converted.isupper():
converted = converted.lower()
if converted[0].isalpha() and converted[0].isupper():
converted = converted[0].lower() + converted[1:]
if not (converted[0].isalpha() or converted[0] == '_'):
converted = "beacon%s" % converted
return converted
def add_field_to_schema(self, field_name, field_type):
field_name = self.format_col_name(field_name)
for col in self.schema:
if col['name'] == 'evt':
for c in col['fields']:
if c['name'] == 'fields':
c['fields'].append(
{
'name': field_name,
'type': field_type,
'mode': 'NULLABLE'
}
)
key = field_name.lower()
self.fields[key] = field_name
return field_name
def add_tag_to_schema(self, tag_name):
tag_name = self.format_col_name(tag_name)
for col in self.schema:
if col['name'] == 'evt':
for c in col['fields']:
if c['name'] == 'tags':
c['fields'].append(
{
'name': tag_name,
'type': 'STRING',
'mode': 'NULLABLE'
}
)
key = tag_name.lower()
self.tags[key] = tag_name
return tag_name
def output_to_file(self, lines, run_id, account_id):
df = open(self.mfn, 'a+')
largest_timestamp = 0
for line in lines:
if line:
data = parse_line(line)
# Transform
ms_timestamp = float(int(data['time']) / 1000000000)
fields_dict = {}
for fn in data['fields']:
val = data['fields'][fn]
tfn = self.get_field(fn)
if not tfn:
if type(val) == bool:
tfn = self.add_field_to_schema(fn, 'BOOL')
elif type(val) == int:
tfn = self.add_field_to_schema(fn, 'INT64')
elif type(val) == float:
tfn = self.add_field_to_schema(fn, 'FLOAT64')
else:
tfn = self.add_field_to_schema(fn, 'STRING')
fields_dict[tfn] = val
tags_dict = {}
for tn in data['tags']:
ttn = self.get_tag(tn)
if not ttn:
ttn = self.add_tag_to_schema(tn)
tags_dict[ttn] = str(data['tags'][tn])
transformed_data = {
'ts': ms_timestamp,
'account_id': "urn:f5_cs::acccount:%s" % account_id,
'source_id': None,
'evt': {
'version': '1.0',
'sourceName': data['measurement'],
'sourceDescription': "data imported from beacon for account %s" % account_id,
'fields': fields_dict,
'tags': tags_dict,
'timestamp': ms_timestamp
}
}
df.write("%s\n" % json.dumps(transformed_data))
if ms_timestamp > largest_timestamp:
largest_timestamp = int(ms_timestamp)
time.sleep(WRITE_FILE_DELAY_SECS)
df.close()
return largest_timestamp
class F5BeaconMetricQueryDailyExporterOperator(F5BeaconMetricQueryExporterOperator):
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
beacon_conn_id: str = 'f5_beacon_default',
destination_dir="/home/airflow/gcs/data",
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.beacon_conn_id = beacon_conn_id
self.destination_dir = destination_dir
self.beacon_hook = BeaconHook(self.beacon_conn_id)
self.date = None
def execute(self, context):
if not self.mfn:
self.mfn = self.get_metrics_fn(context['dag_run'].run_id)
if not self.sfn:
self.sfn = self.get_schema_fn(context['dag_run'].run_id)
self.date = str(context.get("execution_date").date())
conn = self.beacon_hook.get_conn()
account_id = 'primary'
if 'account_id' in conn.extra_dejson:
account_id = conn.extra_dejson['account_id']
self.log.info('Executing extract metrics from f5 Beacon account %s on %s into: %s',
account_id, self.date, self.destination_dir)
known_measurements = self.beacon_hook.get_measurements()
self.log.info('found %s measurement for f5 Beacon account %s',
known_measurements, account_id)
self.start_timestamp = start_timestamp = int(time.mktime(
datetime.datetime.strptime(self.date, '%Y-%m-%d').timetuple()))
self.stop_timestamp = start_timestamp + 86400
if os.path.exists(self.mfn):
os.unlink(self.mfn)
for measurement in known_measurements:
self.get_measurement_records(
account_id, measurement, context['dag_run'].run_id)
class F5BeaconMetricQueryHourlyExporterOperator(F5BeaconMetricQueryExporterOperator):
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
beacon_conn_id: str = 'f5_beacon_default',
destination_dir="/home/airflow/gcs/data",
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.beacon_conn_id = beacon_conn_id
self.destination_dir = destination_dir
self.beacon_hook = BeaconHook(self.beacon_conn_id)
def execute(self, context):
if not self.mfn:
self.mfn = self.get_metrics_fn(context['dag_run'].run_id)
if not self.sfn:
self.sfn = self.get_schema_fn(context['dag_run'].run_id)
conn = self.beacon_hook.get_conn()
account_id = 'primary'
if 'account_id' in conn.extra_dejson:
account_id = conn.extra_dejson['account_id']
self.stop_timestamp = int(time.mktime(
context.get("execution_date").timetuple()))
self.start_timestamp = self.stop_timestamp - 3600
self.log.info('Executing extract metrics from f5 Beacon account %s for %s - %s into: %s',
account_id, self.start_timestamp, self.stop_timestamp, self.destination_dir)
known_measurements = self.beacon_hook.get_measurements()
self.log.info('found %s measurement for f5 Beacon account %s',
known_measurements, account_id)
if os.path.exists(self.mfn):
os.unlink(self.mfn)
for measurement in known_measurements:
self.get_measurement_records(
account_id, measurement, context['dag_run'].run_id)
```
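A standalone sketch (not in the source file) of the column-name normalization that `F5BeaconMetricQueryExporterOperator.format_col_name` applies before adding InfluxDB tags and fields to the BigQuery-style schema; the helper name and inputs are hypothetical.
```python
def to_schema_col(name):
    # camel-case on '_' and '-', mirroring format_col_name above
    for sep in ('_', '-'):
        parts = name.split(sep)
        name = parts[0] + ''.join(p.title() for p in parts[1:])
    if name.isupper():
        name = name.lower()
    if name[0].isalpha() and name[0].isupper():
        name = name[0].lower() + name[1:]
    if not (name[0].isalpha() or name[0] == '_'):
        name = 'beacon%s' % name
    return name

print(to_schema_col('cpu_usage-percent'))  # cpuUsagePercent
print(to_schema_col('5xx_count'))          # beacon5xxCount
```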
|
{
"source": "jgruberf5/bigiq-cloudin",
"score": 2
}
|
#### File: site-packages/cloudinit/bigiq_onboard_utils.py
```python
import json
import logging
import os
import importlib
import subprocess
import urlparse
import socket
import time
import requests
import yaml
import shutil
import StringIO
from cloudinit import config_modules
from errno import ENOENT, ENOEXEC
LOG_FILE = '/var/log/f5-cloudinit.log'
OUT_DIR = '/var/lib/cloud'
MGMT_DHCP_TIMEOUT = 600
MCPD_TIMEOUT = 600
PROCESS_KILL_DELAY = 2
BIGSTART_DELAY = 5
ICONTROL_TIMEOUT = 600
URL_TIMEOUT = 600
BOOT_CLOUDINIT_DELAY = 5
MGMT_DHCP_LEASE_FILE = '/var/lib/dhclient/dhclient.leases'
ANSIBLE_VAR_FILE = '/var/lib/cloud/ansible/onboard/onboard_vars.yml'
DHCP_LEASE_DIR = OUT_DIR + '/dhclient'
SSH_KEY_FILE = '/root/.ssh/authorized_keys'
DEFAULT_DNS_SERVERS = ['8.8.8.8', '8.8.4.4']
DEFAULT_NTP_SERVERS = ['0.pool.ntp.org', '1.pool.ntp.org']
DEFAULT_TIMEZONE = 'UTC'
DEFAULT_DISCOVERY_INTERFACE = '1.1'
REMOVE_DHCP_LEASE_FILES = False
HOSTNAME_SET = False
NETWORKS_CONFIGURED = False
FIRST_BOOT_FILE = '/var/run/cloudinit-complete'
# because TMOS keeps moving CLI commands
SYSCMDS = {
'ansible-playbook': '/usr/local/bin/ansible-playbook',
'awk': '/bin/awk',
'bigstart': '/usr/bin/bigstart',
'blkid': '/sbin/blkid',
'cat': '/bin/cat',
'chpasswd': '/usr/sbin/chpasswd',
'curl': '/usr/bin/curl',
'cut': '/bin/cut',
'dhclient': '/sbin/dhclient',
'dmidecode': '/usr/sbin/dmidecode',
'echo': '/bin/echo',
'egrep': '/bin/egrep',
'grep': '/bin/grep',
'lsblk': '/bin/lsblk',
'mount': '/bin/mount',
'nohup': '/usr/bin/nohup',
'ip': '/sbin/ip',
'pkill': '/usr/bin/pkill',
'ps': '/bin/ps',
'route': '/sbin/route',
'sleep': '/bin/sleep',
'tmm': '/usr/bin/tmm',
'tmsh': '/usr/bin/tmsh',
'tr': '/usr/bin/tr',
'umount': '/bin/umount',
'usermod': '/usr/sbin/usermod',
'wc': '/usr/bin/wc'
}
MGMT_DEV_NAME = 'eth0'
def touch_file(filepath, times=None):
"""Touch file if needed"""
if not os.path.isfile(filepath):
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
with open(filepath, 'a'):
os.utime(filepath, times)
touch_file(LOG_FILE)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGFILE = logging.FileHandler(LOG_FILE)
LOGFILE.setLevel(logging.DEBUG)
LOGFILE.setFormatter(FORMATTER)
LOG.addHandler(LOGFILE)
def inject_public_ssh_keys(keys):
"""Injects discovered and metadata supplied SSH keys into the root account"""
while not os.path.exists(SSH_KEY_FILE):
LOG.error(
"root SSH key authorized_keys file missing.. retrying")
time.sleep(BOOT_CLOUDINIT_DELAY)
with open(SSH_KEY_FILE, 'a+') as keyfile:
for k in keys:
LOG.debug("writing SSH authorized key to %s", SSH_KEY_FILE)
keyfile.write(k + '\n')
def is_v6(address):
"""Determines if the supplied address is a valid IPv6 address"""
try:
socket.inet_pton(socket.AF_INET6, address)
return True
except socket.error:
return False
def is_v4(address):
"""Determines if the supplied address is a valid IPv4 address"""
try:
socket.inet_pton(socket.AF_INET, address)
return True
except socket.error:
return False
def is_mgmt_ip():
"""Test if the mgmt interface has an IP address assigned"""
fnull = open(os.devnull, 'w')
mgmt_ip = subprocess.Popen(
"%s addr show %s | %s '^\\s*inet '| %s -v 169.254 | %s -l" %
(SYSCMDS['ip'], MGMT_DEV_NAME, SYSCMDS['grep'], SYSCMDS['grep'],
SYSCMDS['wc']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
if int(mgmt_ip) == 1:
return True
mgmt_ip = subprocess.Popen(
"%s addr show %s | %s '^\\s*inet6 '| %s -v fe80 | %s -l" %
(SYSCMDS['ip'], MGMT_DEV_NAME, SYSCMDS['grep'], SYSCMDS['grep'],
SYSCMDS['wc']),
stdout=subprocess.PIPE,
shell=True).communicate()[0].replace('\n', '')
if int(mgmt_ip) == 1:
return True
return False
def get_mgmt_cidr():
"""Return the managment interface IP address in CIDR notation from tmsh"""
fnull = open(os.devnull, 'w')
mgmt_cidr = subprocess.Popen(
"%s list sys management-ip one-line | %s -d' ' -f3" %
(SYSCMDS['tmsh'], SYSCMDS['cut']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
return mgmt_cidr
def is_mgmt_default_gateway():
"""Test if the mgmt subnet has a default gateway"""
fnull = open(os.devnull, 'w')
mgmt_gw = subprocess.Popen("%s -n | %s '^0.0.0.0'| %s %s | %s -l" %
(SYSCMDS['route'], SYSCMDS['grep'],
SYSCMDS['grep'], MGMT_DEV_NAME, SYSCMDS['wc']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
if int(mgmt_gw) == 1:
return True
return False
def get_tmos_version():
"""Get the TMOS version string"""
fnull = open(os.devnull, 'w')
version = subprocess.Popen(
"%s /VERSION | %s -i sequence | %s -d':' -f2 | %s '[A-Z]' '[a-z]' | %s -d '[:space:]'"
% (SYSCMDS['cat'], SYSCMDS['grep'], SYSCMDS['cut'], SYSCMDS['tr'],
SYSCMDS['tr']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
return version
def get_tmos_product():
"""Get the TMOS product string"""
fnull = open(os.devnull, 'w')
product = subprocess.Popen(
"%s show sys version | %s Product | %s '{print $NF}'" %
(SYSCMDS['tmsh'], SYSCMDS['grep'], SYSCMDS['awk']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
return product
def get_dmi_uuid():
"""Get the system UUID from DMI"""
fnull = open(os.devnull, 'w')
uuid = subprocess.Popen(
"%s | %s -i UUID | %s -d':' -f2 | %s '[A-Z]' '[a-z]' | %s -d '[:space:]'"
% (SYSCMDS['dmidecode'], SYSCMDS['grep'], SYSCMDS['cut'],
SYSCMDS['tr'], SYSCMDS['tr']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
return uuid
def get_hostname():
"""Get the system hostname"""
return socket.gethostname()
def set_password(username=None, password=None):
"""Set a local user password"""
LOG.info('setting password for user: %s' % username)
fnull = open(os.devnull, 'w')
if username:
if not password:
disable_user(username)
else:
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
cmd = subprocess.Popen(SYSCMDS['chpasswd'],
stdin=stdin,
stdout=stdout,
stderr=stderr)
cmd.communicate("%s:%s" % (username, password))
def set_password_tmsh(username=None, password=None):
"""Set a local user password via tmsh"""
LOG.info('setting password via tmsh for user: %s' % username)
wait_for_mcpd()
fnull = open(os.devnull, 'w')
cmd = "%s modify auth user %s password '%s'" % (SYSCMDS['tmsh'],
username,
password)
setpassword = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=fnull,
shell=True,
).communicate()[0].replace('\n', '')
def disable_user(username=None):
"""Disable local user"""
LOG.info('disabling user: %s' % username)
fnull = open(os.devnull, 'w')
if username:
subprocess.call([SYSCMDS['usermod'], '-L', username],
stdout=fnull,
stderr=fnull)
def enable_user(username=None):
"""Disable local user"""
LOG.info('enabling user: %s' % username)
fnull = open(os.devnull, 'w')
if username:
subprocess.call([SYSCMDS['usermod'], '-U', username],
stdout=fnull,
stderr=fnull)
def run_cmd(cmd):
"""Run a CLI command and return its output"""
fnull = open(os.devnull, 'w')
cmd_stdout = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace(
'\n', '')
LOG.debug('running command %s produced %s', cmd, cmd_stdout)
return cmd_stdout
def wait_for_mgmt_dhcp(timeout=None):
"""Blocks until the mgmt DHCP lease file is present"""
if not timeout:
timeout = MGMT_DHCP_TIMEOUT
end_time = time.time() + timeout
while (end_time - time.time()) > 0:
if os.path.isfile(MGMT_DHCP_LEASE_FILE
) and os.path.getsize(MGMT_DHCP_LEASE_FILE) > 0:
return True
LOG.info('waiting for mgmt DHCP request to complete')
time.sleep(1)
return False
def is_mcpd():
"""Determines if the TMOS master control process is running"""
fnull = open(os.devnull, 'w')
running = subprocess.Popen(
"%s -a show sys mcp-state field-fmt | %s running | %s -l" %
(SYSCMDS['tmsh'], SYSCMDS['grep'], SYSCMDS['wc']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
if int(running) == 1:
return True
return False
def wait_for_mcpd(timeout=None):
"""Blocks until the TMOS master control process is running"""
if not timeout:
timeout = MCPD_TIMEOUT
end_time = time.time() + timeout
while (end_time - time.time()) > 0:
if is_mcpd():
return True
time.sleep(1)
    LOG.error('mcpd did not reach running state in %s seconds', timeout)
return False
def is_tmm():
"""Determines if the TMOS dataplane microkernels are running"""
tmm_running = int(
subprocess.Popen("%s -ef|%s %s|%s -v grep|%s -l| %s -d ';\n'" %
(SYSCMDS['ps'], SYSCMDS['grep'], SYSCMDS['tmm'],
SYSCMDS['grep'], SYSCMDS['wc'], SYSCMDS['tr']),
stdout=subprocess.PIPE,
shell=True).communicate()[0])
if tmm_running == 1:
return True
return False
def force_mgmt_mtu(mtu):
"""Force mgmt interface MTU"""
LOG.info('Forcing MGMT MTU to %s' % mtu)
fnull = open(os.devnull, 'w')
subprocess.call(
[SYSCMDS['ip'], 'link', 'set', MGMT_DEV_NAME, 'mtu',
"%s" % mtu],
stdout=fnull,
stderr=fnull)
time.sleep(BIGSTART_DELAY)
def force_tmm_down():
"""Forces all TMOS dataplane microkernels down"""
fnull = open(os.devnull, 'w')
subprocess.call([SYSCMDS['pkill'], 'tmm'], stdout=fnull, stderr=fnull)
subprocess.call([SYSCMDS['bigstart'], 'shutdown', 'tmm'],
stdout=fnull,
stderr=fnull)
subprocess.call([SYSCMDS['pkill'], 'tmm'], stdout=fnull, stderr=fnull)
time.sleep(PROCESS_KILL_DELAY)
def stop_tmm():
"""Stops TMOS dataplane microkernels"""
fnull = open(os.devnull, 'w')
subprocess.call([SYSCMDS['bigstart'], 'shutdown', 'tmm'],
stdout=fnull,
stderr=fnull)
time.sleep(BIGSTART_DELAY)
def start_tmm():
"""Starts TMOS dataplane microkernels"""
fnull = open(os.devnull, 'w')
subprocess.call([SYSCMDS['bigstart'], 'startup', 'tmm'],
stdout=fnull,
stderr=fnull)
time.sleep(BIGSTART_DELAY)
def bigstart_restart(service_name=None):
"""Restart a service with bigstart"""
fnull = open(os.devnull, 'w')
if not service_name:
service_name = ''
    subprocess.call([SYSCMDS['bigstart'], 'restart', service_name],
stdout=fnull,
stderr=fnull)
time.sleep(BIGSTART_DELAY)
def is_icontrol():
"""Determines if the TMOS control plane iControl REST service is running"""
try:
return requests.get('http://localhost:8100/shared/echo',
auth=('admin', '')).json()['stage'] == 'STARTED'
except Exception:
return False
def wait_for_icontrol(timeout=None):
"""Blocks until the TMOS control plane iControl REST service is running"""
if not timeout:
timeout = ICONTROL_TIMEOUT
end_time = time.time() + timeout
while (end_time - time.time()) > 0:
if is_icontrol():
return True
time.sleep(1)
LOG.error('iControl REST services could not be reached after %s seconds',
timeout)
return False
def is_rest_worker(workerpath):
"""Determines if the TMOS control plane iControl REST worker path exists"""
try:
return requests.get('http://localhost:8100' + workerpath,
auth=('admin', '')).status_code != 404
except Exception:
return False
def wait_for_rest_worker(workerpath, timeout=None):
"""Blocks until the TMOS control plane iControl REST worker path exists"""
task_url = 'http://localhost:8100' + workerpath
if not timeout:
timeout = ICONTROL_TIMEOUT
end_time = time.time() + timeout
while (end_time - time.time()) > 0:
try:
response = requests.get(task_url, auth=('admin', ''))
if response.status_code < 400:
return True
except Exception:
return False
time.sleep(1)
LOG.error('iControl REST worker %s could not be reached after %s seconds',
workerpath, timeout)
return False
def is_url(monitor_url, status_code=None):
"""Determins if the URL is reachable and optionally returns a status code"""
try:
response = requests.get(monitor_url)
LOG.debug('URL %s status %s', monitor_url, response.status_code)
        if status_code:
            return response.status_code == status_code
        return True
except Exception as ex:
LOG.error('URL %s exception %s', monitor_url, ex)
return False
return False
def wait_for_url(monitor_url, status_code=None, timeout=None):
"""Blocks until the URL is availale"""
if not timeout:
timeout = URL_TIMEOUT
end_time = time.time() + timeout
while (end_time - time.time()) > 0:
if is_url(monitor_url, status_code):
return True
time.sleep(1)
LOG.error('URL %s could not be reached after %s seconds', monitor_url,
timeout)
return False
def dhcp_lease_dir_exists():
"""Ensures DHCP lease file copy directory exists"""
if not os.path.isdir(DHCP_LEASE_DIR):
os.makedirs(DHCP_LEASE_DIR)
def make_dhcp4_request(interface, timeout=120):
"""Makes DHCPv4 queries out a linux link device"""
dhcp_lease_dir_exists()
tmp_conf_file = DHCP_LEASE_DIR + '/dhclient.conf'
lease_file = DHCP_LEASE_DIR + '/' + interface + '.lease'
tmp_lease_file = '/tmp/' + interface + '.lease'
fnull = open(os.devnull, 'w')
dhclient_cf = open(tmp_conf_file, 'w')
dhclient_cf.write(
"\nrequest subnet-mask, broadcast-address, time-offset, routers,\n")
dhclient_cf.write(
" domain-name, domain-name-servers, domain-search, host-name,\n"
)
dhclient_cf.write(
" root-path, interface-mtu, classless-static-routes;\n")
dhclient_cf.close()
if os.path.isfile(lease_file):
del_file(lease_file)
subprocess.call([SYSCMDS['pkill'], 'dhclient'], stdout=fnull)
subprocess.call([SYSCMDS['ip'], 'link', 'set', interface, 'up'],
stdout=fnull)
subprocess.call([
SYSCMDS['dhclient'], '-lf', tmp_lease_file, '-cf', tmp_conf_file, '-1',
'-timeout',
str(timeout), '-pf', '/tmp/dhclient.' + interface + '.pid', '-sf',
SYSCMDS['echo'], interface
],
stdout=fnull)
if os.path.getsize(tmp_lease_file) > 0:
copy(tmp_lease_file, lease_file)
subprocess.call([SYSCMDS['pkill'], 'dhclient'], stdout=fnull)
del_file('/tmp/dhclient.' + interface + '.pid')
del_file(tmp_lease_file)
return True
else:
subprocess.call([SYSCMDS['pkill'], 'dhclient'], stdout=fnull)
del_file('/tmp/dhclient.' + interface + '.pid')
del_file(tmp_lease_file)
return False
def process_dhcp4_lease(interface, return_options=None):
"""Parses dhclient v4 lease file format for metadata"""
if not return_options:
return_options = [
'subnet-mask', 'routers', 'domain-name-servers', 'interface-mtu',
'classless-static-routes', 'host-name', 'domain-name'
]
return_data = {}
lease_file = DHCP_LEASE_DIR + '/' + interface + '.lease'
if os.path.isfile(interface):
lease_file = interface
for line in open(lease_file):
if 'fixed-address' not in return_data and "fixed-address" in line:
# format: fixed-address 192.168.127.12;
test_fixed_address = 'fixed-address '
lidx = line.index(test_fixed_address)
return_data['fixed-address'] = \
line[lidx + len(test_fixed_address):].replace(';\n', '')
for option in return_options:
test_option = option + ' '
if (option not in return_data) and (test_option in line):
# format: option routers 1.1.1.1;
lidx = line.index(test_option)
return_data[option] = \
line[lidx + len(test_option):].replace(
';\n', '').replace('"', '').replace("'", '')
return return_data
def process_dhcp4_routes(static_routes):
"""Processes dhclient v4 static routes metadata"""
dhcp_routes = []
if static_routes:
static_route_list = static_routes.split(',')
for static_route in static_route_list:
rap = static_route.split(' ')
route = process_dhcp4_route(rap[0], rap[1])
if route:
dhcp_routes.append(route)
return dhcp_routes
def process_dhcp4_route(static_route, gateway):
"""Parse single dhclient v4 route entry into a dictionary"""
if not static_route == '0':
route = {}
route['network'] = static_route[static_route.find('.') + 1:]
if len(route['network']) == 3:
route['network'] = route['network'] + '.0.0.0'
if route['network'].find('.') > 0:
dots = route['network'].count('.')
if dots == 1:
route['network'] = route['network'] + '.0.0'
if dots == 2:
route['network'] = route['network'] + '.0'
route['netmask'] = static_route[0:static_route.find('.')]
route['gateway'] = gateway
route['route_name'] = "route_%s_%s" % (route['network'],
route['netmask'])
route['route_name'] = route['route_name'].replace('.', '_').replace(
':', '_').replace('/', '_')
        # skip loopback and link-local destinations; they should not be routed via a gateway
if route['network'].startswith('127'):
return None
elif route['network'].startswith('169.254'):
return None
elif route['network'].lower().startswith('fe80'):
return None
return route
return None
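# Illustrative example (not in the original source): a dhclient
# classless-static-routes entry such as "24.10.0.0 10.0.0.1" is parsed by
# process_dhcp4_routes()/process_dhcp4_route() into
#   {'network': '10.0.0.0', 'netmask': '24', 'gateway': '10.0.0.1',
#    'route_name': 'route_10_0_0_0_24'}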
def ipv4_cidr_from_netmask(netmask):
"""Convert IPv4 netmask to CIDR bits"""
return sum([bin(int(x)).count('1') for x in netmask.split('.')])
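# Illustrative examples (not in the original source):
#   ipv4_cidr_from_netmask('255.255.255.0') == 24
#   ipv4_cidr_from_netmask('255.255.0.0') == 16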
def wait_for_dns_resolution(fqdn, timeout=30):
"""Wait for DNS to resolve a required FQDN"""
start = time.time()
end = start + timeout
while time.time() < end:
try:
socket.gethostbyname(fqdn)
return True
except socket.error:
LOG.error('FQDN %s could not be resolved', fqdn)
time.sleep(1)
LOG.error('FQDN %s could not be resolved in %s seconds', fqdn, timeout)
return False
def phone_home(phone_home_url=None,
status='ERROR',
verify_tls=True,
metadata={}):
"""Issues a Phone Home web POST request with collected onboard data"""
if phone_home_url:
try:
parsed_url = urlparse.urlparse(phone_home_url)
headers = {'Content-Type': 'application/json'}
post_data = {}
post_data['id'] = get_dmi_uuid()
post_data['version'] = get_tmos_version()
post_data['product'] = get_tmos_product()
post_data['hostname'] = get_hostname()
post_data['management'] = get_mgmt_cidr()
post_data['status'] = status
post_data['metadata'] = metadata
post_json = json.dumps(post_data)
LOG.debug('POST %s - %s', phone_home_url, post_json)
if verify_tls:
resp = requests.post(phone_home_url,
headers=headers,
data=post_json)
resp.raise_for_status()
else:
resp = requests.post(phone_home_url,
headers=headers,
data=post_json,
verify=False)
                resp.raise_for_status()
return True
except Exception as err:
LOG.error("could not phone home: %s - %s", phone_home_url, err)
return False
def clean():
"""Remove any onboarding artifacts"""
if REMOVE_DHCP_LEASE_FILES:
lease_files = os.listdir(DHCP_LEASE_DIR)
for lease_file in lease_files:
del_file("%s/%s" % (DHCP_LEASE_DIR, lease_file))
# remove inject line in /config/startup
injected_already = subprocess.Popen(
"cat /config/startup | grep bigiq_onboard_utils" +
" | wc -l",
stdout=subprocess.PIPE,
shell=True).communicate()[0].replace('\n', '')
if injected_already == '1':
subprocess.call([
'/bin/sed', '-i',
"/bigiq_onboard_utils/d", '/config/startup'
])
def get_datasource_file():
return "/opt/cloud/instances/%s/datasource" % get_dmi_uuid().lower()
def get_vendor_data_file():
return "/opt/cloud/instances/%s/vendor-data.txt" % get_dmi_uuid().lower()
def get_meta_data_file():
return '/opt/cloud/instances/%s/meta_data' % get_dmi_uuid().lower()
def get_user_data_file():
return "/opt/cloud/instances/%s/user-data.txt" % get_dmi_uuid().lower()
def get_configdrive_dev():
"""Get Device Name for a ConfigDrive"""
fnull = open(os.devnull, 'w')
dev = subprocess.Popen("%s | %s config-2 | %s -d':' -f1" %
(SYSCMDS['blkid'], SYSCMDS['grep'], SYSCMDS['cut']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
if not os.path.exists(dev):
dev = subprocess.Popen(
"%s --fs | %s config-2 | %s -d' ' -f1" %
(SYSCMDS['lsblk'], SYSCMDS['grep'], SYSCMDS['cut']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
if len(dev) > 2:
dev = "/dev/%s" % dev
return dev
def mount_configdrive(dev, mountpath='/tmp/configdrive'):
"""Mount ConfigDrive"""
if not os.path.exists(mountpath):
os.makedirs(mountpath)
fnull = open(os.devnull, 'w')
mountout = subprocess.Popen("%s %s %s" %
(SYSCMDS['mount'], dev, mountpath),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
return mountout
def ummount_configdrive(mountpath):
"""Unmount ConfigDrive"""
time.sleep(3)
fnull = open(os.devnull, 'w')
umountout = subprocess.Popen("%s %s" % (SYSCMDS['umount'], mountpath),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace(
'\n', '')
return umountout
def onboard_configdrive():
"""Dicover ConfigDrive and Mount"""
dev = get_configdrive_dev()
if dev:
LOG.info('attempting to mount %s as a ConfigDrive data source' % dev)
systemid = get_dmi_uuid().lower()
userdata = '---'
metadata = '{}'
networkdata = '{}'
mountpath = '/tmp/configdrive'
mount_configdrive(dev, mountpath)
userdatapath = "%s/openstack/latest/user_data" % mountpath
metadatapath = "%s/openstack/latest/meta_data.json" % mountpath
networkdatapath = "%s/openstack/latest/network_data.json" % mountpath
if os.path.exists(userdatapath):
f = open(userdatapath, "r")
userdata = f.read()
f.close()
if os.path.exists(metadatapath):
f = open(metadatapath, "r")
metadata = f.read()
f.close()
if os.path.exists(networkdatapath):
f = open(networkdatapath, "r")
networkdata = f.read()
f.close()
create_cloudinit_resources(
dev, systemid, userdata, metadata, networkdata, None,
"DataSourceConfigDriveNet: DataSourceConfigDriveNet [net,ver=2][source=%s]"
% dev)
ummount_configdrive(mountpath)
os.rmdir(mountpath)
return True
else:
return False
def get_nocloud_dev():
"""Get Device Name for a NoCloudDrive"""
fnull = open(os.devnull, 'w')
dev = subprocess.Popen("%s | %s cidata | %s -d':' -f1" %
(SYSCMDS['blkid'], SYSCMDS['grep'], SYSCMDS['cut']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
if not os.path.exists(dev):
dev = subprocess.Popen(
"%s --fs | %s cidata | %s -d' ' -f1" %
(SYSCMDS['lsblk'], SYSCMDS['grep'], SYSCMDS['cut']),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
if len(dev) > 2:
dev = "/dev/%s" % dev
return dev
def mount_nocloud(dev, mountpath='/tmp/nocloud'):
"""Mount NoCloudDrive"""
if not os.path.exists(mountpath):
os.makedirs(mountpath)
fnull = open(os.devnull, 'w')
mountout = subprocess.Popen("%s %s %s" %
(SYSCMDS['mount'], dev, mountpath),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace('\n', '')
return mountout
def ummount_nocloud(mountpath):
"""Unmount NoCloudDrive"""
time.sleep(3)
fnull = open(os.devnull, 'w')
umountout = subprocess.Popen("%s %s" % (SYSCMDS['umount'], mountpath),
stdout=subprocess.PIPE,
stderr=fnull,
shell=True).communicate()[0].replace(
'\n', '')
return umountout
def onboard_nocloud():
"""Dicover NoCloud and Mount"""
dev = get_nocloud_dev()
if dev:
LOG.info('attempting to mount %s as a NoCloud data source' % dev)
systemid = get_dmi_uuid().lower()
userdata = '---'
metadata = ''
vendordata = ''
mountpath = '/tmp/nocloud'
        mount_nocloud(dev, mountpath)
userdatapath = "%s/user-data" % mountpath
metadatapath = "%s/meta-data" % mountpath
vendordatapath = "%s/vendor-data" % mountpath
if os.path.exists(userdatapath):
f = open(userdatapath, "r")
userdata = f.read()
f.close()
if os.path.exists(metadatapath):
f = open(metadatapath, "r")
metadata = f.read()
            if metadata.find('instance-id') >= 0:
for line in metadata.splitlines():
if line.startswith('instance-id'):
systemid = line.split(': ')[1]
f.close()
if os.path.exists(vendordatapath):
f = open(vendordatapath, "r")
vendordata = f.read()
f.close()
create_cloudinit_resources(
dev, systemid, userdata, metadata, None, vendordata,
"DataSourceNoCloud: DataSourceNoCloud [seed=%s][dsmode=net]" % dev)
ummount_nocloud(mountpath)
os.rmdir(mountpath)
return True
else:
return False
def create_cloudinit_resources(dev, systemid, userdata, metadata, networkdata,
vendordata, ds_string):
"""Copy Source Resources to CloudInit Locations"""
if not os.path.exists('/opt/cloud/data'):
os.makedirs('/opt/cloud/data')
if not os.path.exists('/opt/cloud/handlers'):
os.makedirs('/opt/cloud/handlers')
    if not os.path.exists("/opt/cloud/instances/%s" % systemid):
        os.makedirs("/opt/cloud/instances/%s" % systemid)
if not os.path.exists("/opt/cloud/instance"):
os.symlink("/opt/cloud/instances/%s" % systemid, '/opt/cloud/instance')
if not os.path.exists('/opt/cloud/scripts'):
os.makedirs('/opt/cloud/scripts/per-boot')
os.makedirs('/opt/cloud/scripts/per-instance')
os.makedirs('/opt/cloud/scripts/per-once')
os.makedirs('/opt/cloud/seed')
os.makedirs('/opt/cloud/sem')
with open('/opt/cloud/data/previous-datasource', 'w') as ds:
ds.write(ds_string)
with open('/opt/cloud/instances/%s/datasource' % systemid, 'w') as ds:
ds.write(ds_string)
with open('/opt/cloud/data/instance-id', 'w') as instid:
        LOG.info('writing systemid as %s' % systemid)
instid.write(systemid)
with open('/opt/cloud/data/previous-instance-id', 'w') as instid:
instid.write(systemid)
with open("/opt/cloud/instances/%s/user-data.txt" % systemid, 'w') as ds:
LOG.info('writing user-data: %d bytes' % len(userdata))
ds.write(userdata)
if metadata:
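        # ConfigDrive supplies JSON metadata while NoCloud supplies plain text;
        # pick the output file name by whether the blob parses as JSON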
try:
json.loads(metadata)
with open("/opt/cloud/instances/%s/meta_data.json" % systemid,
'w') as ds:
LOG.info('writing meta_data.json: %d bytes' % len(metadata))
ds.write(metadata)
        except ValueError:
with open("/opt/cloud/instances/%s/meta_data" % systemid,
'w') as ds:
LOG.info('writing meta_data: %d bytes' % len(metadata))
ds.write(metadata)
if networkdata:
with open("/opt/cloud/instances/%s/network_data.json" % systemid,
'w') as ds:
LOG.info('writing network_data.json: %d bytes' % len(networkdata))
ds.write(networkdata)
if vendordata:
with open("/opt/cloud/instances/%s/vendor-data.txt" % systemid,
'w') as ds:
LOG.info("writing vendor-data.txt: %d bytes" % len(vendordata))
ds.write(vendordata)
def set_hostname(hostname):
global HOSTNAME_SET
if not HOSTNAME_SET:
wait_for_mcpd()
fnull = open(os.devnull, 'w')
cmd = "%s modify sys global-settings hostname %s" % (SYSCMDS['tmsh'],
hostname)
sethostname = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=fnull,
shell=True,
).communicate()[0].replace('\n', '')
cmd = "%s mv cm device $(%s list cm device | %s 'cm device' | %s -d ' ' -f3) %s" % (
SYSCMDS['tmsh'], SYSCMDS['tmsh'], SYSCMDS['grep'], SYSCMDS['cut'],
hostname)
renamedevice = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=fnull,
shell=True,
).communicate()[0].replace('\n', '')
HOSTNAME_SET = True
def del_file(path):
LOG.debug("Attempting to remove %s", path)
try:
os.unlink(path)
except OSError as e:
if e.errno != ENOENT:
raise e
def copy(src, dest):
LOG.debug("Copying %s to %s", src, dest)
shutil.copy(src, dest)
def read_conf(fname):
try:
return load_yaml(load_file(fname), default={})
except IOError as e:
if e.errno == ENOENT:
return {}
else:
raise
def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
bytes_piped = 0
while True:
data = in_fh.read(chunk_size)
if len(data) == 0:
break
else:
out_fh.write(data)
bytes_piped += len(data)
if chunk_cb:
chunk_cb(bytes_piped)
out_fh.flush()
return bytes_piped
def decode_binary(blob, encoding='utf-8'):
if isinstance(blob, basestring):
return blob
return blob.decode(encoding)
def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = StringIO.StringIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
except IOError as e:
if not quiet:
raise
if e.errno != ENOENT:
raise
contents = ofh.getvalue()
LOG.debug("Read %s bytes from %s", len(contents), fname)
if decode:
return decode_binary(contents)
else:
return contents
def load_yaml(blob, default=None, allowed=(dict, )):
loaded = default
blob = decode_binary(blob)
try:
LOG.debug(
"Attempting to load yaml from string "
"of length %s with allowed root types %s", len(blob), allowed)
converted = yaml.load(blob, Loader=yaml.FullLoader)
if converted is None:
LOG.debug("loaded blob returned None, returning default.")
converted = default
elif not isinstance(converted, allowed):
raise TypeError("Yaml load only allows %s root types" % allowed)
loaded = converted
except (yaml.YAMLError, TypeError, ValueError) as e:
msg = 'Failed loading yaml blob'
mark = None
if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
mark = getattr(e, 'context_mark')
elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
mark = getattr(e, 'problem_mark')
if mark:
msg += (
'. Invalid format at line {line} column {col}: "{err}"'.format(
line=mark.line + 1, col=mark.column + 1, err=e))
else:
msg += '. {err}'.format(err=e)
LOG.warning(msg)
return loaded
def save_config():
wait_for_mcpd()
fnull = open(os.devnull, 'w')
cmd = "%s save sys config" % SYSCMDS['tmsh']
saveconfig = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=fnull,
shell=True,
).communicate()[0].replace('\n', '')
def process_json_metadata():
metadatapath = "%s.json" % get_meta_data_file()
if os.path.exists(metadatapath):
with open(metadatapath, 'r') as md:
metadata = json.load(md)
if 'hostname' in metadata:
if metadata['hostname'].find('.') < 0:
set_hostname("%s.local" % metadata['hostname'])
else:
set_hostname(metadata['hostname'])
if 'public_keys' in metadata:
keys = []
for keyname in metadata['public_keys'].keys():
keys.append(metadata['public_keys'][keyname])
inject_public_ssh_keys(keys)
def process_text_metadata():
metadatapath = get_meta_data_file()
if os.path.exists(metadatapath):
with open(metadatapath, 'r') as md:
for line in md:
if line.startswith('local-hostname'):
hostname = line.split(': ')[1].strip()
if hostname.find('.') < 0:
set_hostname("%s.local" % hostname)
else:
set_hostname(hostname)
def run_modules():
userdatapath = get_user_data_file()
if os.path.exists(userdatapath):
USERDATA_CONFIG = read_conf(userdatapath)
for cm in config_modules:
LOG.info('importing module: %s' % cm)
m = importlib.import_module('cloudinit.config.%s' % cm)
LOG.info('running %s handler' % m.MODULE_NAME)
m.handle(m.MODULE_NAME, USERDATA_CONFIG, None, logging, [])
def onboard():
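    # onboarding is attempted only once; FIRST_BOOT_FILE marks a completed (or attempted) run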
if not os.path.exists(FIRST_BOOT_FILE):
if onboard_configdrive():
run_modules()
LOG.info('touching %s flag file' % FIRST_BOOT_FILE)
touch_file(FIRST_BOOT_FILE)
elif onboard_nocloud():
run_modules()
LOG.info('touching %s flag file' % FIRST_BOOT_FILE)
touch_file(FIRST_BOOT_FILE)
else:
LOG.error(
            'BIG-IQ cloud-init only accepts ConfigDrive or NoCloud as data sources'
)
touch_file(FIRST_BOOT_FILE)
else:
LOG.info('cloudinit onboarding did not run because %s exists' %
FIRST_BOOT_FILE)
if __name__ == "__main__":
onboard()
```
|
{
"source": "jgruberf5/ibmcloud_schematics_volterra_adn_tier_mz",
"score": 2
}
|
#### File: modules/volterra/volterra_resource_site_create.py
```python
import os
import sys
import json
import argparse
import urllib.request
from urllib.error import HTTPError
def get_tenant_id(tenant, token):
headers = {
"Authorization": "APIToken %s" % token
}
try:
url = "https://%s.console.ves.volterra.io/api/web/namespaces/system" % tenant
request = urllib.request.Request(
url, headers=headers, method='GET')
response = urllib.request.urlopen(request)
return json.load(response)['system_metadata']['tenant']
except HTTPError as her:
sys.stderr.write(
"Error retrieving tenant ID - %s\n" % her)
sys.exit(1)
def assure_site_token(tenant, token, site_token_name):
site_token_name = site_token_name.encode('utf-8').decode('utf-8')
headers = {
"Authorization": "APIToken %s" % token
}
# Does the site token exist
try:
url = "https://%s.console.ves.volterra.io/api/register/namespaces/system/tokens/%s" % (
tenant, site_token_name)
request = urllib.request.Request(
url, headers=headers, method='GET')
response = urllib.request.urlopen(request)
return json.load(response)['system_metadata']['uid']
except HTTPError as her:
if her.code == 404:
try:
url = "https://%s.console.ves.volterra.io/api/register/namespaces/system/tokens" % tenant
headers['volterra-apigw-tenant'] = tenant
headers['content-type'] = 'application/json'
data = {
"metadata": {
"annotations": {},
"description": "Site Authorization Token for %s" % site_token_name,
"disable": False,
"labels": {},
"name": site_token_name,
"namespace": "system"
},
"spec": {}
}
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
response = urllib.request.urlopen(request)
site_token = json.load(response)
return site_token['system_metadata']['uid']
except HTTPError as err:
sys.stderr.write(
"Error creating site token resources %s: %s\n" % (url, err))
sys.exit(1)
else:
sys.stderr.write(
"Error retrieving site token resources %s: %s\n" % (url, her))
sys.exit(1)
except Exception as er:
sys.stderr.write(
"Error retrieving site token resources %s\n" % er)
sys.exit(1)
def assure_k8s_cluster(tenant, token, site_name, k8sdomain):
headers = {
"Authorization": "APIToken %s" % token
}
# create K8s cluster object
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/k8s_clusters/%s" % (
tenant, site_name)
request = urllib.request.Request(
url, headers=headers, method='GET')
urllib.request.urlopen(request)
except HTTPError as her:
if her.code == 404:
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/k8s_clusters" % tenant
headers['volterra-apigw-tenant'] = tenant
headers['content-type'] = 'application/json'
data = {
"namespace": "system",
"metadata": {
"name": site_name,
"namespace": None,
"labels": {},
"annotations": {},
"description": None,
"disable": None
},
"spec": {
"local_access_config": {
"local_domain": k8sdomain,
"default_port": {}
},
"global_access_enable": {},
"use_default_psp": {},
"use_default_cluster_roles": {},
"use_default_cluster_role_bindings": {},
"no_insecure_registries": {}
}
}
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
urllib.request.urlopen(request)
except HTTPError as err:
sys.stderr.write(
"Error creating k8s_clusters resources %s: %s\n" % (url, err))
sys.exit(1)
def assure_voltstack_site(tenant, token, site_name, tenant_id, cluster_size, latitude, longitude, inside_networks, inside_gateways):
headers = {
"Authorization": "APIToken %s" % token
}
# create Voltstack site
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/sites/%s" % (
tenant, site_name)
request = urllib.request.Request(
url, headers=headers, method='GET')
response = urllib.request.urlopen(request)
return json.load(response)['system_metadata']['uid']
except HTTPError as her:
if her.code == 404:
try:
v_static_routes = []
for gw in inside_gateways:
v_static_routes.append(
{
"ip_prefixes": inside_networks,
"ip_address": gw,
"attrs": ['ROUTE_ATTR_INSTALL_HOST', 'ROUTE_ATTR_INSTALL_FORWARDING']
})
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/voltstack_sites" % tenant
headers['volterra-apigw-tenant'] = tenant
headers['content-type'] = 'application/json'
data = {
"metadata": {
"name": site_name,
"namespace": None,
"labels": {},
"annotations": {},
"description": None,
"disable": None
},
"spec": {
"volterra_certified_hw": "kvm-volstack-combo",
"master_nodes": [],
"worker_nodes": [],
"no_bond_devices": {},
"custom_network_config": {
"slo_config": {
"labels": {},
"static_routes": {
"static_routes": v_static_routes
},
"no_dc_cluster_group": {}
},
"default_interface_config": {},
"no_network_policy": {},
"no_forward_proxy": {},
"global_network_list": {
"global_network_connections": [
{
"slo_to_global_dr": {
"global_vn": {
"tenant": "ves-io",
"namespace": "shared",
"name": "public"
}
}
}
]
},
},
"default_storage_config": {},
"disable_gpu": {},
"address": None,
"coordinates": {
"latitude": latitude,
"longitude": longitude
},
"k8s_cluster": {
"tenant": tenant_id,
"namespace": "system",
"name": site_name
},
"logs_streaming_disabled": {},
"deny_all_usb": {}
},
"resource_version": None
}
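                # the first min(cluster_size, 3) nodes become master nodes; any remaining nodes are workers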
masters = []
for indx in range(min(cluster_size, 3)):
masters.append("%s-vce-%d" % (site_name, indx))
data['spec']['master_nodes'] = masters
workers = []
for indx in range(cluster_size - 3):
workers.append("%s-vce-%d" % (site_name, indx + 3))
data['spec']['worker_nodes'] = workers
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
response = urllib.request.urlopen(request)
site = json.load(response)
return site['system_metadata']['uid']
except HTTPError as err:
sys.stderr.write(
"Error creating volstack site resources %s: %s\n" % (url, err))
sys.exit(1)
else:
sys.stderr.write(
"Error retrieving voltstack site resources %s: %s\n" % (url, her))
sys.exit(1)
except Exception as er:
sys.stderr.write(
"Error retrieving voltstack site resources %s\n" % er)
sys.exit(1)
def assure_virtual_network(tenant, token, site_name, fleet_label, tenant_id, inside_networks, inside_gateways):
headers = {
"Authorization": "APIToken %s" % token
}
if inside_networks:
# Does virtual network exist
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/virtual_networks/%s" % (
tenant, fleet_label)
request = urllib.request.Request(
url, headers=headers, method='GET')
response = urllib.request.urlopen(request)
except HTTPError as her:
if her.code == 404:
try:
v_static_routes = []
for gw in inside_gateways:
v_static_routes.append({
"ip_prefixes": inside_networks,
"ip_address": gw,
"attrs": ['ROUTE_ATTR_INSTALL_HOST', 'ROUTE_ATTR_INSTALL_FORWARDING']
})
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/virtual_networks" % tenant
headers['volterra-apigw-tenant'] = tenant
headers['content-type'] = 'application/json'
data = {
"namespace": "system",
"metadata": {
"name": site_name,
"namespace": "system",
"labels": {
"ves.io/fleet": fleet_label
},
"annotations": {},
"description": "Routes inside %s" % site_name,
"disable": False
},
"spec": {
"site_local_inside_network": {},
"static_routes": v_static_routes
}
}
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
urllib.request.urlopen(request)
except HTTPError as her:
sys.stderr.write(
"Error creating virtual_networks resources %s: %s - %s\n" % (url, data, her))
sys.exit(1)
else:
sys.stderr.write(
"Error retrieving virtual_networks resources %s: %s\n" % (url, her))
sys.exit(1)
def assure_network_connector(tenant, token, site_name, fleet_label):
headers = {
"Authorization": "APIToken %s" % token
}
# Does Global Network connector exist?
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/network_connectors/%s" % (
tenant, site_name)
request = urllib.request.Request(
url, headers=headers, method='GET')
urllib.request.urlopen(request)
except HTTPError as her:
if her.code == 404:
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/network_connectors" % tenant
headers['volterra-apigw-tenant'] = tenant
headers['content-type'] = 'application/json'
data = {
"namespace": "system",
"metadata": {
"name": site_name,
"namespace": None,
"labels": {
"ves.io/fleet": fleet_label
},
"annotations": {},
"description": "connecting %s to the global shared network" % site_name,
"disable": False
},
"spec": {
"sli_to_global_dr": {
"global_vn": {
"tenant": "ves-io",
"namespace": "shared",
"name": "public"
}
},
"disable_forward_proxy": {}
}
}
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
urllib.request.urlopen(request)
except HTTPError as her:
sys.stderr.write(
"Error creating network_connectors resources %s: %s - %s\n" % (url, data, her))
sys.exit(1)
else:
sys.stderr.write(
"Error retrieving network_connectors resources %s: %s\n" % (url, her))
sys.exit(1)
def assure_fleet(tenant, token, site_name, fleet_label, tenant_id):
headers = {
"Authorization": "APIToken %s" % token
}
# Does the fleet exist
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/fleets/%s" % (
tenant, site_name)
request = urllib.request.Request(
url, headers=headers, method='GET')
response = urllib.request.urlopen(request)
return json.load(response)['spec']['fleet_label']
except HTTPError as her:
if her.code == 404:
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/fleets" % tenant
headers['volterra-apigw-tenant'] = tenant
headers['content-type'] = 'application/json'
data = {
"namespace": "system",
"metadata": {
"name": site_name,
"namespace": None,
"labels": {},
"annotations": {},
"description": "Fleet provisioning object for %s" % site_name,
"disable": None
},
"spec": {
"fleet_label": fleet_label,
"volterra_software_version": None,
"network_connectors": [
{
"kind": "network_connector",
"uuid": None,
"tenant": tenant_id,
"namespace": "system",
"name": site_name
}
],
"network_firewall": None,
"operating_system_version": None,
"outside_virtual_network": None,
"inside_virtual_network": [
{
"kind": "virtual_network",
"uid": None,
"tenant": tenant_id,
"namespace": "system",
"name": site_name
}
],
"default_config": {},
"no_bond_devices": {},
"no_storage_interfaces": {},
"no_storage_device": {},
"default_storage_class": {},
"no_dc_cluster_group": {},
"disable_gpu": {},
"no_storage_static_routes": {},
"enable_default_fleet_config_download": None,
"logs_streaming_disabled": {},
"deny_all_usb": {}
}
}
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
response = urllib.request.urlopen(request)
return json.load(response)['spec']['fleet_label']
except HTTPError as her:
sys.stderr.write(
"Error creating fleets resources %s: %s - %s\n" % (url, data, her))
sys.exit(1)
else:
sys.stderr.write(
"Error retrieving feet resources %s: %s\n" % (url, her))
sys.exit(1)
except Exception as er:
sys.stderr.write(
"Error retrieving fleet resources %s\n" % er)
sys.exit(1)
def assure_service_discovery(tenant, token, site_name, tenant_id, consul_servers, ca_cert_encoded):
headers = {
"Authorization": "APIToken %s" % token
}
for indx, consul_server in enumerate(consul_servers):
name = "%s-consul-%d" % (site_name, indx)
# Does service discovery exist
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/discoverys/%s" % (
tenant, name)
request = urllib.request.Request(
url, headers=headers, method='GET')
urllib.request.urlopen(request)
except HTTPError as her:
if her.code == 404:
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/discoverys" % tenant
data = {
"namespace": "system",
"metadata": {
"name": name,
"namespace": None,
"labels": {},
"annotations": {},
"description": None,
"disable": False
},
"spec": {
"where": {
"site": {
"ref": [{
"kind": "site",
"uid": None,
"tenant": tenant_id,
"namespace": "system",
"name": site_name
}],
"network_type": "VIRTUAL_NETWORK_SITE_LOCAL_INSIDE"
}
},
"discovery_consul": {
"access_info": {
"connection_info": {
"api_server": consul_server,
"tls_info": {
"server_name": None,
"certificate_url": None,
"certificate": None,
"key_url": None,
"ca_certificah signal has different support and stability in OTLP, described through its own maturity level, which in turn applies to all the OTLP Transports listed below.te_url": None,
"trusted_ca_url": "string:///%s" % ca_cert_encoded
}
},
"scheme": None,
"http_basic_auth_info": None
},
"publish_info": {
"disable": {}
}
}
}
}
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
urllib.request.urlopen(request)
except HTTPError as her:
sys.stderr.write(
"Error creating discoverys resources %s: %s - %s\n" % (url, data, her))
sys.exit(1)
else:
sys.stderr.write(
"Error retrieving discoverys resources %s: %s\n" % (url, her))
sys.exit(1)
def main():
ap = argparse.ArgumentParser(
        prog='volterra_resource_site_create',
        usage='%(prog)s.py [options]',
        description='create site token, fleet and supporting resources for a Volterra site'
)
ap.add_argument(
'--site',
help='Volterra site name',
required=True
)
ap.add_argument(
'--fleet',
help='Volterra fleet label',
required=True
)
ap.add_argument(
'--tenant',
help='Volterra site tenant',
required=True
)
ap.add_argument(
'--token',
help='Volterra API token',
required=True
)
ap.add_argument(
'--voltstack',
help='Create Voltstack site',
required=False,
default='false'
)
ap.add_argument(
'--k8sdomain',
help='Voltstack domain for K8s config',
required=False,
default='local'
)
ap.add_argument(
'--cluster_size',
help='Volterra Cluster size',
required=False,
default='3'
    )
ap.add_argument(
'--latitude',
help='Volterra Cluster latitude',
required=False,
default='33.1032'
    )
ap.add_argument(
'--longitude',
help='Volterra Cluster longitude',
required=False,
default='-96.6706'
)
ap.add_argument(
'--inside_networks',
help='Network CIDRs reachable inside the Volterra Cluster',
required=False,
default='[]'
)
ap.add_argument(
'--inside_gateways',
help='Network inside the Volterra Cluster next hop IPv4 address',
required=False,
default='[]'
)
ap.add_argument(
'--consul_servers',
help='Consul server IPv4 addresses to add as service discovery',
required=False,
default='[]'
)
ap.add_argument(
'--ca_cert_encoded',
help='Base64 encoded Consul CA certificate to add as service discovery',
required=False,
default='[]'
)
args = ap.parse_args()
tenant_id = get_tenant_id(
args.tenant,
args.token
)
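    # Voltstack sites get a k8s cluster and voltstack site object; otherwise build the
    # virtual network, network connector and fleet objects for the site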
if args.voltstack == "true":
assure_k8s_cluster(
args.tenant,
args.token,
args.site,
args.k8sdomain
)
assure_voltstack_site(
args.tenant,
args.token,
args.site,
tenant_id,
int(args.cluster_size),
args.latitude,
args.longitude,
json.loads(args.inside_networks),
json.loads(args.inside_gateways)
)
else:
assure_virtual_network(
args.tenant,
args.token,
args.site,
args.fleet,
tenant_id,
json.loads(args.inside_networks),
json.loads(args.inside_gateways)
)
assure_network_connector(
args.tenant,
args.token,
args.site,
args.fleet
)
assure_fleet(
args.tenant,
args.token,
args.site,
args.fleet,
tenant_id
)
consul_servers = json.loads(args.consul_servers)
if consul_servers:
assure_service_discovery(
args.tenant,
args.token,
args.site,
tenant_id,
consul_servers,
args.ca_cert_encoded
)
site_token = assure_site_token(
args.tenant,
args.token,
args.site
)
site_token_file = "%s/%s_site_token.txt" % (
os.path.dirname(os.path.realpath(__file__)), args.site)
if os.path.exists(site_token_file):
os.unlink(site_token_file)
with open(site_token_file, "w") as site_token_file:
site_token_file.write(site_token)
sys.stdout.write(
'Created registration token for the site: %s' % site_token)
sys.exit(0)
if __name__ == '__main__':
main()
```
|
{
"source": "jgruberf5/salesinvites",
"score": 2
}
|
#### File: jgruberf5/salesinvites/server.py
```python
import csv
import json
import logging
import os
import requests
import sys
import threading
import time
from flask import Flask, flash, request, redirect, url_for
VERSION = '05042020-3'
CSV_FILE = '/tmp/list.csv'
LOG_FILE = '/tmp/logoutput.txt'
LOG = logging.getLogger('csv_invite_processor')
LOG.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGSTREAM = logging.StreamHandler(sys.stdout)
LOGSTREAM.setFormatter(FORMATTER)
LOG.addHandler(LOGSTREAM)
USERNAME = None
PASSWORD = <PASSWORD>
API_HOST = "api.cloudservices.f5.com"
API_VERSION = "v1"
ROLE_ID = "r-NAYFdYfiR"
DRY_RUN = False
DELAY = 0
TOKEN = None
def get_service_token():
if USERNAME and PASSWORD:
try:
headers = {
"Content-Type": "application/json"
}
data = {
"username": USERNAME,
"password": PASSWORD
}
url = "https://%s/%s/svc-auth/login" % (API_HOST, API_VERSION)
response = requests.post(
url, headers=headers, data=json.dumps(data))
if response.status_code < 300:
return response.json()['access_token']
else:
LOG.error('error retrieving token: %d: %s',
response.status_code, response.content)
except Exception as ex:
            LOG.error('error retrieving token: %s', ex)
return None
else:
        LOG.error('can not issue token without setting Username and Password')
return None
def get_account_info(token):
if token:
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer %s" % token
}
url = "https://%s/%s/svc-account/user" % (API_HOST, API_VERSION)
response = requests.get(url, headers=headers)
if response.status_code < 300:
data = response.json()
return {
'user_id': data['id'],
'account_id': data['primary_account_id']
}
else:
LOG.error('error retrieving account: %d: %s',
response.status_code, response.content)
except Exception as ex:
            LOG.error('error retrieving account: %s', ex)
else:
LOG.error('can not retrieve user account without access token')
return None
def get_existing_invites(token):
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer %s" % token
}
url = "https://%s/%s/svc-account/invites" % (API_HOST, API_VERSION)
response = requests.get(url, headers=headers)
if response.status_code < 300:
return response.json()
else:
LOG.error('error retrieving existing invitations: %d: %s',
response.status_code, response.content)
except Exception as ex:
        LOG.error('error retrieving account invitations: %s', ex)
return None
def delete_invite(token, invite_id):
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer %s" % token
}
url = "https://%s/%s/svc-account/invites/%s" % (
API_HOST, API_VERSION, invite_id)
response = requests.delete(url, headers=headers)
if response.status_code < 300:
return True
else:
LOG.error('error deleting invitation: %s - %d: %s',
invite_id, response.status_code, response.content)
except Exception as ex:
LOG.error('error deleting invitations: %s - %s', invite_id, ex)
def delete_accepted_invitations(token, account_id):
existing_invitations = get_existing_invites(token)
if existing_invitations:
for invite in existing_invitations['invites']:
if invite['status'] == 'accepted' and invite['inviter_account_id'] == account_id:
if DRY_RUN:
LOG.info(
'dry run - would have deleted accepted invitation for %s', invite['invitee_email'])
else:
LOG.info('deleting accepted invitation for %s',
invite['invitee_email'])
delete_invite(token, invite['invite_id'])
def get_existing_account_members(token, account_id):
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer %s" % token
}
url = "https://%s/%s/svc-account/accounts/%s/members" % (
API_HOST, API_VERSION, account_id)
response = requests.get(url, headers=headers)
if response.status_code < 300:
return response.json()
else:
LOG.error('error retrieving existing account members: %d: %s',
response.status_code, response.content)
except Exception as ex:
        LOG.error('error retrieving existing account members: %s', ex)
return None
def issue_invite(token, account_id, user_id, first_name, last_name, email):
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer %s" % token
}
data = {
"inviter_account_id": account_id,
"inviter_user_id": user_id,
"account_ids": [
account_id
],
"invitees": [
{
"first_name": first_name,
"last_name": last_name,
"email": email
}
],
"role_id": ROLE_ID
}
url = "https://%s/%s/svc-account/invites" % (API_HOST, API_VERSION)
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code < 300:
return response.json()
else:
LOG.error('error sending invitation for: %s - %d: %s',
email, response.status_code, response.content)
except Exception as ex:
LOG.error('error sending invitations: %s - %s', email, ex)
class ListProcessingThread(object):
def __init__(self):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
LOG.debug('logging into F5 cloud services')
token = get_service_token()
if not token:
LOG.error('halting processing due to login failure')
LOG.info('finished processing %d records', 0)
return False
account_info = get_account_info(token)
if not account_info:
LOG.error('halting processing missing account ID')
LOG.info('finished processing %d records', 0)
return False
if DRY_RUN:
LOG.info('performing dry run simulation only')
LOG.info('deleting accepted invitations for users in account: %s',
account_info['user_id'])
delete_accepted_invitations(token, account_info['account_id'])
LOG.info('getting existing account members for account: %s',
account_info['account_id'])
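        # build a skip list of emails that already have access (members) or a pending invite (added below)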
sent_invites = []
members = get_existing_account_members(
token, account_info['account_id'])['users']
for member in members:
sent_invites.append(member['user']['email'])
LOG.info('sending invites with user_id: %s, account id: %s',
account_info['user_id'], account_info['account_id'])
existing_invitations = get_existing_invites(token)
if existing_invitations:
for invite in existing_invitations['invites']:
if not invite['invitee_email'] in sent_invites and invite['status'] == 'pending':
sent_invites.append(invite['invitee_email'])
else:
LOG.warning('no existing invitations')
line_count = 0
with open(CSV_FILE, newline='') as csvfile:
try:
invitations = csv.reader(csvfile, dialect='excel')
for row in invitations:
line_count += 1
first_name = row[0]
last_name = row[1]
email = row[2]
if first_name == 'FirstName':
continue
if not email in sent_invites:
if DRY_RUN:
LOG.info(
'dry run - would have processed invitation for %s %s: %s', first_name, last_name, email)
else:
LOG.info('processing invitation for %s %s: %s',
first_name, last_name, email)
issue_invite(
token, account_info['account_id'], account_info['user_id'], first_name, last_name, email)
if DELAY > 0:
time.sleep(DELAY)
else:
LOG.info('invitation for %s %s: %s already processed',
first_name, last_name, email)
except Exception as ex:
LOG.error('error reading CSV: %s', ex)
LOG.info('finished processing %d invitations', line_count)
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def upload_list():
global USERNAME, PASSWORD, API_HOST, API_VERSION, ROLE_ID, DRY_RUN, DELAY
if request.method == 'POST':
if 'username' not in request.form:
flash('No username')
return redirect(request.url)
if 'password' not in request.form:
flash('No password')
return redirect(request.url)
if 'file' not in request.files:
flash('No file')
return redirect(request.url)
USERNAME = request.form['username']
PASSWORD = request.form['password']
if 'apihost' in request.form:
API_HOST = request.form['apihost']
if 'apiversion' in request.form:
API_VERSION = request.form['apiversion']
if 'roleid' in request.form:
ROLE_ID = request.form['roleid']
if 'dryrun' in request.form and request.form['dryrun'] == 'on':
DRY_RUN = True
else:
DRY_RUN = False
if 'delay' in request.form:
delay = request.form['delay']
DELAY = round((int(delay) / 1000), 2)
file = request.files['file']
if file.filename == '':
flash('No file')
return redirect(request.url)
if file:
            handlers = [h for h in LOG.handlers if isinstance(
                h, logging.FileHandler)]
for handler in handlers:
LOG.removeHandler(handler)
if os.path.exists(LOG_FILE):
os.unlink(LOG_FILE)
if os.path.exists(CSV_FILE):
os.unlink(CSV_FILE)
textstream = logging.FileHandler(LOG_FILE)
textstream.setFormatter(FORMATTER)
LOG.addHandler(textstream)
file.save(CSV_FILE)
LOG.info('received %s', file.filename)
num_lines = sum(1 for line in open(CSV_FILE))
LOG.info('processing %d lines', num_lines)
LOG.info('starting list processing thread...')
csv_processor = ListProcessingThread()
return redirect(url_for('display_stream'))
return '''
<!doctype html>
<html>
<head>
<title>Upload CSV List</title>
</head>
<body>
<h1>Invite with Excel Format CSV List</h1>
<h2>
Version: %s
</h2>
<pre>
FirstName,LastName,email
Bob,Johnson,<EMAIL>
Mike,Smith,<EMAIL>
Don,Williams,<EMAIL>
Scott,White,<EMAIL>
Justin,Case,<EMAIL>
</pre>
<form method=post enctype=multipart/form-data>
<table>
<tr><th align='left'>API Host: </th><td><input name=apihost value=api.cloudservices.f5.com></td></tr>
<tr><th align='left'>API Version: </th><td><input name=apiversion value=v1></td></tr>
<tr><th align='left'>Username: </th><td><input name=username></td></tr>
<tr><th align='left'>Password: </th><td><input name=password></td></tr>
<tr><th align='left'>Invite as Role ID: </th><td><input name=roleid value=r-NAYFdYfiR></td></tr>
<tr><th align='left'>Dry Run: </th><td><input name=dryrun type=checkbox unchecked></td></tr>
<tr><th align='left'>Delay Between Invites (ms): </th><td><input type=number name=delay min=0 max=10000 value=1000></td></tr>
<tr><th align='left'>CSV Invite File: </th><td><input type=file name=file></td></tr>
</table>
</br>
<input type=submit value=Process>
</form>
</body>
</html>
''' % VERSION
@app.route('/display_stream')
def display_stream():
return '''
<!doctype html>
<html>
<head>
<title>Processing the CSV List</title>
</head>
<body>
<p>Last Record: <span id="latest"></span></p>
<p>Output Log:</p>
<ul id="output"></ul>
<script>
var latest = document.getElementById('latest');
var output = document.getElementById('output');
var position = 0;
var stop_timer = false;
function handleNewData() {
var xhr = new XMLHttpRequest();
xhr.open('GET', '/stream_output');
xhr.send();
xhr.onload = function() {
var messages = xhr.responseText.split('\\n');
messages.slice(position, -1).forEach(function(value) {
console.log(value.includes('finished processing'));
if(value.includes('finished processing')) {
stop_timer = true;
latest.textContent = 'Done';
} else {
latest.textContent = value;
var item = document.createElement('li');
item.textContent = value;
output.appendChild(item);
}
});
position = messages.length - 1;
}
}
var timer;
timer = setInterval(function() {
handleNewData();
if (stop_timer) {
clearInterval(timer);
latest.textContent = 'Done';
}
}, 1000);
</script>
</body>
</html>
'''
@app.route('/stream_output')
def stream():
def generate():
with open(LOG_FILE, 'r') as log_out:
yield log_out.read()
return app.response_class(generate(), mimetype='text/plain')
app.run(host='0.0.0.0', threaded=True)
```
|
{
"source": "jgruberf5/TrustedProxy",
"score": 2
}
|
#### File: TrustedProxy/tests/test_local_trusts.py
```python
import requests
import time
import sys
import signal
import argparse
import logging
LOG = logging.getLogger('trusted_proxy_testing')
LOG.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGSTREAM = logging.StreamHandler(sys.stdout)
LOGSTREAM.setFormatter(FORMATTER)
LOG.addHandler(LOGSTREAM)
def handler(signum, frame):
LOG.info('user requested exit..')
sys.exit(0)
def print_local_id():
local_device_info = {}
try:
device_response = requests.get(
'http://127.0.0.1:8100/mgmt/shared/identified-devices/config/device-info',
auth=requests.auth.HTTPBasicAuth('admin', ''))
device_response.raise_for_status()
local_device_info = device_response.json()
except Exception as ex:
LOG.error(
'local iControl REST error getting local device info.. is restjavad down?')
raise ex
cert_json = {}
try:
cert_response = requests.get(
'http://127.0.0.1:8100/mgmt/shared/device-certificates',
auth=requests.auth.HTTPBasicAuth('admin', ''))
cert_response.raise_for_status()
cert_json = cert_response.json()
except Exception as ex:
LOG.error(
'local iControl REST error getting local certificates.. is restjavad down?')
raise ex
local_certs = []
if 'items' in cert_json:
local_certs = cert_json['items']
if not local_certs:
raise Exception(
'no local certificates found.. local iControl REST error')
local_cert_id = ''
for c in local_certs:
if c['machineId'] == local_device_info['machineId']:
local_cert_id = c['certificateId']
LOG.info("########### LOCAL DEVICE ###########")
LOG.info("%s version %s",
local_device_info['platformMarketingName'], local_device_info['restFrameworkVersion'])
LOG.info("hostname: %s", local_device_info['hostname'])
LOG.info("id: %s", local_device_info['machineId'])
LOG.info("certificate id:%s", local_cert_id)
LOG.info("####################################")
return local_cert_id
def print_local_proxy_trusts():
try:
proxy_response = requests.get(
'http://127.0.0.1:8105/shared/TrustedProxy')
proxy_response.raise_for_status()
except Exception as ex:
LOG.error(
            'local iControl LX exception calling TrustedProxy. Is it installed, or is restnoded down?')
raise ex
proxy_trusts = proxy_response.json()
LOG.info("######## LOCAL PROXY TRUSTS ########")
for d in proxy_trusts:
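        # trust tokens are treated here as valid for 600 seconds; timestamps are reported in milliseconds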
sec_left = int(600 - (int(time.time()) - d['timestamp'] / 1000))
LOG.info('have a trust token for: %s:%d for another %d seconds' %
(d['targetHost'], d['targetPort'], sec_left))
LOG.info("####################################")
return proxy_trusts
def get_remote_device_info(targetHost, targetPort):
data = {
'method': 'Get',
'uri': 'https://%s:%d/mgmt/shared/identified-devices/config/device-info' % (targetHost, targetPort)
}
response = requests.post(
'http://127.0.0.1:8105/shared/TrustedProxy', json=data)
response.raise_for_status()
return response.json()
def get_remote_device_certificates(targetHost, targetPort):
data = {
'method': 'Get',
'uri': 'https://%s:%d/mgmt/shared/device-certificates' % (targetHost, targetPort)
}
response = requests.post(
'http://127.0.0.1:8105/shared/TrustedProxy', json=data)
response.raise_for_status()
response_json = response.json()
if 'items' in response_json:
return response_json['items']
else:
return []
def do_you_trust_me(localonly):
my_cert_id = print_local_id()
devices = print_local_proxy_trusts()
if not localonly:
LOG.info("########## TESTING TRUSTS ##########")
for d in devices:
remote_device_info = get_remote_device_info(
d['targetHost'], d['targetPort'])
remote_certs = get_remote_device_certificates(
d['targetHost'], d['targetPort'])
trusted = False
remote_certificate_id = ''
for c in remote_certs:
if c['certificateId'] == my_cert_id:
trusted = True
if c['machineId'] == remote_device_info['machineId']:
remote_certificate_id = c['certificateId']
remote_print = "%s at %s:%d (%s [%s] machineId: %s certificateId: %s)" % (
remote_device_info['hostname'],
d['targetHost'],
d['targetPort'],
remote_device_info['platformMarketingName'],
remote_device_info['restFrameworkVersion'],
remote_device_info['machineId'],
remote_certificate_id
)
if trusted:
LOG.info("%s trusts me" % remote_print)
LOG.info("####################################")
def test_cycle(delay, localonly=False):
try:
do_you_trust_me(localonly)
except Exception as ex:
LOG.error("test cycle failed with %s" % ex)
time.sleep(delay)
def main():
ap = argparse.ArgumentParser(
prog='test_local_trusts',
usage='%(prog)s.py [options]',
description='poll remote devices assuring trusts are established'
)
ap.add_argument(
'--cycles',
help="The number of cycles through local trusts to test. Default is continuous.",
required=False,
type=int,
default=0
)
ap.add_argument(
'--delay',
help="The delay in seconds between cycles. Default is 10 seconds.",
required=False,
type=int,
default=10
)
ap.add_argument(
'--localonly',
help="Show only local device responses, do not test the remote device trusts. Default is false.",
action='store_true'
)
args = ap.parse_args()
signal.signal(signal.SIGINT, handler)
if args.cycles == 0:
while True:
test_cycle(args.delay, localonly=args.localonly)
else:
for _ in range(args.cycles):
test_cycle(args.delay, localonly=args.localonly)
if __name__ == '__main__':
main()
```
|
{
"source": "jgruber/khsprcexport",
"score": 2
}
|
#### File: jgruber/khsprcexport/create_publisher_record_cards.py
```python
import os
import sys
import argparse
import datetime
import khsprcexport.constants as constants
import khsprcexport.importers as importers
import khsprcexport.exporters as exporters
import khsprcexport.utils as utils
def _generate_pioneer_reports(output_dir, fsgs, publishers, fsrecords, json_output=False, pdf_output=True, pdf_template_file=None):
# collect pioneer publisher ids
pioneer_ids = utils.get_pioneers_ids(fsgs)
# make pioneer report header
pid_headers = {}
for pid in pioneer_ids:
pid_headers[pid] = utils.get_publisher_header(pid, publishers)
# collect reports for pioneers in prc service years
pioneer_service_reports = utils.get_reports(pioneer_ids, fsrecords)
# create reports
output_dir = os.path.join(output_dir, 'active', 'pioneers')
reports_by_pid = {}
for pr in pioneer_service_reports:
if pr['publisher_id'] not in reports_by_pid.keys():
header = utils.get_publisher_header(pr['publisher_id'], publishers)
reports_by_pid[pr['publisher_id']] = {
'header': header,
'reports': []
}
reports_by_pid[pr['publisher_id']]['reports'].append(pr)
for pid in reports_by_pid.keys():
reports_by_pid[pid]['summary'] = utils.get_reports_totals_avgs(
reports_by_pid[pid]['reports'])
if json_output:
os.makedirs(output_dir, exist_ok=True)
# write out json pioneer reports
for r in reports_by_pid.keys():
report_out_file = "%s.json" % reports_by_pid[r]['header']['file_name_prefix']
report_out_path = os.path.join(
output_dir, report_out_file)
exporters.write_json_file(report_out_path, reports_by_pid[r])
if pdf_output:
os.makedirs(output_dir, exist_ok=True)
# write out S-21 pdf
for r in reports_by_pid.keys():
report_out_file = "%s.pdf" % reports_by_pid[r]['header']['file_name_prefix']
report_out_path = os.path.join(
output_dir, report_out_file)
exporters.write_pdf_file(
report_out_path, reports_by_pid[r], pdf_template_file)
return pioneer_ids
def _generate_inactive_reports(output_dir, fsgs, publishers, fsrecords, json_output=False, pdf_output=True, pdf_template_file=None):
# collect inactive publisher ids
inactive_ids = utils.get_inactive_ids(fsrecords)
    # collect reports for inactive in prc service years
inactive_service_reports = utils.get_reports(inactive_ids, fsrecords)
# create reports
output_dir = os.path.join(output_dir, 'inactive')
reports_by_pid = {}
for pr in inactive_service_reports:
if pr['publisher_id'] not in reports_by_pid.keys():
header = utils.get_publisher_header(pr['publisher_id'], publishers)
reports_by_pid[pr['publisher_id']] = {
'header': header,
'reports': []
}
reports_by_pid[pr['publisher_id']]['reports'].append(pr)
for pid in reports_by_pid.keys():
reports_by_pid[pid]['summary'] = utils.get_reports_totals_avgs(
reports_by_pid[pid]['reports'])
if json_output:
os.makedirs(output_dir, exist_ok=True)
# write out json inactive reports
for r in reports_by_pid.keys():
report_out_file = "%s.json" % reports_by_pid[r]['header']['file_name_prefix']
report_out_path = os.path.join(
output_dir, report_out_file)
exporters.write_json_file(report_out_path, reports_by_pid[r])
if pdf_output:
os.makedirs(output_dir, exist_ok=True)
# write out S-21 pdf
for r in reports_by_pid.keys():
report_out_file = "%s.pdf" % reports_by_pid[r]['header']['file_name_prefix']
report_out_path = os.path.join(
output_dir, report_out_file)
exporters.write_pdf_file(
report_out_path, reports_by_pid[r], pdf_template_file)
return inactive_ids
def _generate_fsg_reports(output_dir, fsgs, publishers, fsrecords, json_output=False, pdf_output=True, pdf_template_file=None):
# collect pioneer publisher ids
pioneer_ids = utils.get_pioneers_ids(fsgs)
# collect inactive publisher ids
inactive_ids = utils.get_inactive_ids(fsrecords)
# create a list of field service group report objects
fsg_pub_ids = {}
for fsg in fsgs:
# get publisher ids in this service group
pub_ids = []
for pub in fsg['publishers']:
if (pub['id'] not in pioneer_ids) and (pub['id'] not in inactive_ids):
pub_ids.append(pub['id'])
# collect reports for this service group
fsg_reports = utils.get_reports(pub_ids, fsrecords)
# create reports
fsg_output_dir = os.path.join(
output_dir, 'active', utils.make_dir_name(fsg['name']))
reports_by_pid = {}
for pr in fsg_reports:
if pr['publisher_id'] not in reports_by_pid.keys():
reports_by_pid[pr['publisher_id']] = {
'header': utils.get_publisher_header(pr['publisher_id'], publishers),
'reports': []
}
reports_by_pid[pr['publisher_id']]['reports'].append(pr)
for pid in reports_by_pid.keys():
reports_by_pid[pid]['summary'] = utils.get_reports_totals_avgs(
reports_by_pid[pid]['reports'])
if json_output:
os.makedirs(fsg_output_dir, exist_ok=True)
for r in reports_by_pid.keys():
report_out_file = "%s.json" % reports_by_pid[r]['header']['file_name_prefix']
report_out_path = os.path.join(
fsg_output_dir, report_out_file)
exporters.write_json_file(report_out_path, reports_by_pid[r])
if pdf_output:
os.makedirs(fsg_output_dir, exist_ok=True)
for r in reports_by_pid.keys():
report_out_file = "%s.pdf" % reports_by_pid[r]['header']['file_name_prefix']
report_out_path = os.path.join(
fsg_output_dir, report_out_file)
exporters.write_pdf_file(
report_out_path, reports_by_pid[r], pdf_template_file)
fsg_pub_ids[fsg['id']] = pub_ids
return fsg_pub_ids
def _generate_summary_reports(output_dir, fsgs, publishers, fsrecords, json_output=False, pdf_output=True, pdf_template_file=None):
service_years = utils.get_service_years()
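    # summaries cover the two service years returned by get_service_years() (24 monthly report slots)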
# initial report data dictionaries
pioneer_reports = utils.generate_dummy_report()
pioneer_reports['header']['name'] = 'Pioneer Summary'
pioneer_reports['header']['file_name_prefix'] = 'Pioneers'
auxiliary_pioneer_reports = utils.generate_dummy_report()
auxiliary_pioneer_reports['header']['name'] = 'Auxiliary Pioneer Summary'
auxiliary_pioneer_reports['header']['file_name_prefix'] = 'AuxiliaryPioneers'
publisher_reports = utils.generate_dummy_report()
    publisher_reports['header']['name'] = 'Publisher Summary'
publisher_reports['header']['file_name_prefix'] = 'Publishers'
first_sy_month_indexes = {9: 0, 10: 1, 11: 2, 12: 3,
1: 4, 2: 5, 3: 6, 4: 7, 5: 8, 6: 9, 7: 10, 8: 11}
second_sy_month_indexes = {9: 12, 10: 13, 11: 14, 12: 15,
1: 16, 2: 17, 3: 18, 4: 19, 5: 20, 6: 21, 7: 22, 8: 23}
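    # a service year runs September through August, so each map covers 12 monthly slots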
# used to get active, inactive .. see comments below
# index_to_month_year = {
# 0: (9, service_years[0] - 1),
# 1: (10, service_years[0] - 1),
# 2: (11, service_years[0] - 1),
# 3: (12, service_years[0] -1),
# 4: (1, service_years[0]),
# 5: (2, service_years[0]),
# 6: (3, service_years[0]),
# 7: (4, service_years[0]),
# 8: (5, service_years[0]),
# 9: (6, service_years[0]),
# 10: (7, service_years[0]),
# 11: (8, service_years[0]),
# 12: (9, service_years[1] - 1),
# 13: (10, service_years[1] - 1),
# 14: (11, service_years[1] - 1),
# 15: (12, service_years[1] -1),
# 16: (1, service_years[1]),
# 17: (2, service_years[1]),
# 18: (3, service_years[1]),
# 19: (4, service_years[1]),
# 20: (5, service_years[1]),
# 21: (6, service_years[1]),
# 22: (7, service_years[1]),
# 23: (8, service_years[1]),
#}
all_pub_ids = []
for fsg in fsgs:
for pub in fsg['publishers']:
all_pub_ids.append(pub['id'])
for sr in fsrecords:
if sr['publisher_id'] in all_pub_ids and sr['service_year'] in service_years:
month_index = 0
if sr['service_year'] == service_years[0]:
month_index = first_sy_month_indexes[sr['month']]
if sr['service_year'] == service_years[1]:
month_index = second_sy_month_indexes[sr['month']]
if sr['pioneer']:
rs = pioneer_reports['summary'][sr['service_year']]
rs['total_placements'] = rs['total_placements'] + sr['placements']
rs['total_video_showings'] = rs['total_video_showings'] + \
sr['video_showings']
rs['total_hours'] = rs['total_hours'] + sr['hours']
                rs['total_return_visits'] = rs['total_return_visits'] + \
                    sr['return_visits']
                rs['total_studies'] = rs['total_studies'] + sr['studies']
                rs['number_of_reports'] = rs['number_of_reports'] + 1
                mr = pioneer_reports['reports'][month_index]
                mr['placements'] = mr['placements'] + sr['placements']
                mr['video_showings'] = mr['video_showings'] + sr['video_showings']
                mr['hours'] = mr['hours'] + sr['hours']
                mr['return_visits'] = mr['return_visits'] + sr['return_visits']
mr['studies'] = mr['studies'] + sr['studies']
if isinstance(mr['remarks'], str):
mr['remarks'] = 0
mr['remarks'] = mr['remarks'] + 1
elif sr['auxiliary_pioneer']:
rs = auxiliary_pioneer_reports['summary'][sr['service_year']]
rs['total_placements'] = rs['total_placements'] + sr['placements']
rs['total_video_showings'] = rs['total_video_showings'] + \
sr['video_showings']
rs['total_hours'] = rs['total_hours'] + sr['hours']
                rs['total_return_visits'] = rs['total_return_visits'] + \
                    sr['return_visits']
                rs['total_studies'] = rs['total_studies'] + sr['studies']
                rs['number_of_reports'] = rs['number_of_reports'] + 1
                mr = auxiliary_pioneer_reports['reports'][month_index]
                mr['placements'] = mr['placements'] + sr['placements']
                mr['video_showings'] = mr['video_showings'] + sr['video_showings']
                mr['hours'] = mr['hours'] + sr['hours']
                mr['return_visits'] = mr['return_visits'] + sr['return_visits']
mr['studies'] = mr['studies'] + sr['studies']
if isinstance(mr['remarks'], str):
mr['remarks'] = 0
mr['remarks'] = mr['remarks'] + 1
else:
rs = publisher_reports['summary'][sr['service_year']]
rs['total_placements'] = rs['total_placements'] + sr['placements']
rs['total_video_showings'] = rs['total_video_showings'] + \
sr['video_showings']
rs['total_hours'] = rs['total_hours'] + sr['hours']
                rs['total_return_visits'] = rs['total_return_visits'] + \
                    sr['return_visits']
                rs['total_studies'] = rs['total_studies'] + sr['studies']
                rs['number_of_reports'] = rs['number_of_reports'] + 1
                mr = publisher_reports['reports'][month_index]
                mr['placements'] = mr['placements'] + sr['placements']
                mr['video_showings'] = mr['video_showings'] + sr['video_showings']
                mr['hours'] = mr['hours'] + sr['hours']
                mr['return_visits'] = mr['return_visits'] + sr['return_visits']
mr['studies'] = mr['studies'] + sr['studies']
if isinstance(mr['remarks'], str):
mr['remarks'] = 0
mr['remarks'] = mr['remarks'] + 1
# calculate averages
for sy in service_years:
pioneer_reports['summary'][sy]['avg_placements'] = round(
(pioneer_reports['summary'][sy]['total_placements'] / pioneer_reports['summary'][sy]['number_of_reports']), 2)
pioneer_reports['summary'][sy]['avg_video_showings'] = round(
(pioneer_reports['summary'][sy]['total_video_showings'] / pioneer_reports['summary'][sy]['number_of_reports']), 2)
pioneer_reports['summary'][sy]['total_hours'] = round(pioneer_reports['summary'][sy]['total_hours'], 2)
pioneer_reports['summary'][sy]['avg_hours'] = round(
(pioneer_reports['summary'][sy]['total_hours'] / pioneer_reports['summary'][sy]['number_of_reports']), 2)
pioneer_reports['summary'][sy]['avg_return_visits'] = round(
(pioneer_reports['summary'][sy]['total_return_visits'] / pioneer_reports['summary'][sy]['number_of_reports']), 2)
pioneer_reports['summary'][sy]['avg_studies'] = round(
(pioneer_reports['summary'][sy]['total_studies'] / pioneer_reports['summary'][sy]['number_of_reports']), 2)
auxiliary_pioneer_reports['summary'][sy]['avg_placements'] = round(
(auxiliary_pioneer_reports['summary'][sy]['total_placements'] / auxiliary_pioneer_reports['summary'][sy]['number_of_reports']), 2)
auxiliary_pioneer_reports['summary'][sy]['avg_video_showings'] = round(
(auxiliary_pioneer_reports['summary'][sy]['total_video_showings'] / auxiliary_pioneer_reports['summary'][sy]['number_of_reports']), 2)
auxiliary_pioneer_reports['summary'][sy]['total_hours'] = round(auxiliary_pioneer_reports['summary'][sy]['total_hours'], 2)
auxiliary_pioneer_reports['summary'][sy]['avg_hours'] = round(
(auxiliary_pioneer_reports['summary'][sy]['total_hours'] / auxiliary_pioneer_reports['summary'][sy]['number_of_reports']), 2)
auxiliary_pioneer_reports['summary'][sy]['avg_return_visits'] = round(
(auxiliary_pioneer_reports['summary'][sy]['total_return_visits'] / auxiliary_pioneer_reports['summary'][sy]['number_of_reports']), 2)
auxiliary_pioneer_reports['summary'][sy]['avg_studies'] = round(
(auxiliary_pioneer_reports['summary'][sy]['total_studies'] / auxiliary_pioneer_reports['summary'][sy]['number_of_reports']), 2)
publisher_reports['summary'][sy]['avg_placements'] = round(
(publisher_reports['summary'][sy]['total_placements'] / publisher_reports['summary'][sy]['number_of_reports']), 2)
publisher_reports['summary'][sy]['avg_video_showings'] = round(
(publisher_reports['summary'][sy]['total_video_showings'] / publisher_reports['summary'][sy]['number_of_reports']), 2)
publisher_reports['summary'][sy]['total_hours'] = round(publisher_reports['summary'][sy]['total_hours'], 2)
publisher_reports['summary'][sy]['avg_hours'] = round(
(publisher_reports['summary'][sy]['total_hours'] / publisher_reports['summary'][sy]['number_of_reports']), 2)
publisher_reports['summary'][sy]['avg_return_visits'] = round(
(publisher_reports['summary'][sy]['total_return_visits'] / publisher_reports['summary'][sy]['number_of_reports']), 2)
publisher_reports['summary'][sy]['avg_studies'] = round(
(publisher_reports['summary'][sy]['total_studies'] / publisher_reports['summary'][sy]['number_of_reports']), 2)
# clean up remarks labels
for r in pioneer_reports['reports']:
r['remarks'] = "%d reports" % r['remarks']
for r in auxiliary_pioneer_reports['reports']:
r['remarks'] = "%d reports" % r['remarks']
    # This doesn't work because KHS adds 6 months before a publisher began publishing
    # So your inactive count shows high if you don't know the month they moved in
for indx, r in enumerate(publisher_reports['reports']):
# (month, year) = index_to_month_year[indx]
# print("\n\nGetting active_inactive %d-%d\n\n" % (year, month))
# (active, inactive) = utils.get_active_inactive_publisher_count(fsrecords, year, month)
# r['remarks'] = "%d reports %d active %d inactive " % (r['remarks'], active, inactive)
r['remarks'] = "%d reports " % (r['remarks'])
if json_output:
os.makedirs(output_dir, exist_ok=True)
report_out_file = "%s.json" % pioneer_reports['header']['file_name_prefix']
report_out_path = os.path.join(output_dir, report_out_file)
exporters.write_json_file(report_out_path, pioneer_reports)
report_out_file = "%s.json" % auxiliary_pioneer_reports['header']['file_name_prefix']
report_out_path = os.path.join(output_dir, report_out_file)
exporters.write_json_file(report_out_path, auxiliary_pioneer_reports)
report_out_file = "%s.json" % publisher_reports['header']['file_name_prefix']
report_out_path = os.path.join(output_dir, report_out_file)
exporters.write_json_file(report_out_path, publisher_reports)
if pdf_output:
os.makedirs(output_dir, exist_ok=True)
report_out_file = "%s.pdf" % pioneer_reports['header']['file_name_prefix']
report_out_path = os.path.join(output_dir, report_out_file)
exporters.write_pdf_file(
report_out_path, pioneer_reports, pdf_template_file)
report_out_file = "%s.pdf" % auxiliary_pioneer_reports['header']['file_name_prefix']
report_out_path = os.path.join(output_dir, report_out_file)
exporters.write_pdf_file(
report_out_path, auxiliary_pioneer_reports, pdf_template_file)
report_out_file = "%s.pdf" % publisher_reports['header']['file_name_prefix']
report_out_path = os.path.join(output_dir, report_out_file)
exporters.write_pdf_file(
report_out_path, publisher_reports, pdf_template_file)
'''
Parse command arguments and execute the export workflow.
'''
def main():
ap = argparse.ArgumentParser(
prog='create_publisher_record_cards',
usage='%(prog)s.py [options]',
description='reads KHS DBF files and creates S-21-E pdfs in a virtual card file',
)
ap.add_argument(
'--khsdatadir',
help='path to KHS data files',
required=True
)
ap.add_argument(
'--outputdir',
help='path to output virtual card file',
default='./prc_cardbox',
)
ap.add_argument(
'--json',
help='create reports json',
action='store_true'
)
ap.add_argument(
'--pdf',
help='create S-21-E PDFs',
action='store_true',
)
ap.add_argument(
'--pdftemplate',
        help='original S-21-E.pdf PDF template file - default is ./templates/S-21-E.pdf',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates', 'S-21-E.pdf'),
required=False
)
ap.add_argument(
'--zip',
        help='create a zip archive of the output folder',
action='store_true'
)
ap.add_argument(
'--zipfilename',
help='the zip file to create',
required=False,
default='prcs-%s.zip' % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
)
args = ap.parse_args()
required_files = [
os.path.join(args.khsdatadir, constants.FSGROUP_FILE),
os.path.join(args.khsdatadir, constants.NAMES_FILE),
os.path.join(args.khsdatadir, constants.FIELD_SERVICE_FILE)
]
if args.pdf:
required_files.append(args.pdftemplate)
for file_path in required_files:
if not os.path.exists(file_path):
print("\nCan not find %s, a required file\n" % file_path)
sys.exit(1)
# load data from KHS data
(fsgs, publishers, fsrecords) = importers.load_khs_data(args.khsdatadir)
    # generate the pioneer PRC reports
_generate_pioneer_reports(
args.outputdir, fsgs, publishers, fsrecords, args.json, args.pdf, args.pdftemplate)
# generate the inactive PRC reports
_generate_inactive_reports(
args.outputdir, fsgs, publishers, fsrecords, args.json, args.pdf, args.pdftemplate)
    # generate the field service group reports
_generate_fsg_reports(args.outputdir, fsgs, publishers,
fsrecords, args.json, args.pdf, args.pdftemplate)
# generate the summary PRC reports
_generate_summary_reports(args.outputdir, fsgs, publishers,
fsrecords, args.json, args.pdf, args.pdftemplate)
# optionally zip up the reports
if args.zip:
utils.zipdir(args.zipfilename, args.outputdir)
'''
If the script was executed directly from the CLI, run the main function.
'''
if __name__ == "__main__":
main()
```
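As a quick orientation for the CLI above, here is a hedged sketch of driving it programmatically; the module name `create_publisher_record_cards` and all paths are assumptions, since only the tail of the script appears here.
```python
# Hypothetical programmatic invocation of the exporter; module name and paths are assumptions.
import sys

import create_publisher_record_cards as prc

sys.argv = [
    "create_publisher_record_cards",
    "--khsdatadir", "/path/to/khs/data",   # must contain the Fsgroups/Names/Field_service DBF files
    "--outputdir", "./prc_cardbox",
    "--json",                              # write per-report JSON files
    "--pdf",                               # also fill the S-21-E PDF template
    "--zip",                               # bundle the output folder into a zip archive
]
prc.main()
```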
#### File: khsprcexport/khsprcexport/importers.py
```python
import os
import datetime
from khsprcexport import constants
from dbfread import DBF
'''
Export the field service group list from DBF files.
Each field service group in the Fsgroups.DBF file gets a dictionary in the list.
The group dictionary includes the group ID, the group name, and a list of publishers in the group.
The publishers are taken from the Names.DBF file. Each publishers list entry is a dictionary with
the publisher's ID, first and last name, male/female flags (booleans), anointed and other_sheep
flags (booleans), elder (boolean), ministerial_servant (boolean), regular_pioneer (boolean),
regular_auxiliary_pioneer (boolean), date of birth (YYYY-MM-DD), baptized (boolean),
an unbaptized-publisher flag (boolean), and date immersed (YYYY-MM-DD, or 'UBP' for
unbaptized publishers).
Example:
[
    {
        "id": 1,
        "name": "Unassigned",
        "publishers": [
            {
                "id": 445,
                "last_name": "Doe",
                "first_name": "John",
                "male": true,
                "female": false,
                "anointed": false,
                "other_sheep": true,
                "elder": false,
                "ministerial_servant": false,
                "regular_pioneer": false,
                "regular_auxiliary_pioneer": false,
                "date_of_birth": "2000-01-01",
                "baptized": true,
                "unbatized_publisher": false,
                "date_immersed": "2015-01-01"
            }
        ]
    }
    ....
]
'''
def create_field_service_groups(fsg_file_path, names_path, excludes=[]):
names_db = DBF(names_path)
# build a dictionary of fields we need for the PRC
field_index = {}
for idx, f in enumerate(names_db.field_names):
field_index[f] = idx
fsg_id_index = field_index['FSGROUP_ID']
last_name_index = field_index['LASTNAME']
first_name_index = field_index['FIRSTNAME']
gender_index = field_index['GENDER']
anointed_index = field_index['ANOINTED']
elder_index = field_index['ELDER']
ministerial_servant_index = field_index['SERVANT']
regular_pioneer_index = field_index['PIONEER']
do_birth_index = field_index['DOB']
baptized_index = field_index['BAPTIZED']
unbaptized_publisher_index = field_index['UBP']
do_baptism_index = field_index['BAPTIZEDON']
deceased_index = field_index['DECEASED']
regular_aux_pioneer_index = field_index['AUX_PIONEE']
moved_date_index = field_index['MOVE_DATE']
publishers = {}
fsg_n = {}
for rec in names_db.records:
vals = list(rec.values())
fsg_id = vals[fsg_id_index]
if not fsg_n or fsg_id not in fsg_n.keys():
fsg_n[fsg_id] = []
male = False
female = False
if vals[gender_index] == 2:
female = True
else:
male = True
anointed = False
other_sheep = True
if vals[anointed_index]:
anointed = True
other_sheep = False
elder = False
if vals[elder_index]:
elder = True
ministerial_servant = False
if vals[ministerial_servant_index]:
ministerial_servant = True
regular_pioneer = False
if vals[regular_pioneer_index]:
regular_pioneer = True
baptized = False
if vals[baptized_index]:
baptized = True
unbaptized_publisher = False
if vals[unbaptized_publisher_index]:
unbaptized_publisher = True
regular_aux_pioneer = False
if vals[regular_aux_pioneer_index]:
regular_aux_pioneer = True
do_birth = vals[do_birth_index]
if do_birth:
do_birth = do_birth.strftime("%Y-%m-%d")
else:
do_birth = ''
do_baptism = vals[do_baptism_index]
if do_baptism:
do_baptism = do_baptism.strftime("%Y-%m-%d")
else:
if unbaptized_publisher:
do_baptism = 'UBP'
else:
do_baptism = ''
moved_date = vals[moved_date_index]
if moved_date:
moved_date = moved_date.strftime("%Y-%m-%d")
deceased = False
if vals[deceased_index]:
deceased = True
if not deceased and not moved_date:
publishers[vals[0]] = {
'id': vals[0],
'last_name': vals[last_name_index],
'first_name': vals[first_name_index],
'male': male,
'female': female,
'anointed': anointed,
'other_sheep': other_sheep,
'elder': elder,
'ministerial_servant': ministerial_servant,
'regular_pioneer': regular_pioneer,
'regular_auxiliary_pioneer': regular_aux_pioneer,
'date_of_birth': do_birth,
'baptized': baptized,
'unbatized_publisher': unbaptized_publisher,
'date_immersed': do_baptism
}
fsg_n[fsg_id].append(
publishers[vals[0]]
)
fsgs = []
fsg_db = DBF(fsg_file_path)
for rec in fsg_db.records:
fsg = list(rec.values())
if fsg[1] not in excludes:
fsgs.append(
{'id': fsg[0], 'name': fsg[1], 'publishers': fsg_n[fsg[0]]})
return (fsgs, publishers)
'''
Export field service reports from the Field_service.DBF file.
Each field service report is added to a list of report dictionaries.
Each report dictionary contains the publisher_id who submitted the report, the year (int),
the month (int), the service_year (int), the placements (int), the video_showings (int),
hours, return_visits (int), studies (int), a remarks field (string), pioneer and
auxiliary_pioneer flags (booleans), and a timestamp derived from the year and month.
Example:
[
    {
        "publisher_id": 674,
        "year": 2016,
        "month": 9,
        "service_year": 2017,
        "placements": 0,
        "video_showings": 0,
        "hours": 7.0,
        "return_visits": 2,
        "studies": 0,
        "remarks": "",
        "pioneer": false,
        "auxiliary_pioneer": false
    }
    ...
]
'''
def create_field_service_reports(fs_file_path):
fs_db = DBF(fs_file_path)
# build a dictionary of fields we need for the PRC
field_index = {}
for idx, f in enumerate(fs_db.field_names):
field_index[f] = idx
publisher_id_index = field_index['NAMES_ID']
year_month_index = field_index['YEARMONTH']
placements_index = field_index['PLACEMENTS']
video_showings_index = field_index['VIDEOS']
hours_index = field_index['HOURS']
return_visits_index = field_index['RVS']
studies_index = field_index['STUDIES']
remarks_index = field_index['REMARKS']
par_index = field_index['PAR']
fs_reports = []
for rec in fs_db.records:
vals = list(rec.values())
auxiliary_pioneer = False
pioneer = False
if vals[par_index] == 2:
auxiliary_pioneer = True
if vals[par_index] == 3:
pioneer = True
year = int(vals[year_month_index][0:4])
month = int(vals[year_month_index][-2:])
service_year = year
if month > 8:
service_year = year + 1
if vals[placements_index] is None:
vals[placements_index] = 0
if vals[video_showings_index] is None:
vals[video_showings_index] = 0
if vals[hours_index] is None:
vals[hours_index] = 0
if vals[return_visits_index] is None:
vals[return_visits_index] = 0
if vals[studies_index] is None:
vals[studies_index] = 0
fs_reports.append(
{
'publisher_id': vals[publisher_id_index],
'year': year,
'month': month,
'service_year': service_year,
'placements': vals[placements_index],
'video_showings': vals[video_showings_index],
'hours': vals[hours_index],
'return_visits': vals[return_visits_index],
'studies': vals[studies_index],
'remarks': vals[remarks_index],
'pioneer': pioneer,
'auxiliary_pioneer': auxiliary_pioneer,
'timestamp': datetime.datetime.strptime("%d-%d" % (year, month), '%Y-%m').timestamp()
}
)
return fs_reports
def populate_fsgs(khsdatadir=None, exclude_fsg_names=[]):
(fsgs, publishers) = create_field_service_groups(
os.path.join(khsdatadir, constants.FSGROUP_FILE),
os.path.join(khsdatadir, constants.NAMES_FILE),
exclude_fsg_names)
return (fsgs, publishers)
def populate_fsrecs(khsdatadir):
return create_field_service_reports(
os.path.join(khsdatadir, constants.FIELD_SERVICE_FILE))
def load_khs_data(khsdatadir):
(fsgs, publishers) = populate_fsgs(khsdatadir)
fsrecs = populate_fsrecs(khsdatadir)
return (fsgs, publishers, fsrecs)
```
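A minimal usage sketch for the loader above, assuming a KHS data directory laid out as `constants` expects; the path and the downstream filtering are illustrative only.
```python
# Illustrative driver for the importers module; the data path is a placeholder.
from khsprcexport import importers

fsgs, publishers, fsrecords = importers.load_khs_data("/path/to/khs/data")

print("field service groups:", len(fsgs))
print("active publishers:", len(publishers))
print("field service reports:", len(fsrecords))

# Example: reports for one publisher, newest first.
some_id = next(iter(publishers))
latest_first = sorted(
    (r for r in fsrecords if r["publisher_id"] == some_id),
    key=lambda r: r["timestamp"],
    reverse=True,
)
```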
|
{
"source": "jgru/dfxml_python",
"score": 3
}
|
#### File: dfxml/bin/deidentify_xml.py
```python
import typing
private_dirs = ["home/","usr/home","Users"]
ok_top_paths_win = ["program files/","System","Windows"]
ok_top_paths_mac = ["bin/","usr","etc","private","applications","developer",'bin','sbin','lib','dev']
ok_top_paths = ok_top_paths_win + ok_top_paths_mac + ['$orphanfiles']
acceptable_extensions = ["exe","dll","sys","com","hlp"]
import os.path, os, sys
partdir : typing.Dict[str, str] = dict()
def sanitize_part(part):
"""Sanitize a part of a pathname in a consistent manner"""
if part not in partdir:
partdir[part] = "P%07d" % (len(partdir)+1)
return partdir[part]
def sanitize_filename(fname):
"""Given a filename, sanitize each part and return it."""
ofn = fname
jfn = fname
if jfn[0]=='/': jfn=jfn[1:]
pathok = False
for p in ok_top_paths:
        if jfn.lower().startswith(p.lower()):
pathok = True
if not pathok:
# if the path is not okay, replace all of the parts
# and the name up to the .ext
parts = fname.split("/")
parts[:-1] = [sanitize_part(s) for s in parts[:-1]]
(root,ext) = os.path.splitext(parts[-1])
        if ext.lstrip(".") not in acceptable_extensions:
parts[-1] = sanitize_part(root) + ext
fname = "/".join(parts)
if ofn[0]=='/' and fname[0]!='/':
fname = "/" + fname
return fname
class xml_sanitizer:
"""Read and write the XML, but sanitize the filename elements."""
def __init__(self,out):
self.out = out
self.cdata = ""
def _start_element(self, name, attrs):
""" Handles the start of an element for the XPAT scanner"""
s = ['<',name]
if attrs:
for (a,v) in attrs.items():
if '"' not in v:
s += [' ',a,'="',v,'"']
else:
s += [" ",a,"='",v,"'"]
s += ['>']
self.out.write("".join(s))
self.cdata = "" # new element
def _end_element(self, name):
"""Handles the end of an element for the XPAT scanner"""
if name=="filename":
self.cdata = sanitize_filename(self.cdata)
if self.cdata=="\n": self.cdata=""
self.out.write("".join([self.cdata,'</',name,'>']))
self.cdata = ""
def _char_data(self, data):
"""Handles XML data"""
self.cdata += data
def process_xml_stream(self,xml_stream):
"Run the reader on a given XML input stream"
import xml.parsers.expat
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = self._start_element
p.EndElementHandler = self._end_element
p.CharacterDataHandler = self._char_data
p.ParseFile(xml_stream)
if __name__=="__main__":
from optparse import OptionParser
global options
parser = OptionParser()
parser.add_option("-t","--test",help='Test a specific pathanme to sanitize')
(options,args) = parser.parse_args()
if options.test:
if os.path.isdir(options.test):
for (dirpath,dirnames,filenames) in os.walk(options.test):
for filename in filenames:
fn = dirpath+"/"+filename
print("%s\n %s" % (fn,sanitize_filename(fn)))
x = xml_sanitizer(sys.stdout)
x.process_xml_stream(open(args[0],'rb'))
```
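For context, a small hedged demonstration of the sanitizer above; it assumes `deidentify_xml.py` is importable from the working directory, and the example paths are invented.
```python
# Illustrative only; assumes deidentify_xml.py is on the import path.
from deidentify_xml import sanitize_filename, sanitize_part

# Non-whitelisted path parts are replaced by stable pseudonyms (P0000001, P0000002, ...).
print(sanitize_part("alice"))
print(sanitize_filename("home/alice/finances.xlsx"))

# Paths that start with an allowed top-level directory are intended to pass through unchanged.
print(sanitize_filename("Windows/System32/notepad.exe"))
```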
#### File: dfxml/bin/idifference2.py
```python
__version__ = "2.0.0alpha2"
import sys
import logging
import os
_logger = logging.getLogger(os.path.basename(__file__))
import dfxml.objects as Objects
import make_differential_dfxml
import summarize_differential_dfxml
def ignorable_name(fn):
"""Filter out recognized pseudo-file names, accommodating user request for including dotdirs."""
if fn is None:
return False
if args.include_dotdirs and os.path.basename(fn) in [".", ".."]:
return False
return make_differential_dfxml.ignorable_name(fn)
def main():
    global args
    import argparse
parser = argparse.ArgumentParser(description='%prog [options] file1 file2 (files can be xml or image files)')
returningsoon = parser.add_argument_group("Returning soon", "Some of the options in idifference were not carried forward in the reimplementation. Please feel free to request these features be re-implemented if you need them.")
parser.add_argument("-d","--debug",help="Enable debug printing",action='store_true')
parser.add_argument("-x","--xml",help="Specify output file for DFXML manifest of differences",dest="xmlfilename")
parser.add_argument("--include-dotdirs",help="Include files with names ending in '/.' and '/..'",action="store_true", default=False)
parser.add_argument("--sort-by", help="Sorts reported file lists. Pass one of these arguments: \"times\" or \"paths\".")
parser.add_argument("--summary",help="output summary statistics of file system changes",action="store_true", default=False)
parser.add_argument("--timestamp",help="output all times in Unix timestamp format; otherwise use ISO 8601",action="store_true")
returningsoon.add_argument("-n","--notimeline",help="do not generate a timeline",action="store_true")
returningsoon.add_argument("-T","--tararchive",help="create tar archive file of new/changed files",dest="tarfile")
returningsoon.add_argument("-Z","--zipfile",help="create ZIP64 archive file of new/changed files",dest="zipfile")
returningsoon.add_argument("--html",help="specify output in HTML",action="store_true")
returningsoon.add_argument("--noatime",help="Do not include atime changes",action="store_true")
returningsoon.add_argument("--imagefile",help="specifies imagefile or file2 is an XML file and you are archiving")
parser.add_argument("infiles", nargs="+")
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
if len(args.infiles) != 2:
raise NotImplementedError("Sorry, but this version of idifference can only run on two disk images, not a longer sequence. Please feel free to request longer sequences be re-implemented if you need it.")
if args.tarfile:
raise NotImplementedError("Sorry, but the tarring argument was not carried forward in the re-implementation. Please feel free to request this feature be re-implemented if you need it.")
if args.zipfile:
raise NotImplementedError("Sorry, but the zipping argument was not carried forward in the re-implementation. Please feel free to request this feature be re-implemented if you need it.")
#TODO The Extractor program should get a Zip-handling function to handle this flag.
if args.html:
raise NotImplementedError("Sorry, but the HTML output argument was not carried forward in the re-implementation. Please feel free to request this feature be re-implemented if you need it.")
if args.noatime:
raise NotImplementedError("Sorry, but the ignore-atime argument was not carried forward in the re-implementation. Please feel free to request this feature be re-implemented if you need it.")
if args.notimeline:
raise NotImplementedError("Sorry, but the notimeline argument was not carried forward in the re-implementation. Please feel free to request this feature be re-implemented if you need it.")
if args.imagefile:
raise NotImplementedError("Sorry, but the imagefile argument was not carried forward in the re-implementation. Please feel free to request this feature be re-implemented if you need it.")
pre = None
post = None
for infile in args.infiles:
pre = post
post = infile
_logger.info(">>> Reading %s." % infile)
        if pre is not None:
diffdfxml = make_differential_dfxml.make_differential_dfxml(
pre,
post,
diff_mode="idifference",
ignore_filename_function=ignorable_name
)
diffdfxml.program = sys.argv[0]
diffdfxml.program_version = __version__
if args.xmlfilename:
_logger.debug("Opening temp file for writing.")
with open(args.xmlfilename, "w") as fh:
diffdfxml.print_dfxml(output_fh=fh)
summarize_differential_dfxml.report(
diffdfxml,
sort_by=args.sort_by,
summary=args.summary,
timestamp=args.timestamp
)
if __name__=="__main__":
main()
```
#### File: dfxml/bin/summarize_differential_dfxml.py
```python
__version__ = "0.8.4"
import collections
import copy
import logging
import os
from dfxml import objects as Objects
import dfxml.bin.idifference as idifference
_logger = logging.getLogger(os.path.basename(__file__))
#Only issue a potentially verbose warning once
_nagged_timestamp_format = False
class FOCounter(object):
"Counter for FileObjects. Does not count differences (differential annotations)."
def __init__(self):
self._inodes = set()
self._fo_tally = 0
self._fo_unalloc_unmatch_tally = 0
self._fo_allocation_tallies_inode = {True:0, False:0, None:0}
self._fo_allocation_tallies_name = {True:0, False:0, None:0}
def add(self, obj):
assert isinstance(obj, Objects.FileObject)
self._inodes.add((obj.partition, obj.inode))
self._fo_tally += 1
self._fo_allocation_tallies_inode[obj.alloc_inode] += 1
self._fo_allocation_tallies_name[obj.alloc_name] += 1
if not (obj.alloc_name and obj.alloc_inode) and obj.original_fileobject is None:
self._fo_unalloc_unmatch_tally += 1
@property
def inode_tally(self):
return len(self._inodes)
@property
def fo_tally(self):
return self._fo_tally
@property
def fo_unalloc_unmatch_tally(self):
return self._fo_unalloc_unmatch_tally
@property
def fo_tally_alloc_inode(self):
return self._fo_allocation_tallies_inode[True]
@property
def fo_tally_alloc_name(self):
return self._fo_allocation_tallies_name[True]
@property
def fo_tally_nullalloc_inode(self):
return self._fo_allocation_tallies_inode[None]
@property
def fo_tally_nullalloc_name(self):
return self._fo_allocation_tallies_name[None]
@property
def fo_tally_unalloc_inode(self):
return self._fo_allocation_tallies_inode[False]
@property
def fo_tally_unalloc_name(self):
return self._fo_allocation_tallies_name[False]
def report(dfxmlobject, sort_by=None, summary=None, timestamp=None):
new_files = []
deleted_files = []
deleted_files_matched = []
deleted_files_unmatched = []
renamed_files = []
renamed_files_directory = []
renamed_files_regular = []
renamed_files_other = []
renamed_files_type_changed = []
renamed_files_type_changes = collections.defaultdict(int) #Key: (old name_type, new name_type); value: counter
renamed_files_content_matches = []
modified_files = []
changed_files = []
unchanged_files = []
obj_alloc_counters = [FOCounter(), FOCounter()]
matched_files_tally = 0
def _is_matched(obj):
_matched = "matched" in obj.annos
return _matched
#Group objects by differential annotations
for obj in dfxmlobject:
if isinstance(obj, Objects.FileObject):
if "matched" in obj.annos:
matched_files_tally += 1
#_logger.debug("Inspecting %s for changes" % obj)
if "new" in obj.annos:
new_files.append(obj)
elif "deleted" in obj.annos:
deleted_files.append(obj)
if _is_matched(obj):
deleted_files_matched.append(obj)
else:
deleted_files_unmatched.append(obj)
elif "renamed" in obj.annos:
#Count content matches
if obj.original_fileobject.sha1 == obj.sha1:
renamed_files_content_matches.append(obj)
renamed_files.append(obj)
if obj.name_type != obj.original_fileobject.name_type:
renamed_files_type_changed.append(obj)
renamed_files_type_changes[(obj.original_fileobject.name_type or "", obj.name_type or "")] += 1
elif obj.name_type == "r":
renamed_files_regular.append(obj)
elif obj.name_type == "d":
renamed_files_directory.append(obj)
else:
renamed_files_other.append(obj)
elif "modified" in obj.annos:
modified_files.append(obj)
elif "changed" in obj.annos:
changed_files.append(obj)
else:
unchanged_files.append(obj)
#Count files of the post image
if "deleted" in obj.annos:
#Don't count the "Ghost" files created for deleted files that weren't matched between images
if _is_matched(obj):
obj_alloc_counters[1].add(obj)
else:
obj_alloc_counters[1].add(obj)
#Count files of the baseline image
if obj.original_fileobject:
obj_alloc_counters[0].add(obj.original_fileobject)
elif isinstance(obj, Objects.VolumeObject):
#TODO
pass
def _sortkey_singlefi():
"""Return a sorting key function, fit for use in sorted() on a list of FileObjects."""
def _key_by_path(fi):
return (
fi.filename or "",
str(fi.mtime) or "n/a",
(fi.original_fileobject and fi.original_fileobject.filename) or "",
(fi.original_fileobject and str(fi.original_fileobject.mtime)) or "n/a"
)
def _key_by_times(fi):
return (
str(fi.mtime) or "n/a",
str(fi.crtime) or "n/a",
fi.filename,
(fi.original_fileobject and str(fi.original_fileobject.mtime)) or "n/a",
(fi.original_fileobject and str(fi.original_fileobject.crtime)) or "n/a",
(fi.original_fileobject and fi.original_fileobject.filename) or ""
)
if sort_by == "path":
return _key_by_path
else: #Default: "times"
return _key_by_times
    def _format_timestamp(t):
        """Takes a timestamp, returns a string."""
        global _nagged_timestamp_format
if t is None:
return "n/a"
if timestamp:
if t.timestamp:
return str(t.timestamp)
else:
if not _nagged_timestamp_format:
_nagged_timestamp_format = True
_logger.warning("Tried to format a Unix timestamp, but failed.")
return "n/a"
else:
return str(t)
idifference.h2("New files:")
new_files_sorted = sorted(new_files, key=_sortkey_singlefi())
res = [(_format_timestamp(obj.mtime), obj.filename or "", obj.filesize) for obj in new_files_sorted]
idifference.table(res)
idifference.h2("Deleted files:")
deleted_files_sorted = sorted(deleted_files, key=_sortkey_singlefi())
res = [(
obj.original_fileobject.mtime,
obj.original_fileobject.filename or "",
obj.original_fileobject.filesize
) for obj in deleted_files_sorted]
idifference.table(res)
def _sortkey_renames():
def _key_by_path(fi):
return (
fi.original_fileobject.filename or "",
fi.filename or "",
str(fi.mtime) or "",
str(fi.original_fileobject.mtime) or ""
)
def _key_by_times(fi):
return (
str(fi.mtime) or "n/a",
str(fi.ctime) or "n/a",
str(fi.atime) or "n/a",
str(fi.dtime) or "n/a",
str(fi.crtime) or "n/a",
fi.original_fileobject.filename or "",
fi.filename or ""
)
if sort_by == "path":
return _key_by_path
else: #Default: "times"
return _key_by_times
def _enumerated_changes(filelist):
res = []
for fi in filelist:
diffs_remaining = copy.deepcopy(fi.diffs)
if "filename" in diffs_remaining:
diffs_remaining -= {"filename"}
res.append(("Renamed", "", fi.original_fileobject.filename, "renamed to", fi.filename))
for timeattr in Objects.TimestampObject.timestamp_name_list:
if timeattr in diffs_remaining:
diffs_remaining -= {timeattr}
res.append((
fi.filename or "",
"%s changed, " % timeattr,
_format_timestamp(getattr(fi.original_fileobject, timeattr)),
"->",
_format_timestamp(getattr(fi, timeattr))
))
for diff in sorted(diffs_remaining):
diffs_remaining -= {diff}
res.append((
fi.filename or "",
"%s changed, " % diff,
                    getattr(fi.original_fileobject, diff) or "",
                    "->",
getattr(fi, diff) or "",
))
return res
idifference.h2("Renamed files:")
renamed_files_sorted = sorted(renamed_files, key=_sortkey_renames())
res = _enumerated_changes(renamed_files_sorted)
idifference.table(res, break_on_change=True)
idifference.h2("Files with modified contents:")
modified_files_sorted = sorted(modified_files, key=_sortkey_singlefi())
res = _enumerated_changes(modified_files_sorted)
idifference.table(res, break_on_change=True)
idifference.h2("Files with changed properties:")
changed_files_sorted = sorted(changed_files, key=_sortkey_singlefi())
res = _enumerated_changes(changed_files_sorted)
idifference.table(res, break_on_change=True)
if summary:
idifference.h2("Summary:")
summ_recs = [
("Prior image's file (file object) tally", str(obj_alloc_counters[0].fo_tally)),
(" Inode allocation", ""),
(" Allocated", str(obj_alloc_counters[0].fo_tally_alloc_inode)),
(" Unallocated", str(obj_alloc_counters[0].fo_tally_unalloc_inode)),
(" Unknown", str(obj_alloc_counters[0].fo_tally_nullalloc_inode)),
(" Name allocation", ""),
(" Allocated", str(obj_alloc_counters[0].fo_tally_alloc_name)),
(" Unallocated", str(obj_alloc_counters[0].fo_tally_unalloc_name)),
(" Unknown", str(obj_alloc_counters[0].fo_tally_nullalloc_name)),
(" Unallocated, unmatched", obj_alloc_counters[0].fo_unalloc_unmatch_tally),
("Prior image's file (inode) tally", str(obj_alloc_counters[0].inode_tally)),
("Current image's file (file object) tally", str(obj_alloc_counters[1].fo_tally)),
(" Inode allocation", ""),
(" Allocated", str(obj_alloc_counters[1].fo_tally_alloc_inode)),
(" Unallocated", str(obj_alloc_counters[1].fo_tally_unalloc_inode)),
(" Unknown", str(obj_alloc_counters[1].fo_tally_nullalloc_inode)),
(" Name allocation", ""),
(" Allocated", str(obj_alloc_counters[1].fo_tally_alloc_name)),
(" Unallocated", str(obj_alloc_counters[1].fo_tally_unalloc_name)),
(" Unknown", str(obj_alloc_counters[1].fo_tally_nullalloc_name)),
(" Unallocated, unmatched", obj_alloc_counters[1].fo_unalloc_unmatch_tally),
("Current image's file (inode) tally", str(obj_alloc_counters[1].inode_tally)),
("Matched files", str(matched_files_tally)),
("", ""),
("New files", str(len(new_files))),
("Deleted files", str(len(deleted_files))),
(" Unmatched", str(len(deleted_files_unmatched))),
(" Matched", str(len(deleted_files_matched))),
("Renamed files", str(len(renamed_files))),
(" Directories", str(len(renamed_files_directory))),
(" Regular files", str(len(renamed_files_regular))),
(" Other", str(len(renamed_files_other))),
(" Type changed", str(len(renamed_files_type_changed))),
]
for key in sorted(renamed_files_type_changes.keys()):
summ_recs.append((" %s -> %s" % key, str(renamed_files_type_changes[key])))
summ_recs += [
(" Content matches", str(len(renamed_files_content_matches))),
("Files with modified content", str(len(modified_files))),
("Files with changed file properties", str(len(changed_files)))
]
idifference.table(summ_recs)
def main():
global args
dfxmlobject = Objects.parse(args.infile)
report(dfxmlobject, sort_by=args.sort_by, summary=args.summary)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("--sort-by", help="Sorts file lists. Pass one of these arguments: \"times\" or \"path\".")
parser.add_argument("--summary",help="output summary statistics of file system changes",action="store_true", default=False)
parser.add_argument("infile", help="A differential DFXML file. Should include the optional 'delta:matched' attributes for counts to work correctly.")
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
if not args.infile.endswith("xml"):
raise Exception("Input file should be a DFXML file, and should end with 'xml': %r." % args.infile)
if not os.path.exists(args.infile):
raise Exception("Input file does not exist: %r." % args.infile)
main()
```
#### File: dfxml/tests/ByteRuns_test.py
```python
__version__ = "0.1.1"
import os
import sys
import copy
import dfxml.objects as Objects
def test_all():
br0 = Objects.ByteRun()
br0.img_offset = 0
br0.len = 20
br1 = Objects.ByteRun()
br1.img_offset = 20
br1.len = 30
br2 = Objects.ByteRun()
br2.img_offset = 50
br2.len = 20
brs_contiguous = Objects.ByteRuns()
brs_contiguous.append(br0)
brs_contiguous.append(br1)
brs_contiguous.append(br2)
brs_glommed = Objects.ByteRuns()
brs_glommed.glom(br0)
brs_glommed.glom(br1)
brs_glommed.glom(br2)
brs_discontig = Objects.ByteRuns()
brs_discontig.glom(br0)
brs_discontig.glom(br2)
brs_backward = Objects.ByteRuns()
brs_backward.glom(br1)
brs_backward.glom(br0)
assert len(brs_contiguous) == 3
assert len(brs_glommed) == 1
assert len(brs_discontig) == 2
assert len(brs_backward) == 2
assert brs_glommed[0].len == 70
assert brs_backward[0].len == 30
assert brs_backward[1].len == 20
br_facet_data = Objects.ByteRuns(facet="data")
br_facet_name = Objects.ByteRuns(facet="name")
br_facet_default = Objects.ByteRuns()
assert br_facet_data == br_facet_default
assert br_facet_name != br_facet_data
assert br_facet_name != br_facet_default
```
#### File: dfxml/tests/diffing_FileObject_test.py
```python
__version__ = "0.1.1"
import sys
import logging
import os
import dfxml.objects as Objects
def test_all():
logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(os.path.basename(__file__))
f0 = Objects.FileObject()
fo = Objects.FileObject()
pfo = Objects.FileObject()
pfo.inode = 234
f0.parent_object = pfo
f0.filename = "test file"
f0.error = "Neither a real file, nor real error"
f0.partition = 2
f0.id = 235
f0.name_type = "r"
f0.filesize = 1234
f0.unalloc = 0
f0.unused = 0
f0.orphan = 0
f0.compressed = 1
f0.inode = 6543
f0.libmagic = "data"
f0.meta_type = 8
f0.mode = 755
f0.nlink = 1
f0.uid = "S-1-234-etc"
f0.gid = "S-2-234-etc"
f0.mtime = "1999-12-31T12:34:56Z"
f0.ctime = "1998-12-31T12:34:56Z"
f0.atime = "1997-12-31T12:34:56Z"
f0.crtime = "1996-12-31T12:34:56Z"
f0.seq = 3
f0.dtime = "1995-12-31T12:34:56Z"
f0.bkup_time = "1994-12-31T12:34:56Z"
f0.link_target = "Nonexistent file"
f0.libmagic = "Some kind of compressed"
f0.md5 = "db72d20e83d0ae39771403bc4cdde040"
f0.sha1 = "866e1f426b2380aaf74a091aa0f39f62ae8a2de7"
f0.sha256 = "4bc5996997ab9196b2d998b05ef302ed1dc167d74ec881533ee35008b5168630"
f0.sha384 = "2ec378692eeae4b855f58832664f95bb85411caac8dcebe7cd3916e915559d3f0ccda688a1fad1e3f47801fe15298ac0"
#fo.brs = brs #TODO
_logger.debug("f0 = %r" % f0)
_logger.debug("f0.to_dfxml() = %r" % f0.to_dfxml())
e0 = f0.to_Element()
_logger.debug("e0 = %r" % e0)
#f1 = eval(repr(f0)) #TODO The recursive evals cause namespace confusion (Objects.foo); replace the next two lines when that's settled.
f1 = Objects.FileObject()
f1.populate_from_Element(e0)
f2 = Objects.FileObject()
f2.populate_from_Element(e0)
#The id property should not be included in the comparisons
f1.id = 111
f1.alloc = False
f2.mtime = "2999-12-31T12:34:56Z"
f2.md5 = "593c8fe4a2236f3eeba7f4577b663876"
f2.sha1 = "0c0c20c03bdb8913da8ea120bd59ba5f596deceb"
f2.sha256 = "4f6dcb46e0f7b0ad748d083f6e92d7df586d0298a94acc3795287ff156614540"
f2.sha384 = "2af87ca47d01989009caf3927a84be215528a53629dd935a828921ac0a4b22202bcba20d38fdd16d719b8c4241fcdacb"
_logger.debug("f1 = %r" % f1)
d01 = f0.compare_to_other(f1)
_logger.debug("d01 = %r" % d01)
assert d01 == set(["alloc"]) or d01 == set(["alloc", "unalloc"])
d02 = f0.compare_to_other(f2)
_logger.debug("d02 = %r" % d02)
assert d02 == set(["mtime", "md5", "sha1", "sha256", "sha384"])
f2.original_fileobject = f0
f2.compare_to_original()
_logger.debug("f2.diffs = %r" % f2.diffs)
assert f2.diffs == d02
#TODO include byte_runs
if __name__=="__main__":
test_all()
```
#### File: dfxml/tests/FileObject_externals_test.py
```python
__version__ = "0.1.1"
import sys
import logging
import os
import xml.etree.ElementTree as ET
import dfxml.objects as Objects
def test_all():
_logger = logging.getLogger(os.path.basename(__file__))
logging.basicConfig(level=logging.DEBUG)
XMLNS_TEST_CLAMSCAN = "file:///opt/local/bin/clamscan"
XMLNS_TEST_UNREGGED = "file:///dev/random"
ET.register_namespace("clam", XMLNS_TEST_CLAMSCAN)
fi = Objects.FileObject()
fi.filename = "clamscanned"
#Try and fail to add a non-Element to the list.
failed = None
_logger.debug("Before: " + repr(fi.externals))
try:
fi.externals.append(1)
failed = False
except TypeError:
failed = True
except:
failed = True
raise
_logger.debug("After: " + repr(fi.externals))
assert failed
failed = None
#Dummy up a non-DFXML namespace element. This should be appendable.
e = ET.Element("{%s}scan_results" % XMLNS_TEST_CLAMSCAN)
e.text = "Clean"
fi.externals.append(e)
#Dummy up a DFXML namespace element. This should not be appendable (the schema specifies other namespaces).
e = ET.Element("{%s}filename" % Objects.dfxml.XMLNS_DFXML)
e.text = "Superfluous name"
_logger.debug("Before: " + repr(fi.externals))
try:
fi.externals.append(e)
failed = False
except ValueError:
failed = True
except:
failed = True
raise
_logger.debug("After: " + repr(fi.externals))
assert failed
failed = None
#Add an element with the colon prefix style
e = ET.Element("clam:version")
e.text = "20140101"
fi.externals.append(e)
#Add an element that doesn't have an ET-registered namespace prefix.
e = ET.Element("{%s}test2" % XMLNS_TEST_UNREGGED)
e.text = "yes"
fi.externals.append(e)
#Test serialization
s = Objects._ET_tostring(fi.to_Element()) #TODO Maybe this should be more than an internal function.
_logger.debug(s)
if s.find("scan_results") == -1:
raise ValueError("Serialization did not output other-namespace element 'scan_results'.")
if s.find("clam:version") == -1:
raise ValueError("Serialization did not output prefixed element 'clam:version'.")
if s.find("test2") == -1:
raise ValueError("Serialization did not output unregistered-prefix element 'test2'.")
#Test de-serialization
fir = Objects.FileObject()
x = ET.XML(s)
fir.populate_from_Element(x)
_logger.debug("De-serialized: %r." % fir.externals)
assert len(fir.externals) == 3
if __name__=="__main__":
test_all()
```
#### File: dfxml_python/tests/test_reads.py
```python
import os
import pytest
import dfxml
import dfxml.objects
def nop(x):
pass
@pytest.fixture
def top_srcdir() -> str:
srcdir = os.path.dirname(__file__)
retval = os.path.join(srcdir, "..")
assert os.path.isdir(os.path.join(retval, "samples")), "Hard-coded expected path not found, '${top_srcdir}/samples/'."
return retval
@pytest.fixture
def difference_test_0_filepath(top_srcdir) -> str:
retval = os.path.join(top_srcdir, "samples", "difference_test_0.xml")
assert os.path.exists(retval), "Hard-coded path to file did not find expected file, '${top_srcdir}/samples/difference_test_0.xml'."
return retval
def test_read_dfxml(difference_test_0_filepath):
"""
This test confirms that the DFXML pip-managed packaging exposes the dfxml package and the objects.py module.
"""
with open(difference_test_0_filepath, "rb") as fh:
dfxml.read_dfxml(fh, callback=nop)
def test_objects_iterparse(difference_test_0_filepath):
"""
This test confirms that the DFXML pip-managed packaging exposes the dfxml package's objects.py module.
"""
for (event, obj) in dfxml.objects.iterparse(difference_test_0_filepath):
pass
```
|
{
"source": "jgrugru/encrypto-env",
"score": 3
}
|
#### File: encrypto-env/encryptoenv/CLI.py
```python
import argparse
from os import getcwd
from .EnvFile import EnvFile
cli_version = '0.0.1-beta'
class CLI():
"""
Class that filters through the command line options.
The run_script function is the lead function, calling
functions for each specified option.
"""
def __init__(self, args):
self.args = self.parse_args(args)
self.environment_path = getcwd() + '/env/'
self.env_filename = ".env"
self.pem_filename = "my_key.pem"
def parse_args(self, args):
self.my_parser = argparse.ArgumentParser(
prog='encrypto-env',
usage='%(prog)s [options] path',
description="Encrypt the contents of your .env file \
with an RSA key.",
epilog='Please let me know of any improvements \
that could be made. |\
email: <EMAIL> | github: @jgrugru',
fromfile_prefix_chars='@')
self.my_group = self.my_parser.add_mutually_exclusive_group(
required=False)
self.my_parser.version = cli_version
self.add_arguments()
return self.my_parser.parse_args(args)
def add_arguments(self):
self.my_parser.add_argument(
"-p",
"--pem-file",
metavar="pem_filepath",
type=str,
help="The pem filepath relative to the environment path folder")
self.my_parser.add_argument(
'--environment-path',
metavar="env_path",
type=str,
help="Default is 'env' dir. Default dir for RSA key and .env")
self.my_parser.add_argument(
'-a',
'--add-variable',
action='store',
metavar="var",
type=str,
nargs='+',
help="Add variables to the .env file")
self.my_parser.add_argument(
'--clear',
action='store_true',
help="Clear the contents of the .env file")
self.my_parser.add_argument(
'--dot-env-file',
metavar="dot_env_file",
action="store",
help="The .env filepath relative to the \
environment path folder")
self.my_parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose ouptut")
self.my_parser.add_argument(
'--version',
action="version")
self.my_group.add_argument(
'-E',
'--Encrypt',
action='store_true',
help="Encrypt .env file")
self.my_group.add_argument(
'-D',
'--Decrypt',
action='store_true',
help='Decrypt .env file')
self.my_group.add_argument(
'--no-key',
action='store_true',
help="Disables creation of my_key.pem file")
self.my_group.add_argument(
'-l',
'--list-variables',
action='store_true',
help="List the variable names stored in the .env file"
)
def get_env_file(self):
return self.env_file
def environment_path_option(self):
if self.args.environment_path:
self.environment_path = self.args.environment_path
def dot_env_file_option(self):
if self.args.dot_env_file:
self.env_filename = self.args.dot_env_file
def pem_file_option(self):
if self.args.pem_file:
self.pem_filename = self.args.pem_file
def create_env_file(self):
self.env_file = EnvFile(environment_path=self.environment_path,
filename=self.env_filename,
pem_filename=self.pem_filename,
no_key=self.args.no_key)
def clear_option(self):
if(self.args.clear):
self.env_file.clear_file()
def add_variable_option(self):
if self.args.add_variable:
if self.env_file.is_decryptable():
self.env_file.add_variables_as_bytes(self.args.add_variable)
else:
self.env_file.add_variables_as_text(self.args.add_variable)
def print_variable_names_from_str(self, string):
for count, variable in enumerate(
string.split("\n")):
if count != 0:
print("\n")
if variable != "":
print(self.env_file.split_str_by_equalsign(variable)[0])
def list_variable_option(self):
if self.args.list_variables:
if self.env_file.is_decryptable():
print(self.env_file.get_decrypted_lines_as_list())
else:
print(self.env_file.get_lines_as_list_from_text_file())
def run_script(self):
# Print the variables with verbose mode.
if self.args.verbose:
print(vars(self.args))
self.environment_path_option()
self.dot_env_file_option()
self.pem_file_option()
# --pem_file
# --dot-env-file
# --environment-path
# --no-key
# creates pem_file if it doesn't exist
self.create_env_file()
# --clear
self.clear_option()
# --add-variable
self.add_variable_option()
# -E
if self.args.Encrypt and not self.env_file.is_decryptable():
self.env_file.encrypt()
# -l
self.list_variable_option()
# -D
if self.args.Decrypt and self.env_file.is_decryptable():
self.env_file.decrypt()
```
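A short, hedged example of using the CLI class above from Python instead of a console entry point; the variable values are placeholders.
```python
# Hypothetical programmatic use of the CLI wrapper above; values are placeholders.
from encryptoenv.CLI import CLI

# Add two variables to env/.env (the pem file is created on first run if it doesn't exist).
CLI(["-a", "DB_USER=admin", "DB_PASS=example"]).run_script()

# Encrypt the .env file, then list the stored variable names.
CLI(["-E"]).run_script()
CLI(["-l"]).run_script()
```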
|
{
"source": "jgrugru/file-flamingo",
"score": 4
}
|
#### File: file-flamingo/fileflamingo/EncryptionFile.py
```python
from .TextFile import TextFile
from .ByteFile import ByteFile
from .Encryptor import Encryptor
class EncryptionFile(TextFile, ByteFile):
"""
EncryptionFile class inherits from TextFile and ByteFile.
EncryptionFile allows you to encrypt and decrypt the contents
of the file. Constructor requires a filepath to an RSA key.
The RSA key is passed to the Encryptor class which does the
encryption and decryption. Due to the size of the RSA key
generated in RSAFile.py, the max character count to be
encrypted cannot be greater than 240, so encryption is
done line by line.
"""
def __init__(self, filepath, rsa_filepath):
super().__init__(str(filepath))
self.rsa_filepath = str(rsa_filepath)
self.encryptor = Encryptor(self.rsa_filepath)
def encrypt(self):
"""
Checks that the file is_encryptable,
then encrypts the contents through the Encryptor
class and writes the bytes to the file.
"""
if self.is_encryptable():
encrypted_file_lines = self.encrypt_text_lines()
self.clear_file()
self.map_file_lines(encrypted_file_lines, self.append_byte_line_to_file)
self.is_encrypted = True
else:
            print(self.get_filepath() + " does not exist or is not encryptable.")
def decrypt(self):
"""
Checks if the file is decryptable,
then utilizes the Encryptor class to decrypt
the data and write the text to the file.
"""
if self.is_decryptable():
decrypted_file_lines = self.decrypt_byte_lines()
self.clear_file()
self.map_file_lines(decrypted_file_lines, self.append_text_line_to_file)
self.write_text_to_file(self.get_contents_of_file().strip())
self.is_encrypted = False
else:
            print(self.get_filepath() + " does not exist or is not decryptable.")
def encrypt_text_lines(self):
"""
Grabs the file lines from the text file as a list.
Then encrypts each line in the list and returns
the encrypted file lines in a new list.
"""
file_lines = self.get_text_lines_as_list()
encrypted_file_lines = self.map_file_lines(file_lines, self.encrypt_line)
return encrypted_file_lines
def decrypt_byte_lines(self):
"""
Grabs the byte lines from the byte file as a list.
Then decrypts each line in the list and returns
the decrypted file lines in a new list.
"""
file_lines = self.get_byte_lines_as_list()
decrypted_file_lines = self.map_file_lines(file_lines, self.decrypt_line)
decrypted_file_lines.remove(None)
return decrypted_file_lines
def encrypt_line(self, line):
"""
Before returning the encrypted line, the line
is stripped to remove any whitespace.
"""
clean_line = line.strip()
return self.encryptor.encrypt_data(clean_line)
def decrypt_line(self, line):
"""
Returns the decrypted line if the line != b''.
If line == b'', the function returns None.
This is accomplished through the boolean
expression len(line) == 0.
"""
if len(line):
return self.encryptor.decrypt_data(line)
else:
return None
```
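A brief usage sketch for the class above; it assumes an RSA key file already exists (for instance one produced by the package's RSAFile class mentioned in the docstring), and both paths are placeholders.
```python
# Hypothetical usage; both paths are placeholders and the RSA key is assumed to exist.
from fileflamingo.EncryptionFile import EncryptionFile

env_file = EncryptionFile("./env/.env", "./env/my_key.pem")

env_file.encrypt()   # encrypts the file contents line by line with the RSA key
env_file.decrypt()   # restores the plaintext contents
```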
|
{
"source": "jgrugru/ifcollector",
"score": 3
}
|
#### File: ifcollector/tests/test_ifcollector.py
```python
from pytest import mark, raises
from re import search
from ifcollector import ifandstatement, iforstatement, CannotEvaluateExpression
def matches_email_regex(value):
match_object = search(r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$", value)
return bool(match_object)
is_valid_test_str = [
str.isalnum,
"len(value) > 5",
"value == 'Testing'",
lambda value: value == "Testing",
]
is_valid_gmail = [
"len(value) > 5",
"'@' in value",
matches_email_regex,
"'gmail.com' in value",
lambda value: bool(search(r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$", value)),
]
@mark.parametrize(
"value, ifstatement, expression_list, expected_result",
[
("Test String", ifandstatement, is_valid_test_str, False),
("Test ", ifandstatement, is_valid_test_str, False),
("Testing", ifandstatement, is_valid_test_str, True),
("Testing1", ifandstatement, is_valid_test_str, False),
("Test String", iforstatement, is_valid_test_str, True),
("Test ", iforstatement, is_valid_test_str, False),
("Testing", iforstatement, is_valid_test_str, True),
("Testing1", iforstatement, is_valid_test_str, True),
("<EMAIL>", ifandstatement, is_valid_gmail, True),
("<EMAIL>", ifandstatement, is_valid_gmail, False),
("@gmail.com", ifandstatement, is_valid_gmail, False),
(" @gmail.com", ifandstatement, is_valid_gmail, False),
],
)
def test_ifstatements(value, ifstatement, expression_list, expected_result):
assert ifstatement(value, *expression_list, debug=True) == expected_result
def test_CannotEvaluateExpression():
with raises(CannotEvaluateExpression):
ifandstatement("Test String", lambda x, y: print("I am the lambda"), debug=True)
```
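For readers skimming the parametrized tests above, a couple of direct calls showing the intended behavior; the expected results mirror the test cases.
```python
# Direct calls mirroring the parametrized cases above.
from ifcollector import ifandstatement, iforstatement

# All expressions must hold for the and-variant...
print(ifandstatement("Testing", str.isalnum, "len(value) > 5", "value == 'Testing'"))  # True
# ...while a single passing expression satisfies the or-variant.
print(iforstatement("Test String", str.isalnum, "len(value) > 5"))                     # True
```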
|
{
"source": "jgrugru/ticket_tracking_system",
"score": 2
}
|
#### File: grewtix/tickets/tests.py
```python
from django.test import TestCase, Client
from .models import Ticket, TicketType, Project, Comment
from django.contrib.auth.models import User
from django.contrib import auth
from model_bakery import baker
from django.urls import reverse
from datetime import timedelta
from django.utils import timezone
user_test_password = '<PASSWORD>'
##############################
#Create test case for:
#create ticket from POST request
#edit from EDIT request
#delete from request
#########################
def create_user(username):
user = User.objects.create(username=username)
user.set_password(user_test_password)
user.save()
return user
def create_ticket(ticket_subject, owner, creator):
x = baker.make('Ticket')
x.owner = owner
x.creator = creator
x.subject = ticket_subject
x.save()
return x
def login(user):
pass
def create_comment_attached_to_ticket(ticket):
comment = baker.make("Comment")
comment.ticketID = ticket
comment.save()
return comment
def update_tickets_creation_date(ticket, days):
ticket.created_at = timezone.now() - timedelta(days=days)
ticket.save()
return ticket
def output_response(response):
obj = response
for attr in dir(obj):
print("obj.%s = %r" % (attr, getattr(obj, attr)))
class test_login(TestCase):
def test_login(self):
client = Client()
user = create_user('testuser')
self.assertTrue(client.login(username='testuser', password=<PASSWORD>))
class test_features(TestCase):
def test_index_response_code(self):
response = self.client.get(reverse('tickets:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Unassigned\n Queue")
def test_tickets_age(self):
ticket = baker.make("Ticket")
ticket.save()
response = self.client.get(reverse('tickets:all_ticket_queue'))
self.assertContains(response, "today.") #test that the ticket was created today.
update_tickets_creation_date(ticket, 1)
response = self.client.get(reverse('tickets:all_ticket_queue'))
self.assertContains(response, "day ago.") #test that the ticket was created 1 day ago.
update_tickets_creation_date(ticket, 2)
response = self.client.get(reverse('tickets:all_ticket_queue'))
self.assertContains(response, "days ago.") #test that the ticket was created 2 days ago.
update_tickets_creation_date(ticket, -1)
response = self.client.get(reverse('tickets:all_ticket_queue'))
self.assertContains(response, """<ul class="list-group">""") #test that the ticket was created 1 day in the future. Expected result is no tickets,
#so check that the list group is created but empty.
def test_assign_button(self):
owner = create_user('testOwner')
creator = create_user('testCreator')
x = create_ticket('test ticket', creator, creator)
self.client.login(username='testOwner', password=<PASSWORD>) #Login as testowner
response = self.client.get(reverse('tickets:owned_by_user_queue'))
self.assertContains(response, 'No tickets are available')
response = self.client.get(reverse('tickets:assign', kwargs={'ticket_id': x.id})) #Check that the assign/ticket.id redirects to the edit page
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('tickets:owned_by_user_queue')) #check that the assign user worked correctly
self.assertContains(response, 'testOwner')
    def test_regex_edit_ticket_path(self): #test that the regex path for tickets (ex. '\edit\prod-21')
response = self.client.get('/edit/test-1234') #expected output should be a redirect to 'edit\21' if the ticket
self.assertContains(response, "Could not find the ticket you are looking for.") #exists.
ticket = baker.make("Ticket")
ticket.save()
response = self.client.get('/edit/' + str(ticket))
self.assertEqual(response.status_code, 302)
class test_ticket_model(TestCase):
def test_get_comments(self):
ticket = baker.make("Ticket")
ticket.save()
self.assertIs(ticket.get_comments(), None) #Test without a comment, expected result is None
comment = create_comment_attached_to_ticket(ticket)
self.assertTrue(comment in ticket.get_comments()) #Test with a comment, expected result is comment to be returned in queryset
response = Client().get(reverse('tickets:edit', kwargs={'pk': ticket.id}))
self.assertContains(response, comment.comment) #Test that the comment appears in the ticket edit page.
def test_post_ticket(self):
self.client = Client()
self.client.post('/create/', data = {
"ticketType:"
})
class test_ticket_querysets_view(TestCase):
def __init__(self, x):
super().__init__(x)
self.client = Client()
def setup_with_ticket(self):
owner = create_user('testOwner')
creator = create_user('testCreator')
x = create_ticket('test ticket', owner, creator)
self.client.login(username='testOwner', password=<PASSWORD>)
def setup_without_ticket(self):
owner = create_user('testOwner')
self.client.login(username='testOwner', password=<PASSWORD>)
def test_owned_by_user_queue_with_ticket(self):
self.setup_with_ticket()
response = self.client.get(reverse('tickets:owned_by_user_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testOwner')
def test_owned_by_user_queue_without_ticket(self):
self.setup_without_ticket()
response = self.client.get(reverse('tickets:owned_by_user_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No tickets are available')
def test_created_by_user_queue_with_ticket(self):
self.setup_with_ticket()
response = self.client.get(reverse('tickets:created_by_user_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testOwner')
def test_created_by_user_queue_without_ticket(self):
self.setup_without_ticket()
response = self.client.get(reverse('tickets:created_by_user_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No tickets are available')
def test_unassigned_queue_with_ticket(self):
self.setup_with_ticket()
response = self.client.get(reverse('tickets:unassigned_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testOwner')
def test_unassigned_queue_without_ticket(self):
self.setup_without_ticket()
response = self.client.get(reverse('tickets:unassigned_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No tickets are available')
def test_recently_created_queue_with_ticket(self):
self.setup_with_ticket()
response = self.client.get(reverse('tickets:recently_created_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testOwner')
def test_recently_created_queue_without_ticket(self):
self.setup_without_ticket()
response = self.client.get(reverse('tickets:recently_created_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No tickets are available')
def test_all_ticket_queue_with_ticket(self):
self.setup_with_ticket()
response = self.client.get(reverse('tickets:all_ticket_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testOwner')
def test_all_ticket_queue_without_ticket(self):
self.setup_without_ticket()
response = self.client.get(reverse('tickets:all_ticket_queue'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No tickets are available')
# obj = response
# for attr in dir(obj):
# print("obj.%s = %r" % (attr, getattr(obj, attr)))
```
#### File: management/commands/populate_tickets.py
```python
from django.core.management.base import BaseCommand
from model_bakery import baker
from tickets.models import Ticket, TicketType, Project # noqa: F401
from django.contrib.auth.models import User
class Command(BaseCommand):
help = 'Pass an integer, how many mock tickets you want created'
def add_arguments(self, parser):
parser.add_argument('populate', type=int)
def handle(self, *args, **options):
if options['populate']:
for count in range(int(options['populate'])):
ticket = baker.make(
'Ticket',
ticketType=TicketType.objects.all()[0],
project=Project.objects.all()[0],
description="THIS IS A TEST.",
priority='Low',
status="Backlog",
creator=User.objects.filter(username="jgruenbaum")[0],
)
print("Created: " + str(ticket))
ticket.save()
```
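A hedged example of running the management command above from code; it assumes Django settings are configured and that at least one TicketType, Project, and the referenced user already exist, since the handler indexes directly into those querysets.
```python
# Hypothetical invocation; assumes a configured Django project with seed data in place.
from django.core.management import call_command

# Create 25 mock tickets via the command's 'populate' argument.
call_command("populate_tickets", 25)
```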
|