{
"source": "jharmison-redhat/osdk-manager",
"score": 2
} |
#### File: src/osdk_manager/exceptions.py
```python
class ContainerRuntimeException(Exception):
"""Unable to identify a container runtime in your current environment."""
pass
class ShellRuntimeException(RuntimeError):
"""Shell command returned non-zero return code.
Attributes:
code -- the return code from the shell command
"""
def __init__(self, code: int = None):
"""Save the code with the exception."""
self.code = code
```
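These exceptions are raised by the helpers in `util.py` below. A minimal, hypothetical sketch of how a caller might handle them (the `false` command is purely illustrative, not from the repository):
```python
from osdk_manager.exceptions import ShellRuntimeException
from osdk_manager.util import shell

try:
    # shell() yields output lines and raises once the command has finished
    for line in shell("false"):
        print(line)
except ShellRuntimeException as exc:
    print(f"command failed with return code {exc.code}")
```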
#### File: src/osdk_manager/util.py
```python
import gnupg
import logging
import logging.handlers
import os
import shlex
import subprocess
from io import BytesIO
from pathlib import Path
from typing import List, Iterable
from osdk_manager.exceptions import (
ContainerRuntimeException,
ShellRuntimeException
)
def get_logger(verbosity: int = None):
"""Create a logger, or return an existing one with specified verbosity."""
logger = logging.getLogger('osdk-manager')
logger.setLevel(logging.DEBUG)
if len(logger.handlers) == 0:
_format = '{asctime} {name} [{levelname:^9s}]: {message}'
formatter = logging.Formatter(_format, style='{')
stderr = logging.StreamHandler()
stderr.setFormatter(formatter)
if verbosity is not None:
stderr.setLevel(40 - (min(3, verbosity) * 10))
else:
stderr.setLevel(40)
logger.addHandler(stderr)
if os.path.exists('/dev/log'):
syslog = logging.handlers.SysLogHandler(address='/dev/log')
syslog.setFormatter(formatter)
syslog.setLevel(logging.INFO)
logger.addHandler(syslog)
else:
if verbosity is not None and verbosity != 0:
stderr = logger.handlers[0]
stderr.setLevel(40 - (min(3, verbosity) * 10))
return logger
def _utf8ify(line_bytes: bytes = None) -> str:
    """Decode line_bytes as utf-8 and strip excess whitespace."""
    return line_bytes.decode("utf-8").rstrip()
def shell(cmd: str = None, fail: bool = True) -> Iterable[str]:
"""Run a command in a subprocess, yielding lines of output from it.
    By default, a non-zero return code raises ShellRuntimeException with that
    code. To change this behavior, pass fail=False.
"""
logger = get_logger()
logger.debug("Running: {}".format(cmd))
proc = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in map(_utf8ify, iter(proc.stdout.readline, b'')):
logger.debug("Line: {}".format(line))
yield line
ret = proc.wait()
if fail and ret != 0:
logger.error("Command errored: {}".format(cmd))
raise ShellRuntimeException(ret)
elif ret != 0:
logger.warning("Command returned {}: {}".format(ret, cmd))
def determine_runtime() -> str: # pragma: no cover
"""Determine the container runtime installed on the system."""
try:
[line for line in shell("docker --help", fail=False)]
return "docker"
except FileNotFoundError:
pass
try:
[line for line in shell("podman --help", fail=False)]
return "podman"
except FileNotFoundError:
raise ContainerRuntimeException
class GpgTrust(object):
"""Handles GPG key trust and signature validation."""
def __init__(self, key_server: str = 'keys.gnupg.net') -> None:
"""Initialize a GPG Trust database object."""
self.logger = get_logger()
self.gnupghome = os.path.expanduser('~/.gnupg')
self.key_server = key_server
self.logger.debug(f'Creating {self.gnupghome}')
os.makedirs(self.gnupghome, mode=0o700, exist_ok=True)
self.gpg = gnupg.GPG(gnupghome=self.gnupghome)
def trust(self, key_id: str = None) -> bool:
"""Trust a GPG public key."""
try:
self.logger.debug(f'Importing key {key_id} from {self.key_server}')
self.gpg.recv_keys(self.key_server, key_id)
except FileNotFoundError: # pragma: no cover
self.logger.error('Unable to receive keys!')
raise
return True
def verify(self, target: Path = None, signature: bytes = None) -> bool:
"""Validate a signature using the trust database."""
        self.logger.debug(f'Validating {target} signature')
signature = BytesIO(signature)
if self.gpg.verify_file(signature, target):
self.logger.debug(f'{target} verified.')
return True
else:
raise RuntimeError(f'{target} failed verification.')
```
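A short sketch (assumed usage, not from the repository) of how the `verbosity` argument of `get_logger` maps onto the stderr handler level:
```python
import logging

from osdk_manager.util import get_logger

# 0 -> ERROR (40), 1 -> WARNING (30), 2 -> INFO (20), 3 or more -> DEBUG (10);
# assumes a fresh process where no handlers have been attached yet
logger = get_logger(verbosity=2)
assert logger.handlers[0].level == logging.INFO
logger.info("visible at verbosity 2 and above")
```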
#### File: osdk-manager/tests/conftest.py
```python
import logging
import os
import pytest
import shutil
import tempfile
import yaml
from osdk_manager.util import shell
from osdk_manager.exceptions import ShellRuntimeException
logger = logging.getLogger()
settings_1 = {
"domain": "io",
"group": "operators",
"kinds": [
"PytestResource"
],
"api_version": "v1alpha1",
"image": "harbor.jharmison.com/osdk-testing/pytest-operator",
"version": "0.0.1",
"channels": [
"alpha"
],
"default_sample": "operators_v1alpha1_pytestresource.yaml",
}
settings_2 = {
"domain": "io",
"group": "operators",
"kinds": [
"PytestResourceTwo"
],
"api_version": "v1beta1",
"image": "quay.io/jharmison/pytest-operator-two",
"version": "0.0.1",
"channels": [
"alpha"
],
"default_sample": "operators_v1beta1_pytestresourcetwo.yaml",
}
@pytest.fixture()
def tmp_path():
"""Return a simple dictionary for temporary directories."""
return {"path": "/tmp"}
@pytest.fixture()
def installed_opm(request):
"""Update the Operator Package Manager and return the version.
The request.param is used to specify the version to request. If specified
as "latest", it will attempt to identify the latest version from the GitHub
API.
"""
import osdk_manager.opm.update as opm_update
opm_update._called_from_test = True
return opm_update.opm_update(directory="/tmp", path="/tmp",
version=request.param)
@pytest.fixture()
def new_folder():
"""Create a new temp folder, cleaning it up after the test."""
good_name = False
while not good_name:
folder = tempfile.mkdtemp()
if '_' in folder:
logger.debug("removing bad generated tmpdir")
shutil.rmtree(folder)
else:
logger.debug("good tmpdir")
good_name = True
yield folder
logger.debug("cleaning up tmpdir")
shutil.rmtree(folder)
def operator_settings_file(settings: dict = {}) -> str:
"""Yield the path to a file with settings saved as YAML."""
operator_file = tempfile.mkstemp()[1]
with open(operator_file, "w") as f:
yaml.safe_dump(settings, f)
return operator_file
@pytest.fixture()
def operator_settings_1():
"""Return a dictionary of some basic operator settings."""
return settings_1
@pytest.fixture()
def operator_settings_file_1():
"""Yield the path to a file with operator_settings_1 saved in it."""
settings = {k.replace('_', '-'): v for k, v in settings_1.items()}
operator_file = operator_settings_file(settings)
yield operator_file
os.remove(operator_file)
@pytest.fixture()
def operator_settings_2():
"""Return a dictionary of some basic operator settings."""
return settings_2
@pytest.fixture()
def operator_settings_file_2():
"""Yield the path to a file with operator_settings_1 saved in it."""
settings = {k.replace('_', '-'): v for k, v in settings_2.items()}
operator_file = operator_settings_file(settings)
yield operator_file
os.remove(operator_file)
@pytest.fixture()
def minikube_profile():
"""Identify a running minikube instance and return its profile name.
Returns None if the minikube or kubectl binaries aren't in $PATH, or if the
cluster is not up and running.
"""
try:
''.join(shell("which minikube"))
except ShellRuntimeException:
logger.warning("no minikube")
return None # we need minikube
try:
''.join(shell("which kubectl"))
except ShellRuntimeException:
logger.warning("no kubectl")
return None # we need kubectl
try:
''.join(shell("minikube status"))
except ShellRuntimeException:
logger.warning("no cluster up")
return None # we need a running cluster
logger.info("returning minikube profile")
return ''.join(shell("minikube profile"))
```
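A hypothetical example (not part of the test suite) of how a test could consume the `minikube_profile` fixture to skip itself when no cluster is available:
```python
import pytest


def test_needs_cluster(minikube_profile):
    # the fixture returns None when minikube/kubectl are missing or no cluster is up
    if minikube_profile is None:
        pytest.skip("no running minikube cluster")
    assert minikube_profile.strip() != ""
```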
#### File: osdk-manager/tests/test_cli_osdk.py
```python
import os
import shlex
from click.testing import CliRunner
from osdk_manager.cli import cli
from osdk_manager.util import get_logger
import osdk_manager.osdk.update as osdk_update
osdk_update._called_from_test = True
def test_osdk_update():
"""Test a basic invocation of osdk-manager update."""
runner = CliRunner()
args = shlex.split('osdk update --path=/tmp')
result = runner.invoke(cli, args)
assert result.exit_code == 0
assert 'operator-sdk version' in result.output
assert 'is available at /tmp/operator-sdk' in result.output
def test_osdk_version_update():
"""Test a version-pinned invocation of osdk-manager update."""
runner = CliRunner()
args = shlex.split(
'osdk update --path=/tmp --version=1.3.1'
)
result = runner.invoke(cli, args)
assert result.exit_code == 0
assert 'operator-sdk version 1.3.1 is available at /tmp/operator-sdk' in \
result.output
def test_osdk_update_verbosity():
"""Test the osdk-manager update command verbosity flag."""
runner = CliRunner()
args = shlex.split('osdk update --path=/tmp -vvv')
logger = get_logger()
logger.handlers.clear()
result = runner.invoke(cli, args)
assert result.exit_code == 0
assert int(logger.handlers[0].level) == 10
def test_osdk_verbosity_update():
"""Test the osdk-manager verbosity with an update afterwards."""
runner = CliRunner()
args = shlex.split('-vvv osdk update --path=/tmp')
logger = get_logger()
logger.handlers.clear()
result = runner.invoke(cli, args)
assert result.exit_code == 0
assert int(logger.handlers[0].level) == 10
def test_osdk_update_path():
"""Test the osdk-manager difference between using a value in PATH."""
PATH = os.getenv('PATH')
os.environ['PATH'] = ':'.join([os.path.expanduser('~/.local/bin'), PATH])
runner = CliRunner()
args = shlex.split('osdk update --version=1.3.1')
result = runner.invoke(cli, args)
assert 'is in your path' in result.output
def test_osdk_no_validate():
"""Test the osdk-manager update without verification of signatures."""
runner = CliRunner()
args = shlex.split('osdk update --path=/tmp --no-verify')
result = runner.invoke(cli, args)
assert result.exit_code == 0
assert 'operator-sdk version' in result.output
assert 'is available at /tmp/operator-sdk' in result.output
```
#### File: osdk-manager/tests/test_opm_update.py
```python
import os
import os.path
import pytest
@pytest.mark.parametrize("installed_opm", ["latest", "1.14.2", "1.14.2"], # noqa: PT014,E501
indirect=True)
def test_update(installed_opm):
"""Test updates with both unspecified version and a pinned version."""
link_path = "/tmp/opm"
assert os.path.islink(link_path)
link_inode = os.stat(link_path)
bin_path = "/tmp/linux-amd64-opm-{}".format(
installed_opm
)
bin_inode = os.stat(bin_path)
assert link_inode == bin_inode
@pytest.mark.parametrize("installed_opm", ["1.14.3", "1.14.3"], # noqa: PT014
indirect=True)
def test_broken_link_update(installed_opm):
"""Test updates with successive installations missing a link."""
link_path = "/tmp/opm"
assert os.path.islink(link_path)
link_inode = os.stat(link_path)
bin_path = "/tmp/linux-amd64-opm-{}".format(
installed_opm
)
bin_inode = os.stat(bin_path)
assert link_inode == bin_inode
# Unlink the installation to test ability to reapply
os.remove(link_path)
@pytest.mark.parametrize("installed_opm", ["1.14.3", "1.14.3"], # noqa: PT014
indirect=True)
def test_dangling_link_update(installed_opm):
"""Test updates with successive installations missing a binary."""
link_path = "/tmp/opm"
assert os.path.islink(link_path)
link_inode = os.stat(link_path)
bin_path = "/tmp/linux-amd64-opm-{}".format(
installed_opm
)
bin_inode = os.stat(bin_path)
assert link_inode == bin_inode
# Remove the installation binary to test ability to reapply
os.remove(bin_path)
@pytest.mark.parametrize("installed_opm", ["1.14.3", "1.14.3"], # noqa: PT014
indirect=True)
def test_wrong_link_update(installed_opm):
"""Test updates with successive installations with the wrong link."""
link_path = "/tmp/opm"
assert os.path.islink(link_path)
link_inode = os.stat(link_path)
bin_path = "/tmp/linux-amd64-opm-{}".format(
installed_opm
)
bin_inode = os.stat(bin_path)
assert link_inode == bin_inode
# Mislink the installation to test ability to reapply
if os.path.islink(link_path):
os.remove(link_path)
os.symlink("/etc/passwd", link_path)
``` |
{
"source": "jharnath/PyBackup",
"score": 3
} |
#### File: jharnath/PyBackup/backupooall.py
```python
import sys
from emailconfig import EmailConfig
from job import Job
from backup import Backup, BackupFile, BackupDirectory
from backupcfg import job_msg, usage_msg, logfile, email_config, jobs
def main():
'''
Execute all backup jobs in job list.
'''
    # iterate through all jobs in the jobs list
for job in jobs:
# Set job attributes
# determine the type of backup to perform based upon job type
backup = BackupFile() if job.is_file_job else BackupDirectory()
job.set_backup(backup)
job.set_logfile(logfile)
job.set_email_config(email_config)
# perform backup
if not job.errors:
job.do_backup()
# send errors as email
if job.errors:
pass # job.do_email()
# record result in logfile
job.do_logfile()
sys.exit(0)
if __name__ == '__main__':
main()
```
#### File: jharnath/PyBackup/emailconfig.py
```python
class EmailConfig(object):
'''
Individual Email configuration details.
'''
def __init__(self, *args):
'''
class EmailConfig constructor
Set class attributes to initial values.
        Parameters:
            args[0]: email address of recipient
            args[1]: email address of user
            args[2]: password for the user account
            args[3]: SMTP server address
            args[4]: SMTP server port
'''
self.recipient = args[0]
self.user = args[1]
self.pwd = args[2]
self.server = args[3]
self.port = args[4]
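# Hypothetical usage sketch (not part of the original file); the SMTP details
# below are placeholders, not real configuration.
if __name__ == '__main__':
    config = EmailConfig(
        'recipient@example.com',   # args[0]: recipient address
        'sender@example.com',      # args[1]: sending account
        'not-a-real-password',     # args[2]: account password
        'smtp.example.com',        # args[3]: SMTP server
        587,                       # args[4]: SMTP port
    )
    print(config.server, config.port)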
``` |
{
"source": "jharpster/WMTS",
"score": 2
} |
#### File: python_code/webapp/api.py
```python
import sys, os, json, re, bottle
from bottle import route, request, response
api = bottle.Bottle()
from tiny_tile_server import base
from tiny_tile_server import python_wmts
from tiny_tile_server import python_server
# Access to tiles in XYZ
@api.route('/tile/<layer>/<z>/<x>/<y>.<ext>')
def api_tile(layer, z, x, y, ext):
bottle.response.content_type = "image/%s" % ext
return base.get_tile(layer, x, y, z, ext, False)
# Access to grids
@api.route('/grid/<layer>/<z>/<x>/<y>.grid.json')
def api_grid(layer, z, x, y):
callback = request.query.callback or 'grid'
bottle.response.content_type = "application/json"
return base.get_grid(layer, x, y, z, callback)
# Access to metadata
@api.route('/metadata/<layer>/metadata.json')
def api_metadata(layer):
bottle.response.content_type = "application/json"
return base.get_metadata(layer)
# Access to metadata JSONP
@api.route('/metadata/<layer>/metadata.jsonp')
def api_metadata_jsonp(layer):
bottle.response.content_type = "application/json"
return base.get_metadataJSONP(layer)
# Print metadata information
@api.route('/metadata_info/<layer>/metadata.json')
def api_def_metadata(layer):
bottle.response.content_type = "application/json"
return base.def_metadata(layer)
# Access to tiles in WMTS
@api.route('/tilewmts/<layer>/<z>/<x>/<y>.<ext>')
def api_tilewmts(layer, z, x, y, ext):
bottle.response.content_type = "image/%s" % ext
return base.get_tile(layer, x, y, z, ext, True)
# Access to getCapabilities for WMTS protocol
@api.get('/wmtstile/<layer>/<z>/<x>/<y>.<ext>')
def api_wmts(layer, z, x, y, ext):
return python_server.init_data(layer, x, y, z, ext)
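# Hypothetical entry point (not part of the original file) showing how the
# "api" application defined above could be served locally for testing.
if __name__ == '__main__':
    root = bottle.Bottle()
    root.mount('/api', api)
    root.run(host='localhost', port=8080, debug=True)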
``` |
{
"source": "jharrington22/aws-pricing",
"score": 2
} |
#### File: aws-pricing/aws_audit/all_pricing.py
```python
import boto3
import json
import pprint
import re
import sys
from .constants import (
region_short_names,
aws_region
)
from .connection import (
region,
session,
ec2,
pricing_client
)
class PricingInfo:
def __init__(self):
self.pricing = {}
self.price_item = []
self.volume_types = {
'gp2': 'General Purpose',
'io1': 'Provisioned IOPS',
'sc1': 'Cold HDD',
'st1': 'Throughput Optimized HDD',
'standard': 'Magnetic'
}
self.pricing_dict()
self.paginator_connection()
def pricing_dict(self):
for region in aws_region:
self.pricing[region] = {
'EC2': {},
'Snapshots': {},
'ELB': {},
'ELBV2': {},
'EBS': {}
}
def paginator_connection(self):
return pricing_client.get_paginator('get_products')
def onDemand_variables(self, terms, variable_type):
self.product_sku = list(terms[variable_type].keys())
self.pd = terms[variable_type][self.product_sku[0]]['priceDimensions']
self.product_price_sku = list(self.pd.keys())
self.price = self.pd[self.product_price_sku[0]]['pricePerUnit']['USD']
self.description = self.pd[self.product_price_sku[0]]['description']
def response_pages(self, price_list_type):
paginator = self.paginator_connection()
if price_list_type == 'EC2':
resp_pages = paginator.paginate(ServiceCode='AmazonEC2',
Filters=[
{'Type':'TERM_MATCH',
'Field':'preInstalledSw',
'Value':'NA'},
{'Type':'TERM_MATCH',
'Field':'operatingSystem',
'Value':'Linux'},
{'Type':'TERM_MATCH',
'Field':'tenancy',
'Value':'Shared'},
{'Type':'TERM_MATCH',
'Field':'licenseModel',
'Value':'No License required'},
{'Type':'TERM_MATCH',
'Field':'capacitystatus',
'Value':'Used'}
])
if price_list_type == 'ELB':
resp_pages = paginator.paginate(ServiceCode='AmazonEC2',
Filters=[
{'Type':'TERM_MATCH',
'Field':'productFamily',
'Value':'Load Balancer'},
])
if price_list_type == 'ELBV2':
resp_pages = paginator.paginate(ServiceCode='AmazonEC2',
Filters=[
{'Type':'TERM_MATCH',
'Field':'productFamily',
'Value':'Load Balancer-Network'},
])
if price_list_type == 'Snapshots':
resp_pages = paginator.paginate(ServiceCode="AmazonEC2",
Filters=[
{'Type': 'TERM_MATCH',
'Field': 'productFamily',
'Value': 'Storage Snapshot'}
]
)
if price_list_type == 'EBS':
resp_pages = paginator.paginate(ServiceCode='AmazonEC2',
Filters=[
{'Type':'TERM_MATCH',
'Field':'productFamily',
'Value':'Storage'},
])
self.terms_list(resp_pages)
def terms_list(self, resp_pages):
for page in resp_pages:
for item in page['PriceList']:
self.price_item.append(json.loads(item))
def price_list_ELBV2(self):
self.response_pages('ELBV2')
for item in self.price_item:
terms = item['terms']
if 'OnDemand' in terms:
region = region_short_names[item['product']['attributes']['location']]
self.onDemand_variables(terms, 'OnDemand')
if not 'OnDemand' in self.pricing[region]['ELBV2']:
self.pricing[region]['ELBV2']['OnDemand'] = {}
self.pricing[region]['ELBV2']['OnDemand'] = {
'Description': self.description,
'UsageType': item['product']['attributes']['usagetype'],
'Location': item['product']['attributes']['location'],
'USD': self.price
}
return self.pricing
def price_list_EBS(self):
self.response_pages('EBS')
for item in self.price_item:
terms = item['terms']
if 'volumeType' in item['product']['attributes']:
volume_type = list(self.volume_types.keys())[list(self.volume_types.values()).index(item['product']['attributes']['volumeType'])]
if 'OnDemand' in terms:
region = region_short_names[item['product']['attributes']['location']]
self.onDemand_variables(terms, 'OnDemand')
if not volume_type in self.pricing[region]['EBS']:
self.pricing[region]['EBS'][volume_type] = {}
if not 'OnDemand' in self.pricing[region]['EBS'][volume_type]:
self.pricing[region]['EBS'][volume_type]['OnDemand'] = {}
self.pricing[region]['EBS'][volume_type]['OnDemand'] = {
'Description': self.description,
'UsageType': item['product']['attributes']['usagetype'],
'Location': item['product']['attributes']['location'],
'Max Volume Size': item['product']['attributes']['maxVolumeSize'],
'USD': self.price
}
return self.pricing
def price_list_snapshots(self):
self.response_pages('Snapshots')
for item in self.price_item:
terms = item['terms']
if 'OnDemand' in terms:
region = region_short_names[item['product']['attributes']['location']]
self.onDemand_variables(terms, 'OnDemand')
if not 'OnDemand' in self.pricing[region]['Snapshots']:
self.pricing[region]['Snapshots']['OnDemand'] = {}
self.pricing[region]['Snapshots']['OnDemand'] = {
'Description': self.description,
'UsageType': item['product']['attributes']['usagetype'],
'Location': item['product']['attributes']['location'],
'USD': self.price
}
return self.pricing
def price_list_ELB(self):
self.response_pages('ELB')
for item in self.price_item:
terms = item['terms']
if 'OnDemand' in terms:
region = region_short_names[item['product']['attributes']['location']]
self.onDemand_variables(terms, 'OnDemand')
if not 'OnDemand' in self.pricing[region]['ELB']:
self.pricing[region]['ELB']['OnDemand'] = {}
self.pricing[region]['ELB']['OnDemand'] = {
'Description': self.description,
'UsageType': item['product']['attributes']['usagetype'],
'Location': item['product']['attributes']['location'],
'USD': self.price
}
return self.pricing
def price_list_EC2(self):
self.response_pages('EC2')
for item in self.price_item:
terms = item['terms']
if 'instanceType' in item['product']['attributes']:
instance_type = item['product']['attributes']['instanceType']
region = region_short_names[item['product']['attributes']['location']]
if 'OnDemand' in terms:
self.onDemand_variables(terms, 'OnDemand')
if not instance_type in self.pricing[region]['EC2']:
self.pricing[region]['EC2'][instance_type] = {}
if not 'OnDemand' in self.pricing[region]['EC2'][instance_type]:
self.pricing[region]['EC2'][instance_type]['OnDemand'] = {}
usageType = item['product']['attributes']['usagetype']
if re.search('.*BoxUsage:{}'.format(instance_type),usageType):
self.pricing[region]['EC2'][instance_type]['OnDemand'] = {
'Description': self.description,
'UsageType': item['product']['attributes']['usagetype'],
'Location': item['product']['attributes']['location'],
'Tenancy': item['product']['attributes']['tenancy'],
'Operating System': item['product']['attributes']['operatingSystem'],
'USD': self.price
}
if 'Reserved' in terms:
for reserved_sku in terms['Reserved'].keys():
term_attributes = terms['Reserved'][reserved_sku]['termAttributes']
price_dimensions = terms['Reserved'][reserved_sku]['priceDimensions']
ri_purchase_option = term_attributes['PurchaseOption']
if not instance_type in self.pricing[region]['EC2']:
self.pricing[region]['EC2'][instance_type] = {}
if not 'Reserved' in self.pricing[region]['EC2'][instance_type]:
self.pricing[region]['EC2'][instance_type]['Reserved'] = {}
if not ri_purchase_option in self.pricing[region]['EC2'][instance_type]['Reserved']:
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option] = {}
if term_attributes['OfferingClass'] == 'standard' and term_attributes['LeaseContractLength'] == '1yr':
if ri_purchase_option == 'Partial Upfront':
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option] = {
'QuantityRateCode': '',
'HrsRateCode': '',
'Offering_Class': term_attributes['OfferingClass'],
'PurchaseOption': ri_purchase_option,
'HrsUSD': '',
'UpfrontFeeUSD': ''
}
for price_dimension in price_dimensions:
if price_dimensions[price_dimension]['unit'] == 'Quantity':
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['UpfrontFeeUSD'] = price_dimensions[price_dimension]['pricePerUnit']['USD']
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['QuantityRateCode'] = price_dimensions[price_dimension]['rateCode']
if price_dimensions[price_dimension]['unit'] == 'Hrs':
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['HrsUSD'] = price_dimensions[price_dimension]['pricePerUnit']['USD']
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['HrsRateCode'] = price_dimensions[price_dimension]['rateCode']
if ri_purchase_option == 'All Upfront':
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option] = {
'QuantityRateCode': '',
'HrsRateCode': '',
'Offering_Class': term_attributes['OfferingClass'],
'PurchaseOption': ri_purchase_option,
'HrsUSD': '',
'UpfrontFeeUSD': ''
}
for price_dimension in price_dimensions:
if price_dimensions[price_dimension]['unit'] == 'Quantity':
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['UpfrontFeeUSD'] = price_dimensions[price_dimension]['pricePerUnit']['USD']
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['QuantityRateCode'] = price_dimensions[price_dimension]['rateCode']
if price_dimensions[price_dimension]['unit'] == 'Hrs':
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['HrsUSD'] = price_dimensions[price_dimension]['pricePerUnit']['USD']
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['HrsRateCode'] = price_dimensions[price_dimension]['rateCode']
if ri_purchase_option == 'No Upfront':
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option] = {
'RateCode': '',
'Offering_Class': term_attributes['OfferingClass'],
'PurchaseOption': ri_purchase_option,
'USD': ''
}
for price_dimension in price_dimensions:
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['RateCode'] = price_dimensions[price_dimension]['rateCode']
self.pricing[region]['EC2'][instance_type]['Reserved'][ri_purchase_option]['USD'] = price_dimensions[price_dimension]['pricePerUnit']['USD']
return self.pricing
price = PricingInfo()
```
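For orientation, a hedged sketch of the `OnDemand` block that `onDemand_variables` walks. The nesting follows the AWS Price List API format; the SKU, rate code and prices below are placeholders:
```python
example_terms = {
    "OnDemand": {
        "EXAMPLESKU.JRTCKXETXF": {
            "priceDimensions": {
                "EXAMPLESKU.JRTCKXETXF.6YS6EN2CT7": {
                    "description": "$0.096 per On Demand Linux m5.large Instance Hour",
                    "pricePerUnit": {"USD": "0.0960000000"},
                }
            }
        }
    }
}

sku = list(example_terms["OnDemand"].keys())[0]
price_dimensions = example_terms["OnDemand"][sku]["priceDimensions"]
rate_code = list(price_dimensions.keys())[0]
price = price_dimensions[rate_code]["pricePerUnit"]["USD"]  # "0.0960000000"
```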
#### File: aws-pricing/aws_audit/aws_auditing_list.py
```python
import boto3
import json
from datetime import datetime
import os
import pprint
from prettytable import PrettyTable
import argparse
import sys
# from aws_audit.all_pricing import pricing_info
from all_pricing import pricing_info
# Parser for command line
parser = argparse.ArgumentParser()
parser.add_argument(
'--allpricing',
'-a',
help='pricing report for all regions',
)
parser.add_argument(
'region', help='pricing report for that region'
)
parser.add_argument(
    '--pricing', '-p', help='get pricing for a region', action='store_true'
)
parser.add_argument(
    '--resources', '-r', help='get resources for a region', action='store_true'
)
args = parser.parse_args()
# Creating Table
x = PrettyTable()
x.field_names = [
'Region',
'Service',
'Instance_Type',
'Count',
'Price per hour',
'Total Instances/Size',
'Total cost per month',
]
x.align = 'l'
y = PrettyTable()
y.field_names = [
'Region',
'Service',
'Instance_Type',
'Count',
'Price per hour',
'Total Instances/Size'
]
y.align = 'l'
# JSON encoder subclass so that datetime objects are serializable
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
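# Illustrative only (not part of the original script): the encoder lets
# json.dumps handle datetime values such as EC2 launch times, e.g.
#   json.dumps({"launch_time": datetime.now()}, cls=DateTimeEncoder)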
# To get the AWS resource report
class AWSAudit:
def __init__(self):
self.resources = {}
self.dictionary = {}
self.volume_ebs = {}
self.snap_vol_id = []
self.aws_region = []
self.attached_vol_list = []
self.unattached_vol_list = []
self.state = 'running'
self.per_month_hours = 730.5
self.con = self.connect_service('ec2')
self.sts_client = self.connect_service('sts')
self.aws_regions = self.region(self.aws_region)
self.initialize_resource_dict(self.aws_regions)
self.get_ec2_resources(self.aws_regions)
self.get_classic_elb_resources(self.aws_regions)
self.get_network_elb_resources(self.aws_regions)
self.get_ebs_resources(self.aws_regions)
if args.resources:
self.get_resources(
self.aws_regions,
self.volume_ebs,
)
if args.pricing:
self.get_price(
self.aws_regions,
self.volume_ebs,
)
def region(self, aws_region):
if args.region:
aws_region = [args.region]
else:
aws_region = [
d['RegionName']for d in self.con.describe_regions()['Regions']
]
return aws_region
def connect_service_region(
self, service, region_name=None
):
return boto3.client(service, region_name)
def connect_service(self, service):
return boto3.client(service)
def initialize_resource_dict(self, regions):
resources_dict = {}
for region_name in regions:
resources_dict[region_name] = {
'ELB': {},
'ELBV2': {},
'EC2': {},
'EBS': {'orphaned_snapshots': []},
}
self.dictionary = resources_dict
# Get EC2 resources
def get_ec2_resources(self, regions):
for region_name in regions:
conn = self.connect_service_region(
'ec2',
region_name=region_name
)
instance_list = conn.describe_instances()
for r in instance_list['Reservations']:
for i in r['Instances']:
instance_id = i['InstanceId']
if 'KeyName' in i:
key_name = i['KeyName']
else:
key_name = ''
self.dictionary[region_name]['EC2'][instance_id] = {
'key_name': key_name,
'launch_time': i['LaunchTime'],
'instance_state': i['State']['Name'],
'instance_type': i['InstanceType']
}
# Get Classic ELB
def get_classic_elb_resources(self, regions):
for region_name in regions:
conn = self.connect_service_region(
'elb',
region_name=region_name
)
lb = conn.describe_load_balancers()
for l in lb['LoadBalancerDescriptions']:
self.dictionary[region_name]['ELB'][l['LoadBalancerName']] = {'instanceId': []}
if l['Instances']:
                    self.dictionary[region_name]['ELB'][l['LoadBalancerName']]['instanceId'] = [inst['InstanceId'] for inst in l['Instances']]
else:
self.dictionary[region_name]['ELB'][l['LoadBalancerName']]['instanceId'] = []
# Get Network ELB
def get_network_elb_resources(self, regions):
for region_name in regions:
conn = self.connect_service_region(
'elbv2',
region_name=region_name
)
lb = conn.describe_load_balancers()
network_elb = len(lb['LoadBalancers'])
self.dictionary[region_name]['ELBV2'] = {
'Length': network_elb
}
# Get Volumes and Snapshots
def get_ebs_resources(self, regions):
sts_response = self.sts_client.get_caller_identity()
user_account = sts_response['Account']
for region_name in regions:
conn = self.connect_service_region(
'ec2',
region_name=region_name
)
volumes = conn.describe_volumes()
snapshots = conn.describe_snapshots(
Filters=[
{
'Name': 'owner-id',
'Values': [str(user_account)],
}
]
)
for vol in volumes['Volumes']:
vol_id = vol['VolumeId']
self.dictionary[region_name]['EBS'][vol_id] = {
'state': vol['State'],
'snapshots': [],
'size': vol['Size'],
'volumeType': vol['VolumeType'],
}
# Get all snapshots and assign them to their volume
for snapshot in snapshots['Snapshots']:
snap = snapshot['VolumeId']
if (snap in self.dictionary[region_name]['EBS']):
self.dictionary[region_name]['EBS'][snap]['snapshots'].append(snapshot['SnapshotId'])
else:
self.dictionary[region_name]['EBS']['orphaned_snapshots'].append(snapshot['SnapshotId'])
# List EC2 instances
def list_instances(self, state, region):
instances_per_state = []
for i in self.dictionary[region]['EC2']:
if self.dictionary[region]['EC2'][i]['instance_state'] == state and i not in instances_per_state:
instances_per_state.append(i)
return(instances_per_state)
# Count EC2 Instances
def count_instance_types(self, instances_per_state, region):
count_instance_type = {}
for instance_id in instances_per_state:
if instance_id in self.dictionary[region]['EC2']:
instance_type = self.dictionary[region]['EC2'][instance_id]['instance_type']
if instance_type not in count_instance_type:
count_instance_type[instance_type] = {'count': 1}
else:
count_instance_type[instance_type]['count'] += 1
return(count_instance_type)
# Count Classic ELB's
def count_classic_elb(self, region):
return (len(self.dictionary[region]['ELB']))
# Count Network ELB's
def count_network_elb(self, region):
return (self.dictionary[region]['ELBV2']['Length'])
# Count orphaned and attached snapshots
def count_snapshots(self, count_type, region):
attached_snapshot_count = 0
for vol_id in self.dictionary[region]['EBS']:
if vol_id == 'orphaned_snapshots':
continue
if vol_id in self.dictionary[region]['EBS']:
if len(self.dictionary[region]['EBS'][vol_id]['snapshots']) > 0:
self.snap_vol_id.append(vol_id)
attached_snapshot_count += 1
if count_type == 'attached':
return attached_snapshot_count
else:
orphaned_snapshot_count = len(self.dictionary[region]['EBS']['orphaned_snapshots'])
return orphaned_snapshot_count
# Count attached and orphaned volumes
def list_volumes(self, regions):
conn = self.connect_service_region(
'ec2',
region_name=regions
)
volumes = conn.describe_volumes()
for vol in volumes['Volumes']:
if len(vol['Attachments']) > 0:
if not vol['VolumeId'] in self.attached_vol_list:
self.attached_vol_list.append(vol['VolumeId'])
else:
if not vol['VolumeId'] in self.unattached_vol_list:
self.unattached_vol_list.append(vol['VolumeId'])
    # Count volume types and respective volume size
def count_volume_types(self, vol_list, vol_list_type, region):
# Dictionary to store the count and size
devices_dict = {}
if vol_list_type == 'attached':
vol_list = self.attached_vol_list
else:
vol_list = self.unattached_vol_list
for vol_id in vol_list:
if vol_id in self.dictionary[region]['EBS']:
v_type = self.dictionary[region]['EBS'][vol_id]['volumeType']
if v_type in devices_dict:
devices_dict[v_type]['count'] += 1
devices_dict[v_type]['size'] += self.dictionary[region]['EBS'][vol_id]['size']
else:
                    devices_dict[v_type] = {
                        'count': 1,
                        'size': self.dictionary[region]['EBS'][vol_id]['size'],
                    }
self.volume_ebs[region] = devices_dict
return self.volume_ebs[region]
# Get monthly estimated cost for AWS resources
def get_price(
self,
regions,
volume
):
p_info = pricing_info()
elbv2 = p_info.price_list_ELBV2()
elb = p_info.price_list_ELB()
vol_pricing = p_info.price_list_EBS()
pricing_json = p_info.price_list_EC2()
snapshot_pricing = p_info.price_list_snapshots()
# Pricing
for region in regions:
x.add_row(
[
region,
'',
'',
'',
'',
'',
''
]
)
total_instances = 0
total_size = 0
price_per_month = 0
price = 0
total_cost = 0.00
unattached_volume_cost = 0.00
attached_volume_cost = 0.00
unattached_length = 0
attached_length = 0
# EC2 pricing
x.add_row(
[
'',
'EC2 Instances',
'',
'',
'',
'',
'',
]
)
count_of_instances = self.count_instance_types(self.list_instances(self.state, region), region)
for i_type in count_of_instances:
if i_type in (instance_type for instance_type in pricing_json[region]['EC2']):
price = round(float(pricing_json[region]['EC2'][i_type]['OnDemand']['USD']),3)
total_cost = round(float(total_cost + (price * count_of_instances[i_type]['count'])), 3)
total_instances += count_of_instances[i_type]['count']
x.add_row(
[
'',
'',
i_type,
count_of_instances[i_type]['count'],
price,
'',
'',
]
)
x.add_row(
[
'',
'',
'',
'',
'',
total_instances,
round((total_cost * self.per_month_hours),3),
]
)
# Classic ELB pricing
x.add_row(
[
'',
'ELB Classic',
'',
'',
'',
'',
''
]
)
classic_elb_instances = self.count_classic_elb(region)
price = float(elb[region]['ELB']['OnDemand']['USD'])
total_cost = round(float(price * classic_elb_instances * self.per_month_hours),3)
x.add_row(
[
'',
'',
'',
'',
price,
classic_elb_instances,
total_cost,
]
)
# Network ELB pricing
x.add_row(
[
'',
'ELB Network',
'',
'',
'',
'',
''
]
)
network_elb_instances = self.count_network_elb(region)
price = float(elbv2[region]['ELBV2']['OnDemand']['USD'])
total_cost = round(
float(price * network_elb_instances * self.per_month_hours),
3,
)
x.add_row(
[
'',
'',
'',
'',
price,
network_elb_instances,
total_cost,
]
)
# Volume pricing
x.add_row(
[
'',
'Volume',
'',
'',
'',
'',
''
]
)
x.add_row(
[
'',
'',
'Attached Volume',
'',
'',
'',
''
]
)
attached_vol_dict = self.count_volume_types(
self.list_volumes(region),
'attached',
region
)
x.add_row(
[
'',
'',
'',
'',
'',
'',
''
]
)
for volume_type in attached_vol_dict:
if volume_type in (v_type for v_type in vol_pricing[region]['EBS']):
attached_length += attached_vol_dict[volume_type]['count']
price = float(vol_pricing[region]['EBS'][volume_type]['OnDemand']['USD'])
attached_volume_cost = round(
float(float(attached_vol_dict[volume_type]['size'])
* price
+ attached_volume_cost), 3)
x.add_row(
[
'',
'',
volume_type,
attached_vol_dict[volume_type]['count'],
price,
attached_vol_dict[volume_type]['size'],
'',
]
)
x.add_row(
[
'',
'',
'',
'',
'Total Attached Volumes',
attached_length,
attached_volume_cost,
]
)
x.add_row(
[
'',
'',
'Orphaned Volume',
'',
'',
'',
''
]
)
unattached_vol_dict = self.count_volume_types(
self.list_volumes(region),
'unattached',
region
)
x.add_row(
[
'',
'',
'',
'',
'',
'',
''
]
)
for volume_type in unattached_vol_dict:
if volume_type in (v_type for v_type in vol_pricing[region]['EBS']):
unattached_length += unattached_vol_dict[volume_type]['count']
price = float(vol_pricing[region]['EBS'][volume_type]['OnDemand']['USD'])
unattached_volume_cost = round(
float(float(unattached_vol_dict[volume_type]['size'])
* price
+ unattached_volume_cost), 3)
x.add_row(
[
'',
'',
volume_type,
unattached_vol_dict[volume_type]['count'],
price,
unattached_vol_dict[volume_type]['size'],
'',
]
)
x.add_row(
[
'',
'',
'',
'',
'Total Orphaned Volumes',
unattached_length,
unattached_volume_cost,
]
)
# Snapshot pricing
x.add_row(
[
'',
'Snapshots',
'',
'',
'',
'',
''
]
)
x.add_row(
[
'',
'',
'',
'',
'',
'',
''
]
)
attached_snap = self.count_snapshots('attached', region)
price = float(snapshot_pricing[region]['Snapshots']['OnDemand']['USD'])
for volume_id in self.snap_vol_id:
if volume_id in (vol_id for vol_id in self.dictionary[region]['EBS']):
size = self.dictionary[region]['EBS'][volume_id]['size']
total_size += size
price_per_month = round(
float(price
* float(total_size)), 3
)
x.add_row(
[
'',
'',
'snapshots',
attached_snap,
price,
total_size,
price_per_month,
]
)
orphaned_snap = self.count_snapshots('unattached', region)
x.add_row(
[
'',
'',
'orphaned snapshots',
orphaned_snap,
price,
'',
round(
float(price
* orphaned_snap), 3)
]
)
print(x)
# Get monthly estimated cost for AWS resources
def get_resources(
self,
regions,
volume
):
for region in regions:
y.add_row(
[
region,
'',
'',
'',
'',
''
]
)
total_instances = 0
size = 0
unattached_length = 0
attached_length = 0
# EC2 pricing
y.add_row(
[
'',
'EC2 Instances',
'',
'',
'',
'',
]
)
count_of_instances = self.count_instance_types(self.list_instances(self.state, region), region)
for i_type in count_of_instances:
total_instances += count_of_instances[i_type]['count']
y.add_row(
[
'',
'',
i_type,
count_of_instances[i_type]['count'],
'',
'',
]
)
y.add_row(
[
'',
'',
'',
'',
'',
total_instances,
]
)
# Classic ELB pricing
y.add_row(
[
'',
'ELB Classic',
'',
'',
'',
''
]
)
classic_elb_instances = self.count_classic_elb(region)
y.add_row(
[
'',
'',
'',
'',
'',
classic_elb_instances,
]
)
# Network ELB pricing
y.add_row(
[
'',
'ELB Network',
'',
'',
'',
''
]
)
network_elb_instances = self.count_network_elb(region)
y.add_row(
[
'',
'',
'',
'',
'',
network_elb_instances
]
)
# Volume pricing
y.add_row(
[
'',
'Volume',
'',
'',
'',
''
]
)
y.add_row(
[
'',
'',
'Attached Volume',
'',
'',
''
]
)
attached_vol_dict = self.count_volume_types(
self.list_volumes(region),
'attached',
region
)
y.add_row(
[
'',
'',
'',
'',
'',
''
]
)
for volume_type in attached_vol_dict:
attached_length += attached_vol_dict[volume_type]['count']
y.add_row(
[
'',
'',
volume_type,
attached_vol_dict[volume_type]['count'],
'',
attached_vol_dict[volume_type]['size']
]
)
y.add_row(
[
'',
'',
'',
'',
'Total Attached Volumes',
attached_length
]
)
y.add_row(
[
'',
'',
'Orphaned Volume',
'',
'',
''
]
)
unattached_vol_dict = self.count_volume_types(
self.list_volumes(region),
'unattached',
region
)
y.add_row(
[
'',
'',
'',
'',
'',
''
]
)
for volume_type in unattached_vol_dict:
unattached_length += unattached_vol_dict[volume_type]['count']
y.add_row(
[
'',
'',
volume_type,
unattached_vol_dict[volume_type]['count'],
'',
unattached_vol_dict[volume_type]['size']
]
)
y.add_row(
[
'',
'',
'',
'',
'Total Orphaned Volumes',
unattached_length
]
)
# Snapshot pricing
y.add_row(
[
'',
'Snapshots',
'',
'',
'',
''
]
)
y.add_row(
[
'',
'',
'',
'',
'',
''
]
)
attached_snap = self.count_snapshots('attached', region)
for volume_id in self.snap_vol_id:
if volume_id in (vol_id for vol_id in self.dictionary[region]['EBS']):
size += self.dictionary[region]['EBS'][volume_id]['size']
y.add_row(
[
'',
'',
'snapshots',
attached_snap,
'',
size
]
)
orphaned_snap = self.count_snapshots('unattached', region)
y.add_row(
[
'',
'',
'orphaned snapshots',
orphaned_snap,
'',
''
]
)
print(y)
aws_audit = AWSAudit()
``` |
{
"source": "jharris1993/thonny",
"score": 2
} |
#### File: thonny/thonny/languages.py
```python
import gettext
from logging import getLogger
import os
logger = getLogger(__name__)
BASE_LANGUAGE_CODE = "en_US"
BASE_LANGUAGE_NAME = "English"
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
LANGUAGES_DICT = {
"cs_CZ": "Čeština [BETA]",
"de_DE": "Deutsch",
"et_EE": "Eesti",
BASE_LANGUAGE_CODE: BASE_LANGUAGE_NAME,
"es_ES": "Español",
"fr_FR": "Français",
"it_IT": "Italiano",
"lt_LT": "Lietuvių",
"hu_HU": "Magyar [BETA]",
"nb_NO": "Norsk (Bokmål)",
"nn_NO": "Norsk (Nynorsk)",
"nl_NL": "Nederlands",
"pl_PL": "Polski",
"pt_PT": "Português (PT)",
"pt_BR": "Português (BR)",
"ro_RO": "Român",
"ru_RU": "Русский",
"sk_SK": "Slovenčina [BETA]",
"fi_FI": "Suomi [BETA]",
"sv_SE": "Svenska [BETA]",
"sq_AL": "Shqip [ALPHA]",
"tr_TR": "Türkçe [BETA]",
"uk_UA": "Українська",
"zh_TW": "繁體中文-TW",
"zh_CN": "简体中文 ",
"ja_JP": "日本語 [ALPHA]",
"hy_AM": "Հայերէն [BETA]",
"fa_IR": "[BETA] فارسی",
"el_GR": "Ελληνικά",
"ko_KR": "한국어 [韓國語] [ALPHA]",
"ar_AR": "[BETA] عربي",
}
# how many spaces to add to button caption in order to make whole text visible
BUTTON_PADDING_SIZES = {"zh_TW": 4, "zh_CN": 4, "ja_JP": 4}
_translation = gettext.NullTranslations()
def get_button_padding():
from thonny import get_workbench
code = get_workbench().get_option("general.language")
if code in BUTTON_PADDING_SIZES:
return BUTTON_PADDING_SIZES[code] * " "
else:
return ""
def get_language_code_by_name(name):
for code in LANGUAGES_DICT:
if LANGUAGES_DICT[code] == name:
return code
raise RuntimeError("Unknown language name '%s'" % name)
def tr(message: str) -> str:
return _translation.gettext(message)
def set_language(language_code: str) -> None:
global _translation
try:
path = os.path.join(os.path.dirname(__file__), "locale")
_translation = gettext.translation("thonny", path, [language_code])
except Exception as e:
logger.exception("Could not set language to '%s", language_code, exc_info=e)
_translation = gettext.NullTranslations()
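# Hypothetical usage sketch (not part of the original module): switch the UI
# language and translate a message; tr() falls back to the original string
# when no catalogue for the requested language can be loaded.
if __name__ == "__main__":
    set_language("de_DE")
    print(tr("Files"))
    print(get_language_code_by_name("Deutsch"))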
``` |
{
"source": "jharrymoore/Icolos",
"score": 2
} |
#### File: core/job_control/job_control.py
```python
from typing import List
from pydantic.main import BaseModel
from icolos.core.workflow_steps.step import StepBase
from icolos.utils.general.parallelization import Parallelizer, SubtaskContainer
from icolos.core.workflow_steps.step import _LE
class IterParallelizer(BaseModel):
# config block controlling how the steps are parallelized
    # e.g. for a 5-step workflow with 10 repeats, set dependent_steps = 5 and cores = 10
# this will allow each independent replica to be allocated to a single job queue, retaining step order
parallelize: bool = False
cores: int = 1
dependent_steps: int = None
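# Illustrative configuration (not part of the original file): a workflow of
# 5 dependent steps repeated over 10 independent replicas would use
#   IterParallelizer(parallelize=True, cores=10, dependent_steps=5)
# so each replica keeps its step order while the replicas run in parallel.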
class StepJobControl(StepBase, BaseModel):
"""
Step class containing job control functionality required for StepIterator, supports Slurm for job scheduling
Supports running Icolos process as master job for parallel step execution on cluster. Generates a pool of initialized steps to be executed, based on the
"""
initialized_steps: List = []
# expect the parallel execution block to be handed over from flow control
parallel_execution: IterParallelizer = IterParallelizer()
def __init__(self, **data):
super().__init__(**data)
def _prepare_batch(self, batch) -> List[List[StepBase]]:
batch_steps = []
for sublist in batch:
sublist_steps = []
for task in sublist:
sublist_steps.append(task.data)
batch_steps.append(sublist_steps)
return batch_steps
def execute(self):
"""
Execute multiple steps in parallel
"""
# Spin up multiple processes
self.execution.parallelization.cores = self.parallel_execution.cores
# each subtask needs to contain an entire mini workflow to be executed sequentially,
self.execution.parallelization.max_length_sublists = (
self.parallel_execution.dependent_steps
)
# if we try steps multiple times, we have steps fail depending on its dependency on a
# previous step - too complicated
self._subtask_container = SubtaskContainer(max_tries=1)
self._subtask_container.load_data(self.initialized_steps)
parallelizer = Parallelizer(func=self._run_step)
n = 1
while self._subtask_container.done() is False:
next_batch = self._get_sublists(
get_first_n_lists=self.parallel_execution.cores
) # return n lists of length max_sublist_length
_ = [sub.increment_tries() for element in next_batch for sub in element]
_ = [sub.set_status_failed() for element in next_batch for sub in element]
self._logger.log(
f"Starting {len(next_batch)} parallel jobs under Icolos JobControl, execution batch {n}",
_LE.INFO,
)
steps = self._prepare_batch(next_batch)
result = parallelizer.execute_parallel(steps=steps)
            # successful execution of each step is not explicitly checked;
            # the step is responsible for throwing errors if something has gone wrong
for task in next_batch:
for subtask in task:
subtask.set_status_success()
def _run_step(self, steps: List[StepBase]):
# submits then monitors the step
for step in steps: # length max_len_sublist
# at this point the internal steps don't have their data initialised
step.generate_input()
step.execute()
step.process_write_out()
```
#### File: calculation/electrostatics/cresset_ec.py
```python
from copy import deepcopy
from typing import List, Tuple
from icolos.core.workflow_steps.step import StepBase
from pydantic import BaseModel
import tempfile
from icolos.utils.enums.step_enums import StepCressetEnum
from icolos.utils.execute_external.cresset_executor import CressetExecutor
from icolos.utils.general.files_paths import gen_tmp_file
from icolos.core.workflow_steps.step import _LE
import os
from icolos.utils.general.parallelization import Parallelizer, SubtaskContainer
_SCE = StepCressetEnum()
class StepCressetEC(StepBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=CressetExecutor)
self._check_backend_availability()
def _prepare_tmp_input(self, batch: List) -> Tuple[List, List]:
conformers = []
tmp_dirs = []
protein = self.data.generic.get_argument_by_extension(
"pdb", rtn_file_object=True
)
for sublist in batch:
for task in sublist:
conformer = task.data
conformers.append(conformer)
# generate the tmpdir
tmp_dir = tempfile.mkdtemp()
tmp_dirs.append(tmp_dir)
_, path_input_sdf = gen_tmp_file(
prefix="tmp_", suffix=".sdf", dir=tmp_dir
)
conformer.write(path=path_input_sdf)
# write the protein to that tmpdir
protein.write(path=os.path.join(tmp_dir, "protein.pdb"), join=False)
return conformers, tmp_dirs
def _execute_cresset_ec_parallel(self):
parallelizer = Parallelizer(func=self._run_conformer)
n = 1
while self._subtask_container.done() is False:
next_batch = self._get_sublists(
get_first_n_lists=self._get_number_cores()
) # return n lists of length max_sublist_length
_ = [sub.increment_tries() for element in next_batch for sub in element]
_ = [sub.set_status_failed() for element in next_batch for sub in element]
conformers, tmp_dirs = self._prepare_tmp_input(next_batch)
self._logger.log(
f"Executing Cresset EC for batch {n} containing {len(conformers)} conformers",
_LE.DEBUG,
)
parallelizer.execute_parallel(tmp_dir=tmp_dirs, conformer=conformers)
results = self._parse_results(tmp_dirs, conformers)
for sublist, result in zip(next_batch, results):
# TODO: this only works if max length sublist == 1, fine for now as that is all turbomole can handle
for task in sublist:
if result == _SCE.SUCCESS:
task.set_status_success()
else:
task.set_status_failed()
self._remove_temporary(tmp_dirs)
n += 1
def _parse_results(self, tmp_dirs: List, conformers: List):
# walk over the directory structure, parse the output file, identify the conformer, attach a tag to the mol object
# TODO: No idea what the output looks like for this, write the parser!!
pass
def execute(self):
# unroll all conformers
all_conformers = []
for compound in self.get_compounds():
for enum in compound.get_enumerations():
if self._input_object_empty(enum):
continue
else:
for conformer in enum.get_conformers():
conf = deepcopy(conformer)
all_conformers.append(conf)
self._subtask_container = SubtaskContainer(
max_tries=self.execution.failure_policy.n_tries
)
self._subtask_container.load_data(all_conformers)
self._execute_cresset_ec_parallel()
def _run_conformer(self):
# run a single conformer through Flare's EC
self._backend_executor.execute()
# execution is
# module load Flare && pyflare electrostaticcomplementarity.py -p protein.pdb ligands.sdf
```
#### File: workflow_steps/calculation/rmsd.py
```python
from typing import List
from pydantic import BaseModel
from icolos.core.containers.compound import Conformer, unroll_conformers
from icolos.utils.enums.step_enums import StepRMSDEnum, StepDataManipulationEnum
from icolos.core.workflow_steps.step import _LE
from icolos.core.workflow_steps.calculation.base import StepCalculationBase
_SR = StepRMSDEnum()
_SDM = StepDataManipulationEnum()
class StepRMSD(StepCalculationBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# extend parameters
if _SR.METHOD not in self.settings.additional.keys():
self.settings.additional[_SR.METHOD] = _SR.METHOD_ALIGNMOL
def _calculate_RMSD(self, conformers: List[Conformer]):
for conf in conformers:
rmsd_matrix = self._calculate_rms_matrix(
conformers=[conf] + conf.get_extra_data()[_SDM.KEY_MATCHED],
rms_method=self._get_rms_method(),
)
# use the specified tag name if it is the first value and append an index in case there are more
for idx, col in enumerate(rmsd_matrix.columns[1:]):
combined_tag = "".join([_SR.RMSD_TAG, "" if idx == 0 else str(idx)])
rmsd_value = rmsd_matrix.iloc[[0]][col][0]
conf.get_molecule().SetProp(combined_tag, str(rmsd_value))
conf.get_extra_data()[_SDM.KEY_MATCHED][idx].get_molecule().SetProp(
combined_tag, str(rmsd_value)
)
def execute(self):
# this assumes that the conformers that are to be matched for the calculation of the RMSD matrix, are attached
# as a list in a generic data field with a specified key
conformers = unroll_conformers(compounds=self.get_compounds())
self._calculate_RMSD(conformers=conformers)
self._logger.log(
f"Annotated {len(conformers)} conformers with RMSD values (tag: {_SR.RMSD_TAG}).",
_LE.INFO,
)
# TODO: add a nice pandas DF with the RMSD values to a generic data field
```
#### File: workflow_steps/calculation/shaep.py
```python
from icolos.utils.execute_external.execute import Executor
from icolos.core.workflow_steps.calculation.base import StepCalculationBase
from icolos.core.workflow_steps.step import _LE
from icolos.utils.enums.step_enums import StepShaepEnum
from icolos.utils.enums.program_parameters import PantherEnum, ShaepEnum
from icolos.core.containers.compound import Conformer
import tempfile
from pydantic import BaseModel
import os
_SSE = StepShaepEnum()
_SE = ShaepEnum()
_PE = PantherEnum()
class StepShaep(StepCalculationBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=Executor)
def _prepare_tmp_input_dir(self):
tmp_dir = tempfile.mkdtemp()
return tmp_dir
def _execute_backend(self, conf_path: str, tmp_dir: str, ni_path: str):
arguments = [
os.path.join(tmp_dir, ni_path),
conf_path,
os.path.join(tmp_dir, _SE.OUTPUT_SIMILARITY),
]
self._backend_executor.execute(
command=_SE.SHAEP_EXECUTABLE, arguments=arguments, check=True
)
def _parse_output(self, tmp_dir: str, conformer: Conformer):
with open(os.path.join(tmp_dir, _SE.OUTPUT_SIMILARITY), "r") as f:
# TODO: add support for multiple input structures; ignore the names (all will be in one line), but from
# position 8 (index 7 in python) onwards, the shape and esp similarities are reported in the same
# order as the input, i.e. <7 other values> mol1_shape mol1_esp mol2_shape ...
parts = f.readlines()[1].split("\t")
conformer.get_molecule().SetProp(_SE.TAG_SHAPE_SIMILARITY, str(parts[7]))
conformer.get_molecule().SetProp(_SE.TAG_ESP_SIMILARITY, str(parts[8]))
def execute(self):
number_rescored = 0
for compound in self.get_compounds():
for enumeration in compound.get_enumerations():
if len(enumeration.get_conformers()) == 0:
self._logger.log(
f"Found no conformers for enumeration {enumeration} for compound {compound}.",
_LE.WARNING,
)
                    # we can still execute shaep at the enumeration level; if the compounds are correctly annotated they should be written out ok. Will be slower though
# easiest for now is to add the enumeration mol object as a single conformer and run that through shaep
mol = enumeration.get_molecule()
conf = Conformer(conformer=mol)
enumeration.add_conformer(conf)
                # TODO: ShaEP allows batch execution for any number of compounds (parsing gets more difficult though)
# Implement that to avoid overhead from file system issues
# TODO: Refactor and add comments
for conformer in enumeration.get_conformers():
tmp_dir = self._prepare_tmp_input_dir()
conf_path = os.path.join(tmp_dir, _SE.CONFORMER_PATH)
ni_file = self.data.generic.get_files_by_extension("mol2")[0]
ni_file.write(tmp_dir)
conformer.write(conf_path)
self._execute_backend(conf_path, tmp_dir, ni_file.get_file_name())
self._parse_output(tmp_dir, conformer)
self._logger.log(
f"Finished shaep execution for conformer {enumeration.get_index_string()}.",
_LE.DEBUG,
)
number_rescored += 1
self._remove_temporary(tmp_dir)
self._logger.log(f"Executed ShaEP for {number_rescored} conformers.", _LE.INFO)
```
#### File: workflow_steps/confgen/omega.py
```python
import os
from typing import List
from pydantic import BaseModel
from rdkit import Chem
from copy import deepcopy
from icolos.utils.execute_external.omega import OMEGAExecutor
from icolos.core.workflow_steps.step import _LE, _CTE
from icolos.utils.general.molecules import get_charge_for_molecule
from icolos.core.containers.compound import Enumeration, Conformer
from icolos.utils.enums.program_parameters import OMEGAEnum, OMEGAOutputEnum
from icolos.core.workflow_steps.confgen.base import StepConfgenBase
_EE = OMEGAEnum()
_COE = OMEGAOutputEnum()
class StepOmega(StepConfgenBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# initialize the executor and test availability
self._initialize_backend(executor=OMEGAExecutor)
self._check_backend_availability()
def _parse_OMEGA_result(
self, dir_path: str, enumeration: Enumeration
) -> List[Conformer]:
# OMEGA will output a variety of files to "dir_path"
conformers_sdf = os.path.join(dir_path, _COE.OUTPUT_SDF_NAME)
# energies are added as a tag in the output
mol_supplier = Chem.SDMolSupplier(conformers_sdf, removeHs=False)
charge = str(
get_charge_for_molecule(enumeration.get_molecule(), add_as_tag=False)
)
result = []
for mol_id, mol in enumerate(mol_supplier):
mol.SetProp(
_CTE.CONFORMER_ENERGY_TAG, mol.GetProp(_COE.CLASSIC_ENERGY_OUTPUT_TAG)
)
mol.ClearProp(_COE.CLASSIC_ENERGY_OUTPUT_TAG)
mol.SetProp(_CTE.FORMAL_CHARGE_TAG, charge)
conf = Conformer(conformer=mol)
result.append(conf)
return result
def _set_input_output_paths(self, parameters: dict, input_path: str) -> dict:
# this is handled this way to overwrite any specifications from the user for the input / output paths as well
parameters[_EE.CLASSIC_INPUT] = input_path
parameters[_EE.CLASSIC_OUTPUT] = _COE.OUTPUT_SDF_NAME
return parameters
def _prepare_settings(self, tmp_dir: str, enumeration: Enumeration) -> list:
# the first argument is the mode of binary "oeomega" (for now defaults to "classic")
settings = [_EE.OMEGA_MODE_CLASSIC]
# add flags
# make sure, the energy tag is set as well
for flag in self.settings.arguments.flags:
settings.append(flag)
if _EE.CLASSIC_SDENERGY not in settings:
settings.append(_EE.CLASSIC_SDENERGY)
# add parameters
parameters = deepcopy(self.settings.arguments.parameters)
# update / over-write fields that need a specific value or are defined elsewhere
parameters = self._set_input_output_paths(
parameters=parameters,
input_path=self._prepare_temp_input(tmp_dir, enumeration.get_molecule()),
)
# flatten the dictionary into a list for command-line execution
for key in parameters.keys():
settings.append(key)
settings.append(parameters[key])
return settings
def execute(self):
for compound in self.get_compounds():
for enumeration in compound.get_enumerations():
if not self._input_object_valid(enumeration):
continue
# set up
tmp_dir = self._move_to_temp_dir()
settings = self._prepare_settings(tmp_dir, enumeration=enumeration)
# execution
self._logger.log(
f"Executing OMEGA backend in folder {tmp_dir}.", _LE.DEBUG
)
result = self._backend_executor.execute(
command=_EE.OMEGA, arguments=settings, check=False
)
self._restore_working_dir()
# parsing
conformers = self._parse_OMEGA_result(tmp_dir, enumeration=enumeration)
enumeration.clear_conformers()
enumeration.add_conformers(conformers=conformers, auto_update=True)
self._logger.log(
f"Completed OMEGA for enumeration {enumeration.get_index_string()}, added {len(conformers)} conformers.",
_LE.INFO,
)
# clean-up
self._remove_temporary(tmp_dir)
```
#### File: workflow_steps/gromacs/genion.py
```python
from icolos.utils.enums.step_enums import StepBaseEnum, StepGromacsEnum
from icolos.utils.enums.program_parameters import GromacsEnum
from icolos.core.workflow_steps.gromacs.base import StepGromacsBase
from icolos.utils.execute_external.gromacs import GromacsExecutor
from pydantic import BaseModel
from icolos.core.workflow_steps.step import _LE
import os
_GE = GromacsEnum()
_SGE = StepGromacsEnum()
_SBE = StepBaseEnum
class StepGMXGenion(StepGromacsBase, BaseModel):
"""
Wrapper for gmx genion
"""
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=GromacsExecutor)
self._check_backend_availability()
def execute(self):
tmp_dir = self._make_tmpdir()
self._write_input_files(tmp_dir)
arguments = self._parse_arguments(
{
# input file paths are handled internally
"-o": _SGE.STD_STRUCTURE,
"-p": self.data.generic.get_argument_by_extension(_SGE.FIELD_KEY_TOPOL),
"-s": self.data.generic.get_argument_by_extension(_SGE.FIELD_KEY_TPR),
}
)
result = self._backend_executor.execute(
command=_GE.GENION,
arguments=arguments,
location=tmp_dir,
pipe_input=self.construct_pipe_arguments(
tmp_dir, self.settings.additional[_SBE.PIPE_INPUT]
),
)
for line in result.stdout.split("\n"):
self._logger_blank.log(line, _LE.DEBUG)
self._logger.log(
f"Completed execution for {self.step_id} successfully", _LE.INFO
)
# this is the last structural change to the topology in a regular gromacs setup,
# update the index groups here
make_ndx_args = ["-f", _SGE.STD_STRUCTURE, "-o", _SGE.STD_INDEX]
index_files = [f for f in os.listdir(tmp_dir) if f.endswith(".ndx")]
# remove any existing index files
for f in index_files:
self._remove_temporary(os.path.join(tmp_dir, f))
# generate new index file
result = self._backend_executor.execute(
command=_GE.MAKE_NDX,
arguments=make_ndx_args,
location=tmp_dir,
check=True,
pipe_input='echo -e "1 | 12 \nq"',
)
for line in result.stdout.split("\n"):
self._logger_blank.log(line, _LE.DEBUG)
self._logger.log('Added index group to "index.ndx"', _LE.DEBUG)
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
```
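The `pipe_input` arguments above exist because `gmx genion` and `gmx make_ndx` are interactive tools that read menu selections from stdin. A minimal sketch of that idea with the standard library, independent of the Icolos executor classes and assuming `gmx` is on the PATH; the group numbers are only examples.
```python
import subprocess

def run_make_ndx(structure: str, index_out: str = "index.ndx") -> None:
    # "1 | 12" merges two index groups, "q" quits the interactive prompt
    subprocess.run(
        ["gmx", "make_ndx", "-f", structure, "-o", index_out],
        input="1 | 12\nq\n",
        text=True,
        check=True,
    )
```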
#### File: workflow_steps/gromacs/mmpbsa.py
```python
from subprocess import CompletedProcess
from icolos.core.containers.generic import GenericData
from typing import AnyStr, List
from icolos.core.workflow_steps.gromacs.base import StepGromacsBase
from pydantic import BaseModel
from icolos.utils.enums.step_enums import StepGromacsEnum
from icolos.core.workflow_steps.step import _LE
from icolos.utils.enums.program_parameters import GromacsEnum
import os
from icolos.utils.general.files_paths import attach_root_path
_SGE = StepGromacsEnum()
_GE = GromacsEnum()
class StepGMXmmpbsa(StepGromacsBase, BaseModel):
"""
Execute gmx_MMPBSA, calculates binding free energy of
protein-ligand complex using single trajectory approximation,
using Amber's mmpbsa.py script
"""
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(self._get_gromacs_executor())
self._check_backend_availability()
def _get_arg(self, ext) -> AnyStr:
return self.data.generic.get_argument_by_extension(ext)
def _generate_amber_input_file(self) -> None:
input_file = (
self.settings.additional[_SGE.INPUT_FILE]
if _SGE.INPUT_FILE in self.settings.additional.keys()
else None
)
# Normally the user should provide an input file to control the mmgbsa protocol
if input_file is not None and os.path.isfile(input_file):
self._logger.log(
f"Using provided AMBER input file at {self.settings.additional[_SGE.INPUT_FILE]}",
_LE.DEBUG,
)
with open(input_file, "r") as f:
template = GenericData(file_name="mmpbsa.in", file_data=f.read())
else:
self._logger.log("No input file found, defaulting to template", _LE.WARNING)
            # no valid user-specified file: fall back to the default AMBER input template
with open(attach_root_path(_SGE.DEFAULT_MMPBSA_IN), "r") as f:
template = GenericData(file_name="mmpbsa.in", file_data=f.read())
self.data.generic.add_file(template)
def _parse_arguments(self, flag_dict: dict) -> List:
args = []
for flag in self.settings.arguments.flags:
if flag != "-O":
args.append(flag)
for key, value in self.settings.arguments.parameters.items():
args.append(key)
args.append(value)
for key, value in flag_dict.items():
if key not in args:
args.append(key)
args.append(value)
# capture output
return args
def _run_mmpbsa(self, args, tmp_dir) -> CompletedProcess:
command = _GE.MMPBSA
self._logger.log(f"Executing mmgbsa calculation in dir {tmp_dir}", _LE.DEBUG)
result = self._backend_executor.execute(
command=command, arguments=args, check=True, location=tmp_dir
)
for line in result.stdout.split("\n"):
self._logger_blank.log(line, _LE.INFO)
for line in result.stderr.split("\n"):
self._logger_blank.log(line, _LE.INFO)
return result
def _parse_coupling_groups(self, tmp_dir) -> AnyStr:
# parse the coupling groups to their indexes
output = []
pipe_input = self.settings.additional[_SGE.COUPLING_GROUPS]
structure = self.data.generic.get_argument_by_extension(
_SGE.FIELD_KEY_STRUCTURE
)
arguments = ["-f", structure]
if [f for f in os.listdir(tmp_dir) if f.endswith("ndx")]:
arguments.extend(["-n", "index.ndx"])
else:
arguments.extend(["-o", "index.ndx"])
result = self._backend_executor.execute(
command=_GE.MAKE_NDX,
arguments=arguments,
location=tmp_dir,
check=True,
pipe_input='echo -e "q"',
)
for param in pipe_input.split():
for line in result.stdout.split("\n"):
parts = line.split()
if param in line and parts[1] == param:
output.append(parts[0])
break
self._logger.log(f"Resolved coupling groups {output}", _LE.DEBUG)
return " ".join(output)
def _get_file_from_dir(self, tmp_dir: str, ext: str) -> AnyStr:
file = [f for f in os.listdir(tmp_dir) if f.endswith(ext)]
assert len(file) == 1
return file[0]
def execute(self) -> None:
"""
Execute gmx_MMPBSA
Note: execution using mpirun is not supported for stability reasons
"""
tmp_dir = self._make_tmpdir()
self._generate_amber_input_file()
self._write_input_files(tmp_dir)
# gmx_MMPBSA requires the coupling groups of the receptor and ligand
# form any required coupling groups with make_ndx_command before parsing coupling groups
# e.g. combine protein + cofactor
ndx_commands = (
self.settings.additional[_SGE.MAKE_NDX_COMMAND]
if _SGE.MAKE_NDX_COMMAND in self.settings.additional.keys()
else None
)
if ndx_commands is not None:
            # make_ndx can be run multiple times for complex cases; each set of pipe input must be separated by a semicolon
for args in ndx_commands.split(";"):
self._add_index_group(tmp_dir=tmp_dir, pipe_input=args)
flag_dict = {
"-i": _SGE.MMPBSA_IN,
"-cs": self._get_arg("tpr"),
"-cg": self._parse_coupling_groups(tmp_dir),
"-ci": self._get_file_from_dir(tmp_dir=tmp_dir, ext="ndx"),
"-ct": self._get_arg("xtc"),
"-cp": self._get_arg("top"),
# do not attempt to open the results in the GUI afterwards
"-nogui": "",
}
flag_list = self._parse_arguments(flag_dict=flag_dict)
result = self._run_mmpbsa(flag_list, tmp_dir)
# parse and delete generated output
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
```
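`_parse_coupling_groups` above resolves group names (e.g. `Protein LIG`) to the numeric indices printed by `gmx make_ndx`, which gmx_MMPBSA expects for its `-cg` flag. A minimal sketch of that name-to-index mapping; the sample output below is illustrative only.
```python
def resolve_groups(make_ndx_stdout: str, wanted: str) -> str:
    """Map whitespace-separated group names to their make_ndx indices."""
    resolved = []
    for name in wanted.split():
        for line in make_ndx_stdout.split("\n"):
            parts = line.split()
            if len(parts) > 1 and parts[1] == name:
                resolved.append(parts[0])
                break
    return " ".join(resolved)

example_stdout = (
    "  0 System   : 10000 atoms\n"
    "  1 Protein  :  3225 atoms\n"
    " 13 LIG      :    45 atoms"
)
print(resolve_groups(example_stdout, "Protein LIG"))  # -> "1 13"
```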
#### File: workflow_steps/gromacs/solvate.py
```python
from icolos.utils.enums.step_enums import StepGromacsEnum
from icolos.utils.enums.program_parameters import GromacsEnum
from icolos.utils.execute_external.gromacs import GromacsExecutor
from pydantic import BaseModel
from icolos.core.workflow_steps.step import _LE
from icolos.core.workflow_steps.gromacs.base import StepGromacsBase
_GE = GromacsEnum()
_SGE = StepGromacsEnum()
class StepGMXSolvate(StepGromacsBase, BaseModel):
"""
Fill waterbox with solvent, executes gmx solvate
"""
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=GromacsExecutor)
self._check_backend_availability()
def execute(self):
tmp_dir = self._make_tmpdir()
self._write_input_files(tmp_dir)
structure_file = self.data.generic.get_argument_by_extension(
_SGE.FIELD_KEY_STRUCTURE
)
arguments = self._parse_arguments(
flag_dict={
"-cp": structure_file,
"-p": self.data.generic.get_argument_by_extension(_SGE.FIELD_KEY_TOPOL),
"-o": structure_file,
}
)
result = self._backend_executor.execute(
command=_GE.SOLVATE, arguments=arguments, location=tmp_dir
)
for line in result.stdout.split("\n"):
self._logger_blank.log(line, _LE.DEBUG)
self._logger.log(
f"Completed execution for {self.step_id} successfully.", _LE.INFO
)
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
```
#### File: workflow_steps/gromacs/trajcat.py
```python
from icolos.utils.enums.program_parameters import GromacsEnum
from icolos.core.workflow_steps.gromacs.base import StepGromacsBase
from icolos.utils.execute_external.gromacs import GromacsExecutor
from pydantic import BaseModel
from icolos.core.workflow_steps.step import _LE
import os
_GE = GromacsEnum()
class StepGMXTrjcat(StepGromacsBase, BaseModel):
"""
Concatenates multiple trajectories, useful for subsequent rmsd/cluster calculations
"""
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=GromacsExecutor)
self._check_backend_availability()
def execute(self):
tmp_dir = self._make_tmpdir()
# write the trajectories to the tmpdir, writing to separate file names, then glob the xtc files
for idx, file in enumerate(self.data.generic.get_files_by_extension(ext="xtc")):
file.write(path=os.path.join(tmp_dir, f"traj_{idx}.xtc"), join=False)
flag_dict = {
"-f": "*.xtc",
"-o": "trjcat_out.xtc",
"-cat": "", # need this to paste the trajectories back to back
}
arguments = self._parse_arguments(flag_dict=flag_dict)
result = self._backend_executor.execute(
command=_GE.TRJCAT, arguments=arguments, location=tmp_dir, check=True
)
for line in result.stdout.split("\n"):
self._logger_blank.log(line, _LE.DEBUG)
rm_files = [
f for f in os.listdir(tmp_dir) if f.endswith("xtc") and "trjcat" not in f
]
for f in rm_files:
os.remove(os.path.join(tmp_dir, f))
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
```
#### File: workflow_steps/io/base.py
```python
from pydantic import BaseModel
from icolos.core.workflow_steps.step import StepBase
from icolos.utils.enums.step_enums import StepBaseEnum
_SBE = StepBaseEnum
class StepIOBase(StepBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
```
#### File: workflow_steps/io/embedder.py
```python
from copy import deepcopy
from pydantic import BaseModel
from rdkit import Chem, RDLogger
from rdkit.Chem import AllChem
from icolos.core.containers.compound import Conformer
from icolos.utils.general.icolos_exceptions import StepFailed
from icolos.utils.enums.step_enums import StepEmbeddingEnum
from icolos.core.workflow_steps.io.base import StepIOBase
from icolos.core.workflow_steps.step import _LE
from icolos.utils.general.convenience_functions import *
from icolos.utils.smiles import to_mol
_SEE = StepEmbeddingEnum()
class StepEmbedding(StepIOBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# extend parameters with defaults
if _SEE.EMBED_AS not in self.settings.additional.keys():
self.settings.additional[_SEE.EMBED_AS] = _SEE.EMBED_AS_ENUMERATIONS
self._logger.log(
f'No embedding level specified, defaulting to "{_SEE.EMBED_AS_ENUMERATIONS}".',
_LE.INFO,
)
def _smile_to_molecule(self, smile: str) -> Chem.Mol:
mol = to_mol(smile)
if mol is None:
self._logger.log(
f"The smile {smile} could not be transformed into a molecule and will be skipped.",
_LE.WARNING,
)
return mol
def _embed_with_RDKit(self, smile: str, parameters: dict) -> Chem.Mol:
molecule = self._smile_to_molecule(smile)
# deactivate logger to suppress "missing Hs messages"
RDLogger.DisableLog("rdApp.*")
try:
embed_code = AllChem.EmbedMolecule(
molecule, randomSeed=42, useRandomCoords=True
)
        except Exception:
            self._logger.log(
                f'Could not embed molecule with SMILES "{smile}", critical error in "RDKit".',
                _LE.WARNING,
            )
return None
status = 0
if embed_code != -1:
status = AllChem.UFFOptimizeMolecule(molecule, maxIters=600)
if status == 1:
self._logger.log(
f"The 3D coordinate generation of molecule {smile} did not converge in time.",
_LE.WARNING,
)
else:
self._logger.log(
f"Could not embed molecule {smile} - no 3D coordinates have been generated.",
_LE.WARNING,
)
RDLogger.EnableLog("rdApp.*")
# add hydrogens to the molecule (if specified)
if nested_get(parameters, [_SEE.RDKIT_PROTONATE], default=True):
molecule = Chem.AddHs(molecule, addCoords=True)
if embed_code != -1 and status == 0:
return molecule
else:
return None
def _get_embedding_method(self, parameters: dict) -> str:
method = nested_get(parameters, [_SEE.METHOD], default=None)
if method is None:
error = "Embedding method not set."
self._logger.log(error, _LE.ERROR)
raise StepFailed(error)
return method.upper()
def _embed_molecule(self, smile: str, parameters: dict) -> Chem.Mol:
method = self._get_embedding_method(parameters)
if method == _SEE.METHOD_RDKIT:
return self._embed_with_RDKit(smile, parameters)
else:
self._logger.log(
f"Specified embedding method {method} not available.", _LE.ERROR
)
def execute(self):
# TODO: REFACTOR
parameters = deepcopy(self.settings.arguments.parameters)
embed_as = self.settings.additional[_SEE.EMBED_AS]
for compound in self.get_compounds():
if embed_as == _SEE.EMBED_AS_ENUMERATIONS:
enum_buffer = deepcopy(compound.get_enumerations())
compound.clear_enumerations()
for enumeration in enum_buffer:
enumeration.clear_molecule()
enumeration.clear_conformers()
molecule = self._embed_molecule(
smile=enumeration.get_smile(), parameters=parameters
)
if molecule is not None:
enumeration.set_molecule(molecule)
compound.add_enumeration(enumeration)
self._logger.log(
f"Embedding for compound {compound.get_index_string()} (name: {compound.get_name()}) completed ({len(compound)} of {len(enum_buffer)} enumerations successful).",
_LE.INFO,
)
elif embed_as == _SEE.EMBED_AS_CONFORMERS:
# TODO: double-check this bit
for enumeration in compound.get_enumerations():
enumeration.clear_conformers()
molecule = self._embed_molecule(
smile=enumeration.get_smile(), parameters=parameters
)
if molecule is not None:
conformer = Conformer(
conformer=molecule, enumeration_object=enumeration
)
enumeration.add_conformer(conformer, auto_update=True)
number_successful = len(
[
True
for enum in compound.get_enumerations()
if enum[0].get_molecule() is not None
]
)
self._logger.log(
f"Embedding for compound {compound.get_index_string()} (name: {compound.get_name()}) completed ({number_successful} of {len(compound)} enumerations successful).",
_LE.INFO,
)
else:
                raise ValueError(
                    f'Value "{embed_as}" for parameter "embed_as" not supported.'
                )
```
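The embedding step above relies on RDKit's `EmbedMolecule` with a fixed random seed followed by a UFF clean-up. Below is a stand-alone sketch of those calls; note that, unlike the step (which embeds the heavy-atom molecule first and adds hydrogens afterwards), this sketch adds hydrogens before embedding for simplicity.
```python
from rdkit import Chem
from rdkit.Chem import AllChem

def embed_smiles(smiles: str, max_iters: int = 600):
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None  # SMILES could not be parsed
    mol = Chem.AddHs(mol)
    embed_code = AllChem.EmbedMolecule(mol, randomSeed=42, useRandomCoords=True)
    if embed_code == -1:
        return None  # RDKit could not generate 3D coordinates
    status = AllChem.UFFOptimizeMolecule(mol, maxIters=max_iters)
    if status == 1:
        print(f"UFF optimisation of {smiles} did not converge within {max_iters} iterations")
    return mol

mol = embed_smiles("CC(=O)Nc1ccc(O)cc1")  # paracetamol
print(mol.GetNumConformers() if mol is not None else "embedding failed")
```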
#### File: workflow_steps/pmx/assemble_systems.py
```python
from typing import Dict, List
from icolos.core.workflow_steps.pmx.base import StepPMXBase
from pydantic import BaseModel
import os
from icolos.utils.enums.program_parameters import PMXAtomMappingEnum, PMXEnum
from icolos.utils.execute_external.pmx import PMXExecutor
from icolos.utils.general.parallelization import SubtaskContainer
_PE = PMXEnum()
_PAE = PMXAtomMappingEnum()
class StepPMXAssembleSystems(StepPMXBase, BaseModel):
"""
Executes the assemble_systems.py script, edges are parallelized over available cores
"""
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=PMXExecutor)
def execute(self):
assert self.work_dir is not None and os.path.isdir(self.work_dir)
# get edges from the perturbation map attached to the step
edges = self.get_edges()
# enforce one edge per task list (results in multiple batches for large maps)
self.execution.parallelization.max_length_sublists = 1
self._subtask_container = SubtaskContainer(
max_tries=self.execution.failure_policy.n_tries
)
self._subtask_container.load_data(edges)
self._execute_pmx_step_parallel(
run_func=self._execute_command, step_id="pmx assemble_systems"
)
def _execute_command(self, jobs: List):
args = {
"-edges": '"' + " ".join([e.get_edge_id() for e in jobs]) + '"',
"-ligand_path": os.path.join(self.work_dir, _PAE.LIGAND_DIR),
"-workPath": self.work_dir,
}
self._backend_executor.execute(
command=_PE.ASSEMBLE_SYSTEMS,
arguments=self.get_arguments(defaults=args),
check=True,
location=self.work_dir,
)
```
#### File: workflow_steps/pmx/mutate.py
```python
from icolos.core.workflow_steps.pmx.base import StepPMXBase
from pydantic import BaseModel
class StepPMXmutate(StepPMXBase, BaseModel):
"""Mutate protein or DNA/RNA."""
def __init__(self, **data):
super().__init__(**data)
def execute(self):
pass
help_string = """
pmx mutate -h
usage: pmx [-h] [-f infile] [-fB infileB] [-o outfile] [-ff ff]
[--script script] [--keep_resid | --ref ] [--resinfo]
This script applies mutations of residues in a structure file for subsequent
free energy calculations. It supports mutations to protein, DNA, and RNA
molecules.
The mutation information and dummy placements are taken from the hybrid residue
database "mutres.mtp". The best way to use this script is to take a pdb/gro file
that has been written with pdb2gmx with all hydrogen atoms present.
By default, all residues are renumbered starting from 1, so to have unique
residue IDs. If you want to keep the original residue IDs, you can use the flag
--keep_resid. In this case, you will also need to provide chain information
in order to be able to mutate the desired residue. Alternatively, if you would
like to use the original residue IDs but these have been changed, e.g. by gromacs,
you can provide a reference PDB file (with chain information too) using the --ref
flag. The input structure will be mutated according to the IDs chosen for the
reference structure after having mapped the two residue indices.
The program can either be executed interactively or via script. The script file
simply has to consist of "residue_id target_residue_name" pairs (just with some
space between the id and the name), or "chain_id residue_id target_residue_name"
if you are keeping the original residue IDs or providing a reference structure.
The script uses an extended one-letter code for amino acids to account for
different protonation states. Use the --resinfo flag to print the dictionary.
optional arguments:
-h, --help show this help message and exit
-f infile Input structure file in PDB or GRO format. Default is "protein.pdb"
-fB infileB Input structure file of the B state in PDB or GRO format (optional).
-o outfile Output structure file in PDB or GRO format. Default is "mutant.pdb"
-ff ff Force field to use. If none is provided,
a list of available ff will be shown.
--script script Text file with list of mutations (optional).
--keep_resid Whether to renumber all residues or to keep the
original residue IDs. By default, all residues are
renumbered so to have unique IDs. With this flags set,
the original IDs are kept. Because the IDs might not
be unique anymore, you will also be asked to choose
the chain ID where the residue you want to mutate is.
--ref Provide a reference PDB structure from which to map
the chain and residue IDs onto the file to be mutated (-f).
This can be useful when wanting to mutate a file that
has had its residues renumbered or the chain information
removed (e.g. after gmx grompp). As in the --keep_resid
option, if --ref is chosen, you will need to provide chain
information either interactively or via the --script flag.
--resinfo Show the list of 3-letter -> 1-letter residues
"""
```
#### File: workflow_steps/prediction/active_learning.py
```python
from typing import List, Tuple
import os
import random
import pickle
from modAL.acquisition import max_EI, max_UCB
from modAL.uncertainty import uncertainty_sampling
from modAL.models.learners import BayesianOptimizer, ActiveLearner
from pydantic.main import BaseModel
from sklearn.gaussian_process.kernels import DotProduct
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.exceptions import NotFittedError
import matplotlib.pyplot as plt
from icolos.core.containers.compound import Compound, Enumeration
from icolos.core.workflow_steps.step import StepBase
from icolos.core.workflow_steps.step import _LE
from icolos.utils.enums.step_enums import (
StepBaseEnum,
StepGlideEnum,
StepActiveLearningEnum,
)
from rdkit.Chem.AllChem import GetMorganFingerprintAsBitVect
from rdkit.Chem import PandasTools, Mol
import pandas as pd
from pandas.core.frame import DataFrame
import numpy as np
from sklearn.metrics import mean_squared_error, confusion_matrix
from icolos.utils.enums.step_initialization_enum import StepInitializationEnum
from icolos.utils.general.convenience_functions import nested_get
_SGE = StepGlideEnum()
_SALE = StepActiveLearningEnum()
_IE = StepInitializationEnum()
class StepActiveLearning(StepBase, BaseModel):
"""
Class to run an active learning framework
    Primarily designed for building QSAR models using a physics-based method (embedding + docking) as an oracle.
    Takes the step config for the oracle as an additional argument; a step built from these settings is run on the queried compounds at each iteration of the active learning loop.
"""
_pca: PCA = PCA()
def __init__(self, **data):
super().__init__(**data)
def _initialize_oracle(self, compound_list: List[pd.Series]) -> List[StepBase]:
# list of step configs
base_oracle_config = self.settings.additional["oracle_config"]
oracle_steps = []
for step in base_oracle_config:
oracle_steps.append(self._initialize_oracle_step_from_dict(step))
# manually attach the compound objects to the oracle's lead step
        # subsequent steps should take their input from the first step.
for idx, compound in enumerate(compound_list):
cmp = Compound(compound_number=idx)
cmp.add_enumeration(
Enumeration(
compound_object=cmp,
smile=compound[_SALE.SMILES],
molecule=compound[_SALE.MOLECULE],
)
)
oracle_steps[0].data.compounds.append(cmp)
return oracle_steps
def query_oracle(self, compound_list: List[Mol]) -> List:
"""
Interface function with the oracle method, in the most likely case this is ligprep + docking
Takes the requested compounds and runs them through the oracle workflow, returns the final compounds with annotations
Notes:
This could be an arbitrarily complex workflow, but the only thing that's going to change is the compounds.
"""
# initialize the basic oracle, load the query compounds for evaluation
oracle_steps = self._initialize_oracle(compound_list)
# we have a fully initialized step with the compounds loaded. Execute them
for idx, step in enumerate(oracle_steps):
# for subsequent steps we will need to read in from the previous one
if idx != 0:
step.generate_input()
step.execute()
step.process_write_out()
# retrieve compounds from the final step
final_compounds = oracle_steps[-1].data.compounds
return final_compounds
# def _reverse_sigmoid(self, x: float) -> float:
# """
# Scales compounds in range [-14,0] to be [1,0]
# """
# return 1.0 / (1 + np.e ** (0.45 * x + 4))
def _extract_final_scores(
self, compounds: List[Compound], criteria: str, highest_is_best: bool = False
) -> List[float]:
"""
Takes a list of compound objects from the oracle and extracts the best score based on the provided criteria
"""
top_scores = []
for comp in compounds:
scores = []
for enum in comp.get_enumerations():
for conf in enum.get_conformers():
scores.append(float(conf._conformer.GetProp(criteria)))
# if docking generated no conformers
if not scores:
scores.append(0.0)
best_score = max(scores) if highest_is_best else min(scores)
top_scores.append(best_score)
return list(np.absolute(top_scores))
def _generate_library(self) -> DataFrame:
"""
Loads the library file from disk
This should be a .sdf file with the pre-embedded compounds from a library enumeration or such
"""
lib_path = self.settings.additional[_SALE.VIRTUAL_LIB]
assert lib_path.endswith(".sdf")
# hold the lib in a pandas df
library = PandasTools.LoadSDF(
lib_path,
smilesName=_SALE.SMILES,
molColName=_SALE.MOLECULE,
includeFingerprints=True,
removeHs=False,
embedProps=True,
)
# need the morgan fingerprints in the df
library[_SALE.MORGAN_FP] = library.apply(
lambda x: np.array(
GetMorganFingerprintAsBitVect(x[_SALE.MOLECULE], 2, nBits=2048)
),
axis=1,
)
return library
# def _prepare_initial_data(
# self, lib: pd.DataFrame
# ) -> Tuple[np.ndarray, List[float]]:
# initial_compound_idx = random.sample(
# range(len(lib)), int(self.settings.additional[_SALE.INIT_SAMPLES])
# )
# data_rows = [lib.iloc[idx] for idx in initial_compound_idx]
# compounds = np.array([row[_SALE.MORGAN_FP] for row in data_rows])
# # return annotated compound list
# self._logger.log("Computing initial datapoints", _LE.INFO)
# annotated_compounds = self.query_oracle(data_rows)
# activities = self._extract_final_scores(
# annotated_compounds, criteria=_SGE.GLIDE_DOCKING_SCORE
# )
# self._logger.log(f"initial data points {activities}", _LE.DEBUG)
# return compounds, activities
    def _prepare_validation_data(self) -> Tuple[List[float], List[float], pd.DataFrame]:
"""
parses sdf file with results to dataframe, extract fingerprints + results
"""
val_lib = PandasTools.LoadSDF(
self.settings.additional[_SALE.VALIDATION_LIB],
smilesName=_SALE.SMILES,
molColName=_SALE.MOLECULE,
includeFingerprints=True,
removeHs=False,
embedProps=True,
)
val_lib[_SALE.MORGAN_FP] = val_lib.apply(
lambda x: np.array(
GetMorganFingerprintAsBitVect(x[_SALE.MOLECULE], 2, nBits=2048)
),
axis=1,
)
scores = list(
np.absolute(
pd.to_numeric(
val_lib[self.settings.additional[_SALE.CRITERIA]].fillna(0)
)
)
)
scores = [float(x) for x in scores]
return list(val_lib[_SALE.MORGAN_FP]), scores, val_lib
def greedy_acquisition(
self,
estimator: RandomForestRegressor,
X: np.ndarray,
n_instances: int,
highest_is_best: bool = False,
) -> np.ndarray:
"""
Implement greedy acquisition strategy, return the n_samples best scores
"""
try:
predictions = estimator.predict(X)
except NotFittedError:
# if not initialized, generate random docking scores
predictions = np.random.uniform(-14, 0, len(X))
sorted_preds = np.argpartition(predictions, -n_instances)[-n_instances:]
return sorted_preds
def execute(self):
tmp_dir = self._make_tmpdir()
lib = self._generate_library()
(
val_compounds,
val_scores,
val_lib,
) = self._prepare_validation_data()
        # fit PCA embedding on the validation set fingerprints
self._pca.fit(val_compounds)
running_mode = self.settings.additional["running_mode"]
if running_mode == "bayes_opt":
learner = BayesianOptimizer(
estimator=GaussianProcessRegressor(
kernel=DotProduct(), normalize_y=True
),
query_strategy=self.greedy_acquisition,
)
elif running_mode == "active_learning":
learner = ActiveLearner(
estimator=RandomForestRegressor(n_estimators=1000),
query_strategy=self.greedy_acquisition,
)
else:
raise KeyError(f"running mode: {running_mode} not supported")
# generate baseline performance
try:
val_predictions = learner.predict(val_compounds)
mse = mean_squared_error(val_scores, val_predictions)
self._logger.log(f"Baseline val set rmsd: {np.sqrt(mse)}", _LE.INFO)
        except Exception:
pass
if (
"debug" in self.settings.additional.keys()
and self.settings.additional["debug"] == True
):
self._logger.log("Starting debug run...", _LE.DEBUG)
self._run_learning_loop(
learner=learner,
lib=val_lib,
val_compounds=val_compounds,
val_scores=val_scores,
debug=True,
tmp_dir=tmp_dir,
)
else:
self._run_learning_loop(
learner=learner,
lib=lib,
val_compounds=val_compounds,
val_scores=val_scores,
debug=False,
tmp_dir=tmp_dir,
)
# pickle the final model
with open(os.path.join(tmp_dir, "model.pkl"), "wb") as f:
pickle.dump(learner, f)
self._parse_output(tmp_dir)
def _run_learning_loop(
self,
learner,
lib,
val_compounds,
val_scores,
debug: bool = False,
tmp_dir=None,
):
rounds = self.settings.additional[_SALE.N_ROUNDS]
n_instances = self.settings.additional[_SALE.BATCH_SIZE]
fig, axs = plt.subplots(nrows=5, ncols=5, figsize=(40, 40), squeeze=True)
axs = axs.ravel()
for idx in range(rounds):
query_idx, _ = learner.query(
list(lib[_SALE.MORGAN_FP]),
n_instances=n_instances,
)
query_compounds = [lib.iloc[int(idx)] for idx in query_idx]
if not debug:
self._logger.log(
f"Querying oracle with {len(query_compounds)} compounds", _LE.INFO
)
compounds = self.query_oracle(query_compounds)
scores = self._extract_final_scores(
compounds, self.settings.additional[_SALE.CRITERIA]
)
else:
# retrieve precalculated scores
scores = [
float(lib.iloc[int(idx)][self.settings.additional[_SALE.CRITERIA]])
for idx in query_idx
]
scores = list(np.absolute(scores))
self._logger.log(f"Debug scores: {scores}", _LE.DEBUG)
learner.teach(
np.array([compound[_SALE.MORGAN_FP] for compound in query_compounds]),
scores,
)
# get the predictions
val_predictions = learner.predict(val_compounds)
mse = mean_squared_error(val_scores, val_predictions)
self._logger.log(
f"Round {idx+1} Validation set rmsd: {np.sqrt(mse)}", _LE.INFO
)
self._logger.log(f"Predictions: \n{val_predictions[:5]}", _LE.INFO)
self._logger.log(f"Actual: \n{val_scores[:5]}", _LE.INFO)
            # project the training data with the fitted PCA
emb = self._pca.transform(learner.X_training)
            axs[idx].scatter(emb[:, 0], emb[:, 1])
if tmp_dir is not None:
fig.savefig(os.path.join(tmp_dir, "embeddings.png"))
def _initialize_oracle_step_from_dict(self, step_conf: dict) -> StepBase:
# note this is a bit of a hack to get around a circular import, we can't use the main util
_STE = StepBaseEnum
step_type = nested_get(step_conf, _STE.STEP_TYPE, default=None)
step_type = None if step_type is None else step_type.upper()
if step_type in _IE.STEP_INIT_DICT.keys():
return _IE.STEP_INIT_DICT[step_type](**step_conf)
else:
raise ValueError(
f"Backend for step {nested_get(step_conf, _STE.STEPID, '')} unknown."
)
```
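The `greedy_acquisition` strategy above simply ranks the surrogate's predictions and returns the indices of the best `n_instances`, with a random fallback while the model is still unfitted. A minimal, Icolos-free sketch using mock fingerprints:
```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.exceptions import NotFittedError

def greedy_query(estimator, X: np.ndarray, n_instances: int) -> np.ndarray:
    try:
        predictions = estimator.predict(X)
    except NotFittedError:
        # model not trained yet: fall back to random "docking scores"
        predictions = np.random.uniform(-14, 0, len(X))
    # indices of the n_instances largest predicted values (unsorted within the batch)
    return np.argpartition(predictions, -n_instances)[-n_instances:]

rng = np.random.default_rng(42)
X_pool = rng.integers(0, 2, size=(100, 2048)).astype(float)  # mock Morgan fingerprints
picked = greedy_query(RandomForestRegressor(n_estimators=10), X_pool, n_instances=5)
print(picked)
```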
#### File: workflow_steps/schrodinger/fep_base.py
```python
from pydantic import BaseModel
from icolos.core.workflow_steps.schrodinger.base import StepSchrodingerBase
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path
from icolos.utils.enums.step_enums import StepFepPlusEnum
from typing import List
import time
import os
from icolos.core.workflow_steps.step import _LE
_SFE = StepFepPlusEnum()
class StepFEPBase(StepSchrodingerBase, BaseModel):
"""
Base class containing common functionality for Schrodinger FEP+ workflows
"""
def __init__(self, **data):
super().__init__(**data)
def _parse_output(self, tmp_dir):
# pick up the final annotated map construction
self.data.generic.clear_file_dict()
self._logger.log(f"Reading output map.", _LE.INFO)
data = None
counts = 0
# hold whilst the job data gets written to local fs
while data is None and counts < 50000:
try:
path = [
file
for file in os.listdir(tmp_dir)
if file.endswith(_SFE.FMP_OUTPUT_FILE)
]
assert len(path) == 1
path = path[0]
with open(os.path.join(tmp_dir, path), "rb") as f:
data = f.read()
except AssertionError:
self._logger.log(
"Output file has not yet appeared in the file system, sleeping and retrying...",
_LE.INFO,
)
time.sleep(15)
counts += 1
self._add_data_to_generic(path, data)
def _extract_log_file_data(self, tmp_dir):
"""
Parses FEP log file to extract edge and node properties
"""
lines = None
counts = 0
# wait whilst job sits in the queue
while lines is None and counts < 50000:
try:
log_file = [
file for file in os.listdir(tmp_dir) if file.endswith(_SFE.LOGFILE)
]
assert len(log_file) == 1
log_file = log_file[0]
with open(os.path.join(tmp_dir, log_file), "r") as f:
lines = f.readlines()
edge_header_index = [
idx for idx, s in enumerate(lines) if _SFE.EDGE_HEADER_LINE in s
][-1]
node_header_index = [
idx for idx, s in enumerate(lines) if _SFE.NODE_HEADER_LINE in s
][-1]
end_of_data_index = [
idx for idx, s in enumerate(lines) if _SFE.DATA_TERMINUS in s
][0]
edge_data_lines = [
line
for line in lines[edge_header_index + 3 : node_header_index - 1]
]
node_data_lines = [
line
for line in lines[node_header_index + 3 : end_of_data_index - 1]
]
self._process_edge_lines(edge_data_lines)
self._process_node_lines(node_data_lines)
except AssertionError:
self._logger.log(
"Log file has not yet appeared in the file system, sleeping and retrying...",
_LE.INFO,
)
time.sleep(15)
counts += 1
def _process_node_lines(self, data: List[str]) -> None:
for entry in data:
fields = entry.split()
idx = fields[1]
dG = fields[2]
# attach dG tags to compound objects if present
if self.data.compounds:
# account for running this step compoundless
self.data.compounds[int(idx[0])].get_enumerations()[0].get_conformers()[
0
].get_molecule().SetProp("dG", str(dG))
self._logger.log(
f"dG directly from the output file for compound {idx} is {dG} ",
_LE.INFO,
)
def _process_edge_lines(self, edge_data: List[str]) -> None:
"""
Calibrate dG values using a reference compound and edge ddG from log file output, return dG for each compound
"""
        # calculate the max ligand index, accounting for ligands that may have been skipped in previous steps, so we can't rely on self.get_compounds()
len_nodes = 0
for line in edge_data:
parts = line.split()
lig_from = int(parts[1].split(":")[0])
lig_to = int(parts[3].split(":")[0])
for idx in [lig_from, lig_to]:
if idx > len_nodes:
len_nodes = idx
len_nodes += 1 # account for zero indexed ligands
error_matrix = np.zeros((len_nodes, len_nodes))
ddG_matrix = np.zeros((len_nodes, len_nodes))
for line in edge_data:
parts = line.split()
try:
# parse the compound info from the log file
lig_from = int(parts[1].split(":")[0])
lig_to = int(parts[3].split(":")[0])
ddG = float(parts[4].split("+-")[0])
err = float(parts[4].split("+-")[1])
except ValueError:
self._logger.log(
f"Line: {line} from the logfile contained an unexpected datatype - cannot process this edge - skipping",
_LE.WARNING,
)
continue
error_matrix[lig_from, lig_to] = err
error_matrix[lig_to, lig_from] = err
ddG_matrix[lig_from, lig_to] = ddG
ddG_matrix[lig_to, lig_from] = -ddG
error_matrix = csr_matrix(error_matrix)
# compute shortest path from one ligand to the anchor
_, predecessors = shortest_path(
error_matrix, directed=False, return_predecessors=True, indices=0
)
self._construct_dg_per_compound(ddG_matrix, predecessors, error_matrix)
def _construct_dg_per_compound(
self, ddG: np.ndarray, predecessors: List, error_matrix: np.ndarray
) -> None:
"""
Calculate the calibrated binding free energy per compound using a reference value
        Attach calculated dG to compounds
"""
try:
ref_dG = self.settings.additional[_SFE.REFERENCE_DG]
except KeyError:
self._logger.log(
"Expected to find a reference dG value for the lead compound, but none was found."
"Defaulting to 0.00, you will need to apply a manual correction afterwards",
_LE.WARNING,
)
ref_dG = 0.00
def _calculate_dg(comp_num: int, dG=ref_dG, err=0):
prev_index = predecessors[comp_num]
dG += ddG[prev_index, comp_num]
err += error_matrix[prev_index, comp_num]
if prev_index != 0:
_calculate_dg(prev_index, dG=dG, err=err)
else:
data = str(round(dG, 2)) + "+-" + str(round(err, 2))
self.data.compounds[idx].get_enumerations()[0].get_conformers()[
0
].get_molecule().SetProp("map_dG", data)
self._logger.log(
f"Calculated dG from spanning tree for compound {idx} is {data}",
_LE.INFO,
)
for comp in self.get_compounds():
idx = comp.get_compound_number()
# check whether the compound appeared in the final map
try:
if idx == 0:
comp.get_enumerations()[0].get_conformers()[
0
].get_molecule().SetProp(
"map_dG", str(self.settings.additional[_SFE.REFERENCE_DG])
)
if idx != 0: # skip the reference compound
_calculate_dg(idx)
except IndexError:
self._logger.log(
f"Compound {idx} was not found in the output map, it was likely dropped during the workflow",
_LE.WARNING,
)
continue
```
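`_construct_dg_per_compound` above walks the predecessor array returned by `shortest_path` back to the reference ligand, summing edge ddG values along the lowest-error path. A toy example with made-up numbers for three ligands:
```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path

ddG = np.array([[0.0, 1.2, 0.0],
                [-1.2, 0.0, -0.5],
                [0.0, 0.5, 0.0]])   # ddG[i, j] = dG_j - dG_i for edge i -> j
err = np.array([[0.0, 0.1, 0.0],
                [0.1, 0.0, 0.2],
                [0.0, 0.2, 0.0]])   # per-edge uncertainties, used as graph weights

# lowest-error path from every ligand back to the reference ligand (index 0)
_, predecessors = shortest_path(
    csr_matrix(err), directed=False, return_predecessors=True, indices=0
)

def dg_for(node: int, ref_dg: float = -9.0) -> float:
    """Walk back to the reference node, accumulating edge ddG contributions."""
    dg = ref_dg
    while node != 0:
        prev = predecessors[node]
        dg += ddG[prev, node]
        node = prev
    return dg

print(round(dg_for(2), 2))  # -9.0 + ddG[1, 2] + ddG[0, 1] = -8.3
```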
#### File: workflow_steps/schrodinger/fep_plus_setup.py
```python
from typing import List
from icolos.core.containers.generic import GenericData
from icolos.core.step_utils.structconvert import StructConvert
from icolos.core.workflow_steps.schrodinger.base import StepSchrodingerBase
from icolos.utils.enums.program_parameters import (
FepPlusEnum,
SchrodingerExecutablesEnum,
)
from icolos.utils.enums.step_enums import StepBaseEnum, StepFepPlusEnum, StepGlideEnum
from icolos.utils.execute_external.fep_plus import FepPlusExecutor
from rdkit.Chem import SDMolSupplier
from icolos.utils.execute_external.schrodinger import SchrodingerExecutor
from icolos.core.workflow_steps.step import _LE
import os
from pydantic import BaseModel
from rdkit.Chem import SDWriter
_SFE = StepFepPlusEnum()
_FE = FepPlusEnum()
_SEE = SchrodingerExecutablesEnum()
_SBE = StepBaseEnum
_SGE = StepGlideEnum()
class StepFepPlusSetup(StepSchrodingerBase, BaseModel):
"""
Construct and analyse perturbation map for set of congeneric ligands
Supports extracting structures from poseviewer or pdb files
"""
_schrodinger_executor: SchrodingerExecutor = None
_converter: StructConvert = None
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=FepPlusExecutor)
self._check_backend_availability()
self._schrodinger_executor = SchrodingerExecutor(
prefix_execution=self.execution.prefix_execution,
binary_location=self.execution.binary_location,
)
self._converter = StructConvert(
prefix_execution=self.execution.prefix_execution,
binary_location=self.execution.binary_location,
)
def _extract_receptor_from_pv(self, tmp_dir, input_file: str = _SFE.RECEPTOR_MAEGZ):
# run split_structure.py to obtain the receptor_structure
self._logger.log(f"Extracting receptor from structure.", _LE.INFO)
self._schrodinger_executor.execute(
command=_SEE.STRUCT_SPLIT,
arguments=[
"-m",
"pdb",
"-many_files",
os.path.join(tmp_dir, input_file),
f"{_SFE.STRUCT_SPLIT_BASE}.mae",
],
check=True,
location=tmp_dir,
)
# get rid of the original receptor structure now we have the new one
os.remove(os.path.join(tmp_dir, _SFE.RECEPTOR_MAEGZ))
def _write_receptor_from_pv(self, tmp_dir):
# Handles writing the receptor structure to tmpdir, either from a poseviewer file, or a provided receptor
        # take the first poseviewer file it can find and split the structure, keeping only the receptor
for compound in self.get_compounds():
for enumeration in compound.get_enumerations():
for conformer in enumeration.get_conformers():
if (
_SGE.GLIDE_POSEVIEWER_FILE_KEY
in conformer.get_extra_data().keys()
):
with open(
os.path.join(tmp_dir, _SFE.RECEPTOR_MAEGZ), "wb"
) as f:
f.write(
conformer.get_extra_data()[
_SGE.GLIDE_POSEVIEWER_FILE_KEY
]
)
break
if _SFE.RECEPTOR_MAEGZ in os.listdir(tmp_dir):
self._logger.log(
f"Writing poseviewer file to temporary directory.", _LE.INFO
)
self._extract_receptor_from_pv(tmp_dir)
elif self.data.generic.get_files_by_extension("pdb"):
# a pdb file was loaded to generic data, use this as the receptor structure
self.data.generic.get_argument_by_extension(
"pdb", rtn_file_object=True
).write(os.path.join(tmp_dir, "receptor.pdb"), join=False)
self._logger.log(
"Converting provided pdb receptor structure to mae", _LE.DEBUG
)
self._converter.convert(
os.path.join(tmp_dir, "receptor.pdb"),
os.path.join(tmp_dir, f"{_SFE.STRUCT_SPLIT_BASE}_receptor1.mae"),
)
os.remove(os.path.join(tmp_dir, "receptor.pdb"))
else:
self._logger.log(
"No poseviewer file was found attached to any of the conformers, and no PDB receptor file was specified - this must be set in the docking step",
_LE.ERROR,
)
raise FileNotFoundError
def _check_xray_structure(self, compound_number):
# check to see if an xray structure has been provided for that compound
if _SFE.XRAY_STRUCTURES in self.settings.additional.keys():
if isinstance(self.settings.additional[_SFE.XRAY_STRUCTURES], dict):
if (
compound_number
in self.settings.additional[_SFE.XRAY_STRUCTURES].keys()
):
return True, _FE.DICT
elif os.path.isdir(self.settings.additional[_SFE.XRAY_STRUCTURES]):
if os.path.isfile(
os.path.join(
self.settings.additional[_SFE.XRAY_STRUCTURES],
f"{compound_number}.pdb",
)
):
return True, _FE.PATH
return False, None
def _rename_sdf(self, path, comp_num):
with open(path, "r") as f:
lines = f.readlines()[1:]
new_lines = [f"{comp_num}:0:0\n"]
for line in lines:
new_lines.append(line)
self._remove_temporary(path)
with open(path, "w") as f:
f.writelines(new_lines)
def _extract_ligand_from_pdb(self, tmp_dir: str, comp_num: int, type: str):
# if ligand poses have been provided from xray structures, extract just the ligand
self._logger.log(
f"Extracting ligand from provided Xray structure for compound {comp_num}",
_LE.DEBUG,
)
if type == _FE.DICT:
            file_path = self.settings.additional[_SFE.XRAY_STRUCTURES][comp_num]
else:
file_path = os.path.join(
self.settings.additional[_SFE.XRAY_STRUCTURES], f"{comp_num}.pdb"
)
if not os.path.isfile(file_path):
raise FileNotFoundError(
"The provided path to the xray structure does not exist or is not accessible"
)
self._schrodinger_executor.execute(
command=_SEE.STRUCT_SPLIT,
arguments=["-m", "pdb", "-many_files", file_path, f"{_SFE.XRAY_SPLIT}.sdf"],
check=True,
location=tmp_dir,
)
# remove everything apart from the ligand sdf which is concatenated later
lig_found = False
for file in os.listdir(tmp_dir):
idx = file.split("/")[-1]
if idx.startswith(_SFE.XRAY_SPLIT):
if "ligand" in idx:
# need to modify the name from the standard that Schrodinger provides
self._rename_sdf(os.path.join(tmp_dir, file), comp_num)
mols = SDMolSupplier(os.path.join(tmp_dir, file))
data = mols[0]
lig_found = True
self._remove_temporary(os.path.join(tmp_dir, file))
else:
self._remove_temporary(os.path.join(tmp_dir, file))
if lig_found:
return data
def _write_input_files(self, tmp_dir):
# write receptor structure to tmpdir, either from poseviewer or provided pdb file
self._write_receptor_from_pv(tmp_dir)
# write out all conformers present in self.data.compounds to a single sdf file.
writer = SDWriter(os.path.join(tmp_dir, "concatenated.sdf"))
for compound in self.get_compounds():
# If an xray pose is provided, use this
flag, type = self._check_xray_structure(compound.get_compound_number())
if flag is True:
self._logger.log(
"Found Xray structure for the ligand - using this in preference to a docking pose",
_LE.DEBUG,
)
mol = self._extract_ligand_from_pdb(
tmp_dir, compound.get_compound_number(), type
)
writer.write(mol)
else:
# use the docked conformer
for enumeration in compound.get_enumerations():
for conformer in enumeration.get_conformers():
mol = conformer.get_molecule()
writer.write(mol)
def _parse_arguments(self, io_dict: dict) -> List[str]:
arguments = []
for key in self.settings.arguments.parameters.keys():
arguments.append(key)
arguments.append(str(self.settings.arguments.parameters[key]))
for flag in self.settings.arguments.flags:
arguments.append(str(flag))
for key, value in io_dict.items():
arguments.append(key)
arguments.append(value)
return arguments
def _get_structcat_args(
self, tmp_dir: str, out_file_type: str, outfile: str
) -> List[str]:
arguments = [
f"{_SEE.STRUCTCAT_I}mae",
os.path.join(tmp_dir, f"{_SFE.STRUCT_SPLIT_BASE}_receptor1.mae"),
f"{_SEE.STRUCTCAT_I}sd",
]
for file in os.listdir(tmp_dir):
if file.endswith("sdf"):
arguments.append(os.path.join(tmp_dir, file))
arguments.append(f"{_SEE.STRUCTCAT_O}{out_file_type}")
arguments.append(os.path.join(tmp_dir, outfile))
return arguments
def _concatenate_pv_files(self, tmp_dir: str):
# create a poseviewer-formatted file with receptor structure, then docked ligand poses
arguments = self._get_structcat_args(
tmp_dir=tmp_dir, out_file_type="mae", outfile=_SFE.STRUCTCAT_MAEGZ_OUTFILE
)
self._schrodinger_executor.execute(
command=_SEE.STRUCTCAT, arguments=arguments, check=True
)
def _analyse_map(self, tmp_dir):
"""run fmp_stats program to analyse map - generate node similarities etc"""
result = self._schrodinger_executor.execute(
command=_SEE.FMP_STATS,
arguments=["out.fmp", "-f"],
check=True,
location=tmp_dir,
)
log_lines = []
for line in str(result.stdout).split("\n"):
self._logger_blank.log(line, _LE.INFO)
log_lines.append(line + "\n")
self.data.generic.add_file(
GenericData(file_name="fep_mapper.log", file_data=log_lines)
)
def _parse_output(self, tmp_dir: str):
# needs to retrieve the edge and fmp files produced by the mapper step and attach to the generic dict
files = [
os.path.join(tmp_dir, f)
for f in os.listdir(tmp_dir)
if f.endswith(("fmp", "edge", "log"))
]
for file in files:
try:
with open(file, "r") as f:
data = f.read()
except UnicodeDecodeError:
with open(file, "rb") as f:
data = f.read()
self._add_data_to_generic(file, data)
def execute(self):
# run the job in a temporary directory
tmp_dir = self._make_tmpdir()
self._write_input_files(tmp_dir)
self._concatenate_pv_files(tmp_dir)
io_dict = {
"": os.path.join(tmp_dir, _SFE.STRUCTCAT_MAEGZ_OUTFILE),
"-o": _SFE.FEP_MAPPER_OUTPUT,
}
arguments = self._parse_arguments(io_dict=io_dict)
self._apply_token_guard() # need to implement for reliability
self._logger.log("Optimising perturbation map", _LE.DEBUG)
self._backend_executor.execute(
command=_FE.FEP_MAPPER, arguments=arguments, check=True, location=tmp_dir
)
assert os.path.isfile(os.path.join(tmp_dir, "out.fmp"))
self._logger.log(
f"Successfully executed fep_mapper in directory {tmp_dir}.", _LE.DEBUG
)
self._logger.log("Analysing the perturbation map.", _LE.DEBUG)
self._analyse_map(tmp_dir)
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
```
#### File: workflow_steps/structure_prediction/pdb_fixer.py
```python
from icolos.utils.enums.step_enums import StepPdbFixerEnum
from icolos.core.workflow_steps.step import StepBase
from icolos.utils.enums.program_parameters import PdbFixerEnum
from icolos.utils.execute_external.execute import Executor
from pydantic import BaseModel
from pdbfixer.pdbfixer import PDBFixer
import os
_SFE = StepPdbFixerEnum()
_FE = PdbFixerEnum()
class StepPdbFixer(StepBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=Executor)
def _parse_arguments(self):
default_flags = [
"--replace-nonstandard",
"--add-residues",
]
default_params = {
"--ph": "7.0",
"--add-atoms": "all",
"--keep-heterogens": "all",
}
arguments = []
for arg in self.settings.arguments.flags:
arguments.append(arg)
for key, value in self.settings.arguments.parameters.items():
formatted_arg = f"{key}={value}"
arguments.append(formatted_arg)
for key in default_flags:
if key not in self.settings.arguments.flags:
arguments.append(key)
for key, value in default_params.items():
if key not in self.settings.arguments.parameters.keys():
formatted_arg = f"{key}={value}"
arguments.append(formatted_arg)
return arguments
def execute(self):
tmp_dir = self._make_tmpdir()
self.data.generic.write_out_all_files(tmp_dir)
pdb_files = self.data.generic.get_file_names_by_extension("pdb")
arguments = self._parse_arguments()
for file in pdb_files:
path = os.path.join(tmp_dir, file)
arguments.extend(["--output", path])
arguments = [path] + arguments
self._backend_executor.execute(
command=_FE.FIXER, arguments=arguments, location=tmp_dir, check=True
)
#
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
```
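The step above calls the pdbfixer command line; the same repairs are also available through pdbfixer's Python API, roughly as sketched below. This is an independent illustration rather than what `StepPdbFixer` executes, and on older installations the `PDBFile` import lives under `simtk.openmm.app`.
```python
from pdbfixer import PDBFixer
from openmm.app import PDBFile

def fix_pdb(in_path: str, out_path: str, ph: float = 7.0) -> None:
    fixer = PDBFixer(filename=in_path)
    fixer.findMissingResidues()
    fixer.findNonstandardResidues()
    fixer.replaceNonstandardResidues()
    fixer.findMissingAtoms()
    fixer.addMissingAtoms()
    fixer.addMissingHydrogens(ph)
    with open(out_path, "w") as handle:
        PDBFile.writeFile(fixer.topology, fixer.positions, handle)
```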
#### File: utils/enums/write_out_enums.py
```python
class WriteOutEnum:
RDKIT_NAME = "_Name"
INDEX_STRING = "index_string"
COMPOUND_NAME = "compound_name"
# REINVENT-compatible JSON write-out
JSON_RESULTS = "results"
JSON_NAMES = "names"
JSON_NA = ""
JSON_VALUES = "values"
JSON_VALUES_KEY = "values_key"
SDF = "sdf"
PDB = "pdb"
# try to find the internal value and return
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
# prohibit any attempt to set any values
def __setattr__(self, key, value):
raise ValueError("No changes allowed.")
class RunVariablesEnum:
PREFIX = "["
POSTFIX = "]"
COMPOUND_ID = "compound_id"
ENUMERATION_ID = "enumeration_id"
CONFORMER_ID = "conformer_id"
COMPOUND_NAME = "compound_name"
ENUMERATION_STRING = "enumeration_string"
CONFORMER_STRING = "conformer_string"
# try to find the internal value and return
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
# prohibit any attempt to set any values
def __setattr__(self, key, value):
raise ValueError("No changes allowed.")
```
#### File: utils/execute_external/cresset_executor.py
```python
from icolos.utils.execute_external.execute import ExecutorBase
class CressetExecutor(ExecutorBase):
"""For the execution of Cresset binaries binary."""
def __init__(self, prefix_execution=None, binary_location=None):
super().__init__(
prefix_execution=prefix_execution, binary_location=binary_location
)
def execute(
self, command: str, arguments: list, check=True, location=None, pipe_input=None
):
        # check whether a proper executable is provided
# if command not in [EE.OMEGA]:
# raise ValueError(
# "Parameter command must be an dictionary of the internal Omega executable list."
# )
return super().execute(
command=command,
arguments=arguments,
check=check,
            location=location,
pipe_input=pipe_input,
)
def is_available(self):
# try:
# result = self.execute(
# command=EE.OMEGA, arguments=[EE.OMEGA_HELP], check=True
# )
# if EE.OMEGA_HELP_IDENTIFICATION_STRING in result.stderr:
# return True
# return False
# except Exception as e:
# return False
pass
```
#### File: utils/execute_external/fep_plus.py
```python
from icolos.utils.execute_external.execute import ExecutorBase
from icolos.utils.enums.program_parameters import (
FepPlusEnum,
SchrodingerExecutablesEnum,
)
FE = FepPlusEnum()
SEE = SchrodingerExecutablesEnum()
class FepPlusExecutor(ExecutorBase):
def __init__(self, prefix_execution=None, binary_location=None):
super().__init__(
prefix_execution=prefix_execution, binary_location=binary_location
)
def execute(
self, command: str, arguments: list, check=True, location=None, pipe_input=None
):
if command not in [
FE.FEP_MAPPER,
FE.FEP_EXECUTOR,
FE.JSC_LIST,
FE.JSC_TAIL_FILE,
]:
raise ValueError(
"Execution command must be recognised by the executable's enum"
)
return super().execute(
command=command,
arguments=arguments,
check=check,
location=location,
pipe_input=pipe_input,
)
def is_available(self):
try:
result = self.execute(
command=FE.FEP_MAPPER, arguments=[FE.FEP_HELP], check=True
)
if FE.FEP_MAPPER_HELP_SUCCESS_STRING in result.stdout:
return True
return False
except Exception as e:
print(str(e))
return False
```
#### File: utils/execute_external/gromacs.py
```python
from icolos.utils.enums.program_parameters import GromacsEnum
from icolos.utils.execute_external.execute import ExecutorBase
_GE = GromacsEnum()
class GromacsExecutor(ExecutorBase):
def __init__(self, prefix_execution=None, binary_location=None):
super().__init__(
prefix_execution=prefix_execution, binary_location=binary_location
)
def execute(
self, command: str, arguments: list, check=True, location=None, pipe_input=None
):
gmx_commands = [
_GE.EDITCONF,
_GE.GENION,
_GE.GROMPP,
_GE.SOLVATE,
_GE.MDRUN,
_GE.PDB2GMX,
_GE.MAKE_NDX,
_GE.GENRESTR,
_GE.TRJCONV,
_GE.TRJCAT,
_GE.CLUSTER,
_GE.MMPBSA,
_GE.DO_DSSP,
_GE.RMS,
]
if not any([cmd in command for cmd in gmx_commands]):
raise ValueError(
"Command must be present in internal list of GROMACS executables"
)
# handle for dealing with programs that want interactive input
return super().execute(
command=command,
arguments=arguments,
check=check,
location=location,
pipe_input=pipe_input,
)
def is_available(self):
try:
result = self.execute(command=_GE.PDB2GMX, arguments=[], check=False)
if _GE.PDB2GMX_FAIL_ID_STRING in result.stderr:
return True
return False
except Exception as e:
return False
```
#### File: utils/execute_external/license_token_guard.py
```python
import time
from typing import Dict
from pydantic import BaseModel, PrivateAttr
from icolos.utils.execute_external.execute import Executor
from icolos.loggers.steplogger import StepLogger
from icolos.utils.enums.logging_enums import LoggingConfigEnum
from icolos.utils.enums.program_parameters import SchrodingerExecutablesEnum
_EE = SchrodingerExecutablesEnum()
_LE = LoggingConfigEnum()
class TokenGuardParameters(BaseModel):
prefix_execution: str = None
binary_location: str = None
token_pools: Dict
wait_interval_seconds: int = 30
wait_limit_seconds: int = 0
class SchrodingerLicenseTokenGuard(BaseModel):
"""Class that checks, whether enough tokens to execute Schrodinger binaries are available."""
token_guard: TokenGuardParameters
class Config:
underscore_attrs_are_private = True
_logger = PrivateAttr()
_executor = PrivateAttr()
def __init__(self, **data):
super().__init__(**data)
self._logger = StepLogger()
# initialize the executor for all "Schrodinger" related calls and also check if it is available
self._executor = Executor(
prefix_execution=self.token_guard.prefix_execution,
binary_location=self.token_guard.binary_location,
)
def _get_token_pool_info(self, licadmin_output: list, token_pool: str) -> dict:
result = {"found": False}
for line in licadmin_output:
if token_pool in line:
parts = line.split(" ")
if len(parts) == 16:
result["total"] = int(parts[6])
result["available"] = int(parts[6]) - int(parts[12])
result["found"] = True
break
return result
def _check_licstat_output(self, licadmin_output: list) -> bool:
all_pools_available = True
for pool_key, pool_token_numbers in self.token_guard.token_pools.items():
pool_status = self._get_token_pool_info(licadmin_output, pool_key)
if pool_status["found"]:
if pool_status["available"] >= pool_token_numbers:
self._logger.log(
f"Enough tokens available ({pool_status['available']}) to satisfy requirement ({pool_token_numbers} free tokens) for pool {pool_key}.",
_LE.DEBUG,
)
else:
self._logger.log(
f"Not enough tokens available ({pool_status['available']}) to satisfy requirement ({pool_token_numbers} free tokens) for pool {pool_key}.",
_LE.DEBUG,
)
all_pools_available = False
else:
all_pools_available = False
self._logger.log(
f"Could not find information on token pool {pool_key}.", _LE.WARNING
)
return all_pools_available
def _get_licstat_output(self):
result = self._executor.execute(
command=_EE.LICADMIN, arguments=[_EE.LICADMIN_STAT], check=True
)
if result.returncode != 0:
self._logger.log(
f"Could not execute the Schrodinger license token guard - do you need to export the licadmin path?",
_LE.WARNING,
)
return result.stdout.split("\n")
def guard(self) -> bool:
# loop over the token pools until they are all satisfied or the time limit has run out
counter = 0
success = False
while True:
if (
self.token_guard.wait_limit_seconds != 0
and (counter * self.token_guard.wait_interval_seconds)
>= self.token_guard.wait_limit_seconds
):
self._logger.log(
f"Wait period ({self.token_guard.wait_limit_seconds} seconds) set for Schrodinger token guard has been exceeded.",
_LE.ERROR,
)
break
# reload the output from "licadmin"
# at this stage, the output from licadmin is a list of strings
licadmin_output = self._get_licstat_output()
all_pools_available = self._check_licstat_output(
licadmin_output=licadmin_output
)
if all_pools_available:
self._logger.log(
"All token pool requirements for Schrodinger have been met - proceeding.",
_LE.DEBUG,
)
success = True
break
else:
time.sleep(self.token_guard.wait_interval_seconds)
counter = counter + 1
return success
```
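`guard()` above is essentially a poll-with-timeout loop around the licadmin token check. Stripped of the Schrodinger specifics, the control flow looks like this; the `condition` callable is a placeholder standing in for `_check_licstat_output`.
```python
import time
from typing import Callable

def wait_for(condition: Callable[[], bool],
             interval_seconds: int = 30,
             wait_limit_seconds: int = 0) -> bool:
    """Poll condition() until it is met or the wait limit is exceeded (0 = wait forever)."""
    counter = 0
    while True:
        if wait_limit_seconds != 0 and counter * interval_seconds >= wait_limit_seconds:
            return False  # gave up: wait limit exceeded
        if condition():
            return True   # all requirements satisfied
        time.sleep(interval_seconds)
        counter += 1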
#### File: utils/general/files_paths.py
```python
import os
import shutil
import time
import json
import tempfile
from typing import Tuple
from icolos.utils.enums.step_enums import StepBaseEnum
from icolos.utils.enums.general_utils_enums import CheckFileGenerationEnum
_SE = StepBaseEnum
_FG = CheckFileGenerationEnum()
def check_file_availability(
path: str, interval_sec: int = 1, maximum_sec: int = 10
) -> str:
counter = 0
while not os.path.exists(path):
# wait for an interval
time.sleep(interval_sec)
counter = counter + 1
# if there's time left, proceed
if maximum_sec is not None and (counter * interval_sec) > maximum_sec:
break
if os.path.exists(path):
if os.path.getsize(path) == 0:
return _FG.GENERATED_EMPTY
else:
return _FG.GENERATED_SUCCESS
else:
return _FG.NOT_GENERATED
def remove_folder(folder_path: str):
if os.path.isdir(folder_path):
shutil.rmtree(folder_path)
def empty_output_dir(path: str):
for root, subf, files in os.walk(path):
for file in files:
os.remove(os.path.join(root, file))
def move_up_directory(path, n=1):
"""Function, to move up 'n' directories for a given "path"."""
# add +1 to take file into account
if os.path.isfile(path):
n += 1
for _ in range(n):
path = os.path.dirname(os.path.abspath(path))
return path
def attach_root_path(path):
"""Function to attach the root path of the module for a given "path"."""
ROOT_DIR = move_up_directory(os.path.abspath(__file__), n=4)
return os.path.join(ROOT_DIR, path)
def lines_in_file(path):
    count = 0
    with open(path) as f:
        for count, _ in enumerate(f, start=1):
            pass
    return count
def dict_from_json_file(path):
with open(path, "r") as f:
return json.load(f)
def any_in_file(path, strings):
if isinstance(strings, str):
strings = [strings]
if os.path.isfile(path):
with open(path, "r") as f:
file_raw = f.readlines()
for string in strings:
if any(string in line for line in file_raw):
return True
return False
else:
return False
def infer_input_type(path: str) -> str:
basename = os.path.basename(path)
ending = basename[-3:].upper()
if ending in [_SE.FORMAT_SDF, _SE.FORMAT_CSV, _SE.FORMAT_SMI]:
return ending
else:
raise ValueError(f"Ending {ending} not supported.")
def gen_tmp_file(
suffix: str = None, prefix: str = None, dir: str = None, text: bool = True
) -> Tuple[str, str]:
"""Function wraps tempfile.mkstemp(), but closes the connection and returns the file name instead of the handler."""
# note that in contrast to the underlying "mkstemp" function, "text" is set to True here
fhandle, path = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
os.close(fhandle)
return os.path.basename(path), path
```
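Illustrative usage of two of the helpers above, assuming the module is importable as `icolos.utils.general.files_paths`; the printed file name is only an example, and the returned status is one of the `CheckFileGenerationEnum` members.
```python
from icolos.utils.general.files_paths import check_file_availability, gen_tmp_file

# create an empty, named temporary file and keep only its name and path
file_name, file_path = gen_tmp_file(suffix=".sdf", prefix="docked_")
print(file_name)  # e.g. "docked_ab12cd.sdf"

# poll for the file; since it already exists but is empty, the "generated empty" value is returned
status = check_file_availability(file_path, interval_sec=1, maximum_sec=5)
print(status)
```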
#### File: utils/general/icolos_exceptions.py
```python
class ExecutionFailed(Exception):
pass
class StepFailed(Exception):
pass
class ContainerCorrupted(Exception):
pass
def get_exception_message(e: Exception):
if e is None:
return None
if hasattr(e, "message"):
return e.message
else:
return e
def get_exception_type(e: Exception) -> str:
if e is None:
return None
return type(e).__name__
```
#### File: utils/general/progress_bar.py
```python
def get_progress_bar_string(
done, total, prefix="", suffix="", decimals=1, length=100, fill="█"
):
percent = ("{0:." + str(decimals) + "f}").format(100 * (done / float(total)))
filledLength = int(length * done // total)
bar = fill * filledLength + "-" * (length - filledLength)
return f"{prefix}|{bar}| {percent}% {suffix}"
```
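A quick usage example for the helper above (module path assumed from the file layout), with a shorter bar so the output stays readable:
```python
from icolos.utils.general.progress_bar import get_progress_bar_string

print(get_progress_bar_string(3, 10, prefix="Docking ", suffix="done", length=20))
# Docking |██████--------------| 30.0% done
```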
#### File: utils/general/strings.py
```python
def stringify(obj):
"""Converts all objects in a dict to str, recursively."""
if isinstance(obj, dict):
return {str(key): stringify(value) for key, value in obj.items()}
elif isinstance(obj, list):
return [stringify(value) for value in obj]
else:
return str(obj)
```
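A short illustration of the recursive conversion performed by `stringify`; the input dictionary is invented for the example, and the module is assumed to live at `icolos.utils.general.strings` alongside the other helpers.

```python
from icolos.utils.general.strings import stringify

print(stringify({1: [2.5, None], "nested": {True: 3}}))
# -> {'1': ['2.5', 'None'], 'nested': {'True': '3'}}
```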
#### File: tests/autodockvina/test_adv_docking.py
```python
import os
import unittest
from icolos.core.workflow_steps.autodockvina.docking import StepAutoDockVina
from icolos.utils.enums.step_enums import StepBaseEnum, StepAutoDockVinaEnum
from icolos.utils.enums.program_parameters import AutoDockVinaEnum
from tests.tests_paths import PATHS_EXAMPLEDATA, get_1UYD_ligands_as_Compounds, PATHS_1UYD
from icolos.utils.general.files_paths import attach_root_path
_SBE = StepBaseEnum
_SAE = StepAutoDockVinaEnum()
_EE = AutoDockVinaEnum()
class Test_ADV_docking(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/ADV")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
def setUp(self):
self._1UYD_compounds = get_1UYD_ligands_as_Compounds(
abs_path=PATHS_EXAMPLEDATA.PARACETAMOL_PATH
)
self.receptor_path = PATHS_1UYD.PDBQT_PATH
def test_ADV_run(self):
step_conf = {
_SBE.STEPID: "01_ADV",
_SBE.STEP_TYPE: _SBE.STEP_AUTODOCKVINA_DOCKING,
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load AutoDock_Vina",
_SBE.EXEC_PARALLELIZATION: {_SBE.EXEC_PARALLELIZATION_CORES: 4},
_SBE.EXEC_FAILUREPOLICY: {_SBE.EXEC_FAILUREPOLICY_NTRIES: 1},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {
_SAE.CONFIGURATION: {
_SAE.ADV_SEARCH_SPACE: {
_SAE.ADV_SEARCH_SPACE_CENTER_X: 3.3,
_SAE.ADV_SEARCH_SPACE_CENTER_Y: 11.5,
_SAE.ADV_SEARCH_SPACE_CENTER_Z: 24.8,
_SAE.ADV_SEARCH_SPACE_SIZE_Y: 10,
_SAE.ADV_SEARCH_SPACE_SIZE_Z: 10,
},
_SAE.NUMBER_POSES: 2,
_SAE.ADV_RECEPTOR_PATH: self.receptor_path,
}
},
},
}
adv_step = StepAutoDockVina(**step_conf)
adv_step.data.compounds = self._1UYD_compounds
adv_step.execute()
self.assertEqual(len(adv_step.get_compounds()), 1)
self.assertEqual(len(adv_step.get_compounds()[0][0].get_conformers()), 2)
self.assertListEqual(
list(
adv_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[5.305, 11.464, 24.663]
)
self.assertEqual(
adv_step.get_compounds()[0][0][0]
.get_molecule()
.GetProp(_SBE.ANNOTATION_TAG_DOCKING_SCORE),
"-6.0",
)
# check SDF write-out
out_path = os.path.join(self._test_dir, "adv_docked.sdf")
adv_step.write_conformers(out_path)
stat_inf = os.stat(out_path)
self.assertGreater(stat_inf.st_size, 3500)
```
#### File: tests/clustering/test_clustering.py
```python
import unittest
from icolos.core.containers.compound import Compound, Enumeration
from icolos.core.workflow_steps.calculation.clustering import StepClustering
from icolos.utils.enums.step_enums import StepBaseEnum, StepClusteringEnum
from tests.tests_paths import PATHS_EXAMPLEDATA, get_mol_as_Conformer
_SBE = StepBaseEnum
_SC = StepClusteringEnum()
class Test_Clustering(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
@classmethod
def tearDownClass(cls):
pass
def test_Clustering(self):
step_conf = {
_SBE.STEPID: "01_clustering",
_SBE.STEP_TYPE: _SBE.STEP_CLUSTERING,
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_SC.N_CLUSTERS: 2,
_SC.MAX_ITER: 300,
},
},
_SBE.SETTINGS_ADDITIONAL: {
_SC.TOP_N_PER_SOLVENT: 3,
_SC.FEATURES: ["area", "dipole", "HB_acc"],
_SC.FREE_ENERGY_SOLVENT_TAGS: [
"G_h2o",
"G_meoh",
"G_octanol",
"G_dmso",
"G_cychex",
"G_acn",
"G_thf",
],
},
},
}
cl_step = StepClustering(**step_conf)
cl_step.get_compounds().append(Compound(compound_number=1))
cl_step.get_compounds()[0].add_enumeration(Enumeration(), auto_update=True)
conformers = get_mol_as_Conformer(PATHS_EXAMPLEDATA.CLUSTERING_11CONFS)
cl_step.data.compounds[0][0].add_conformers(conformers, auto_update=True)
        # 11 conformers are put in, but due to clustering only 10 should come out; note that if only one solvent were
        # selected, only 9 would be output (this is because 2 of the clusters have 4 members and TOP_N_PER_SOLVENT
        # is set to 3)
self.assertEqual(len(cl_step.get_compounds()[0][0].get_conformers()), 11)
cl_step.execute()
self.assertEqual(len(cl_step.get_compounds()[0][0].get_conformers()), 10)
# make sure it is the 10th element (index 9) that has been removed
self.assertListEqual(
[
list(
cl_step.get_compounds()[0][0]
.get_conformers()[i]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
)
for i in range(10)
],
[
[0.8838, 0.6808, -0.1373],
[-4.2269, -0.441, 0.2359],
[-4.1693, -0.532, -0.0567],
[-4.2326, -0.4701, 0.3534],
[-4.201, -0.5446, 0.131],
[-4.2198, -0.4705, 0.1656],
[-4.2318, -0.444, 0.2474],
[-4.2316, -0.14, 0.0848],
[-4.1953, -0.1989, -0.1017],
[-4.1528, -0.0208, 0.0932],
],
)
```
#### File: tests/gromacs/test_genion.py
```python
from icolos.core.containers.generic import GenericData
from icolos.core.workflow_steps.gromacs.genion import StepGMXGenion
import unittest
import os
from icolos.utils.enums.step_enums import StepBaseEnum, StepGromacsEnum
from tests.tests_paths import PATHS_EXAMPLEDATA, export_unit_test_env_vars
from icolos.utils.general.files_paths import attach_root_path
_SGE = StepGromacsEnum()
_SBE = StepBaseEnum
class Test_Genion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/gromacs")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
export_unit_test_env_vars()
def setUp(self):
with open(attach_root_path(PATHS_EXAMPLEDATA.GROMACS_1BVG_TOP), "r") as f:
self.topol = f.read()
with open(attach_root_path(PATHS_EXAMPLEDATA.GROMACS_1BVG_TPR), "rb") as f:
self.tpr = f.read()
def test_genion_run(self):
step_conf = {
_SBE.STEPID: "test_genion",
_SBE.STEP_TYPE: "genion",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2021-fosscuda-2019a-PLUMED-2.7.1-Python-3.7.2"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-neutral"],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-pname": "NA",
"-nname": "CL",
},
},
_SBE.SETTINGS_ADDITIONAL: {_SBE.PIPE_INPUT: "3"},
},
}
step_genion = StepGMXGenion(**step_conf)
step_genion.data.generic.add_file(
GenericData(file_name="topol.top", file_data=self.topol, argument=True)
)
step_genion.data.generic.add_file(
GenericData(file_name="structure.tpr", file_data=self.tpr, argument=True)
)
step_genion.execute()
out_path = os.path.join(self._test_dir, "structure.gro")
step_genion.write_generic_by_name(self._test_dir, "structure.gro")
stat_inf = os.stat(out_path)
self.assertGreater(stat_inf.st_size, 2102900)
```
#### File: tests/integration_tests/test_fep_plus.py
```python
import unittest
import os
from tests.tests_paths import PATHS_EXAMPLEDATA
from icolos.utils.general.files_paths import attach_root_path
from icolos.core.composite_agents.workflow import WorkFlow
from icolos.utils.enums.composite_agents_enums import WorkflowEnum
from icolos.utils.enums.step_enums import StepBaseEnum, StepGlideEnum, TokenGuardEnum
_WE = WorkflowEnum()
_SBE = StepBaseEnum
_SGE = StepGlideEnum()
_TE = TokenGuardEnum()
class TestFEPPlusWorkflow(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/integration")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
@classmethod
def tearDownClass(cls):
pass
def test_fep_plus_workflow(self):
conf = {
_WE.HEADER: {
_WE.ID: "Docking/FEP+ combined workflow",
_WE.DESCRIPTION: "test setup for FEP+ integration",
_WE.ENVIRONMENT: {_WE.ENVIRONMENT_EXPORT: []},
_WE.GLOBAL_VARIABLES: {
"smiles": "3,4-DIAMINOBENZOTRIFLUORIDE:Nc1ccc(cc1N)C(F)(F)F"
},
},
_WE.STEPS: [
{
_SBE.STEPID: "initialization_smile",
_SBE.STEP_TYPE: "initialization",
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "{smiles}",
_SBE.INPUT_SOURCE_TYPE: "string",
}
]
},
},
{
_SBE.STEPID: "Ligprep",
_SBE.STEP_TYPE: "ligprep",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load schrodinger/2021-1-js-aws",
"parallelization": {"cores": 2, "max_length_sublists": 1},
"failure_policy": {"n_tries": 3},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-epik"],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-ph": 7.0,
"-pht": 2.0,
"-s": 10,
"-bff": 14,
"-HOST": "localhost",
},
},
_SBE.SETTINGS_ADDITIONAL: {
"filter_file": {"Total_charge": "!= 0"}
},
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "initialization_smile",
_SBE.INPUT_SOURCE_TYPE: "step",
}
]
},
_SBE.WRITEOUT: [
{
_SBE.INPUT_COMPOUNDS: {
_SBE.WRITEOUT_COMP_CATEGORY: _SBE.WRITEOUT_COMP_CATEGORY_ENUMERATIONS
},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{entrypoint_dir}/ligprep_enums.sdf",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "SDF",
},
}
],
},
{
_SBE.STEPID: "Glide",
_SBE.STEP_TYPE: "glide",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load schrodinger/2021-1-js-aws",
_SBE.EXEC_PARALLELIZATION: {
_SBE.EXEC_PARALLELIZATION_CORES: 4,
_SBE.EXEC_PARALLELIZATION_MAXLENSUBLIST: 1,
},
_SBE.EXEC_FAILUREPOLICY: {_SBE.EXEC_FAILUREPOLICY_NTRIES: 3},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-HOST": "localhost"},
},
_SBE.SETTINGS_ADDITIONAL: {
"configuration": {
"AMIDE_MODE": "trans",
"EXPANDED_SAMPLING": "True",
"GRIDFILE": [PATHS_EXAMPLEDATA.PRIME_COX2_GRID],
"NENHANCED_SAMPLING": "1",
"POSE_OUTTYPE": "poseviewer",
"POSES_PER_LIG": "1",
"POSTDOCK_NPOSE": "25",
"POSTDOCKSTRAIN": "True",
"PRECISION": "SP",
"REWARD_INTRA_HBONDS": "True",
}
},
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "Ligprep",
_SBE.INPUT_SOURCE_TYPE: "step",
}
]
},
_SBE.WRITEOUT: [
{
_SBE.INPUT_COMPOUNDS: {"category": "conformers"},
"destination": {
"resource": "{entrypoint_dir}/tests/junk/docked_conformers_cox2_actives.sdf",
_SBE.STEP_TYPE: "file",
"format": "SDF",
},
}
],
},
{
_SBE.STEPID: "FEP_plus_setup",
_SBE.STEP_TYPE: "fep_plus_setup",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load schrodinger/2021-1-js-aws"
},
_SBE.SETTINGS: {},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "Glide",
_SBE.INPUT_SOURCE_TYPE: "step",
"target_field": _SBE.INPUT_COMPOUNDS,
}
]
},
},
{
_SBE.STEPID: "FEP_plus_exec",
_SBE.STEP_TYPE: "fep_plus_exec",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load schrodinger/2021-1-js-aws"
},
_SBE.TOKEN_GUARD: {
_SBE.EXEC_PREFIXEXECUTION: "module load schrodinger/2021-1-js-aws",
_SBE.EXEC_BINARYLOCATION: "ssh 10.220.1.4 /opt/schrodinger/suite/installations/default",
_TE.TG_TOKEN_POOLS: {"FEP_GPGPU": 16},
"wait_interval_seconds": 30,
"wait_limit_seconds": 0,
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-JOBNAME": "test",
"-HOST": "fep-compute",
},
}
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "Glide",
_SBE.INPUT_SOURCE_TYPE: "step",
"target_field": _SBE.INPUT_COMPOUNDS,
}
],
"generic": [
{_SBE.INPUT_SOURCE: "FEP_plus_setup", "extension": "fmp"}
],
},
_SBE.WRITEOUT: [
{
_SBE.INPUT_COMPOUNDS: {
_SBE.WRITEOUT_COMP_CATEGORY: _SBE.WRITEOUT_COMP_CATEGORY_CONFORMERS,
_SBE.WRITEOUT_COMP_SELECTED_TAGS: [
"dG",
"docking_score",
],
},
_SBE.WRITEOUT_DESTINATION: {
                                _SBE.WRITEOUT_DESTINATION_RESOURCE: os.path.join(
self._test_dir, "fep_scored_conformers.csv"
),
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "CSV",
},
}
],
},
],
}
wflow = WorkFlow(**conf)
wflow.initialize()
wflow.execute()
out_path = os.path.join(self._test_dir, "fep_scored_conformers.csv")
stat_inf = os.stat(out_path)
self.assertGreaterEqual(stat_inf.st_size, 4252)
```
#### File: tests/integration_tests/test_gromacs.py
```python
import unittest
import os
from tests.tests_paths import MAIN_CONFIG, PATHS_EXAMPLEDATA, export_unit_test_env_vars
from icolos.utils.general.files_paths import attach_root_path
from icolos.core.composite_agents.workflow import WorkFlow
from icolos.utils.enums.composite_agents_enums import WorkflowEnum
from icolos.utils.enums.step_enums import StepBaseEnum, StepGromacsEnum
_WE = WorkflowEnum()
_SBE = StepBaseEnum
_SGE = StepGromacsEnum()
class Test_MD_Fpocket(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/integration")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
def test_workflow_MD_fpocket_holo(self):
"""
run a full gromacs MD workflow from a pdb structure, then fpocket on the resulting trajectory
MDPocket is run on the holo structure
"""
conf = {
_WE.HEADER: {
_WE.ID: "gromacs_test",
_WE.DESCRIPTION: "full md run with gromacs, with MDpocket run to extract descriptors for binding pocket",
_WE.ENVIRONMENT: {
_WE.ENVIRONMENT_EXPORT: [
{
_WE.ENVIRONMENT_EXPORT_KEY: "<KEY>",
_WE.ENVIRONMENT_EXPORT_VALUE: "True",
},
{
_WE.ENVIRONMENT_EXPORT_KEY: "<KEY>",
_WE.ENVIRONMENT_EXPORT_VALUE: "True",
},
{
_WE.ENVIRONMENT_EXPORT_KEY: "GMX_FORCE_UPDATE_DEFAULT_GPU",
_WE.ENVIRONMENT_EXPORT_VALUE: "True",
},
{
_WE.ENVIRONMENT_EXPORT_KEY: "GMXLIB",
_WE.ENVIRONMENT_EXPORT_VALUE: "<path>/forcefields/",
},
]
},
_WE.GLOBAL_VARIABLES: {
"root_dir": "<path>/icolos",
"file_base": os.path.join(
MAIN_CONFIG["ICOLOS_TEST_DATA"], "gromacs/protein"
),
"output_dir": attach_root_path("tests/junk/integration"),
},
},
_WE.STEPS: [
{
_SBE.STEPID: "01_pdb2gmx",
_SBE.STEP_TYPE: "pdb2gmx",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-ignh"],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-water": "tip4p",
"-ff": "amber14sb_OL15",
},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: attach_root_path(
PATHS_EXAMPLEDATA.MDPOCKET_PDB_FILE_DRY
),
_SBE.INPUT_EXTENSION: "pdb",
}
]
},
},
{
_SBE.STEPID: "02_editconf",
_SBE.STEP_TYPE: "editconf",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-d": "1.5",
"-bt": "dodecahedron",
},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "gro",
}
]
},
},
{
_SBE.STEPID: "03_solvate",
_SBE.STEP_TYPE: "solvate",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-cs": "tip4p"},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "02_editconf",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "top",
},
]
},
},
{
_SBE.STEPID: "04_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": False,
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "03_solvate",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "{file_base}/ions.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "03_solvate",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "05_genion",
_SBE.STEP_TYPE: "genion",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-neutral"],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-pname": "NA",
"-nname": "CL",
},
},
_SBE.SETTINGS_ADDITIONAL: {
"pipe_input": "SOL",
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "04_grompp",
_SBE.INPUT_EXTENSION: "tpr",
},
{
_SBE.INPUT_SOURCE: "04_grompp",
_SBE.INPUT_EXTENSION: "top",
},
]
},
},
{
_SBE.STEPID: "06_grompp_eminim",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": False,
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "{file_base}/minim.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "07_eminim_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "06_grompp_eminim",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
},
{
_SBE.STEPID: "08_nvt_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-n": "index.ndx"},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": True,
"make_ndx_command": "auto",
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "07_eminim_mdrun",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "{file_base}/nvt_equil.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "09_nvt_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "08_nvt_grompp",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
},
{
_SBE.STEPID: "10_npt_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-n": "index.ndx"},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": True,
"make_ndx_command": "auto",
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "09_nvt_mdrun",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "{file_base}/npt_equil.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "11_npt_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "10_npt_grompp",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
},
{
_SBE.STEPID: "12_prod_md_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-n": "index.ndx"},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": False,
"make_ndx_command": "auto",
"fields": {"nsteps": "5000"},
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "11_npt_mdrun",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "{file_base}/md.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "13_prod_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-nb": "gpu",
"-bonded": "gpu",
"-pme": "gpu",
"-c": "structure.pdb",
},
}
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "12_prod_md_grompp",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
"writeout": [
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "xtc"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.xtc",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "log"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.log",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "gro"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.gro",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "tpr"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.tpr",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
],
},
{
_SBE.STEPID: "14_trjconv",
_SBE.STEP_TYPE: "trjconv",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-center"]
},
_SBE.SETTINGS_ADDITIONAL: {"pipe_input": "echo -ne 1 0"},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "13_prod_mdrun",
_SBE.INPUT_EXTENSION: "xtc",
},
{
_SBE.INPUT_SOURCE: "13_prod_mdrun",
_SBE.INPUT_EXTENSION: "tpr",
},
]
},
"writeout": [
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "xtc"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1_trjconv.xtc",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
}
],
},
{
_SBE.STEPID: "15_MDpocket",
_SBE.STEP_TYPE: "mdpocket",
_SBE.EXEC: {_SBE.EXEC_PREFIXEXECUTION: "module load fpocket"},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {}
},
_SBE.SETTINGS_ADDITIONAL: {"format": "gromacs"},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "14_trjconv",
_SBE.INPUT_EXTENSION: "xtc",
},
{
_SBE.INPUT_SOURCE: "13_prod_mdrun",
_SBE.INPUT_EXTENSION: "pdb",
},
]
},
_SBE.WRITEOUT: [
{
_SBE.WRITEOUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "pdb"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
_SBE.WRITEOUT_DESTINATION_MODE: "dir",
},
}
],
},
],
}
export_unit_test_env_vars()
wflow = WorkFlow(**conf)
wflow.initialize()
self.assertEqual(len(wflow.steps), 15)
wflow.execute()
out_path = os.path.join(self._test_dir, "md_0_1_0.xtc")
stat_inf = os.stat(out_path)
self.assertGreater(stat_inf.st_size, 62400)
def test_md_ligparam(self):
conf = {
_WE.HEADER: {
_WE.ID: "gromacs_test_ligparam",
_WE.DESCRIPTION: "full md run with gromacs, with ligand parametrisation",
_WE.ENVIRONMENT: {
_WE.ENVIRONMENT_EXPORT: [
{
_WE.ENVIRONMENT_EXPORT_KEY: "<KEY>",
_WE.ENVIRONMENT_EXPORT_VALUE: "True",
},
{
_WE.ENVIRONMENT_EXPORT_KEY: "<KEY>",
_WE.ENVIRONMENT_EXPORT_VALUE: "True",
},
{
_WE.ENVIRONMENT_EXPORT_KEY: "GMX_FORCE_UPDATE_DEFAULT_GPU",
_WE.ENVIRONMENT_EXPORT_VALUE: "True",
},
{
_WE.ENVIRONMENT_EXPORT_KEY: "ACPYPE",
_WE.ENVIRONMENT_EXPORT_VALUE: "/projects/cc/mai/binaries/acpype",
},
{
_WE.ENVIRONMENT_EXPORT_KEY: "GMXLIB",
_WE.ENVIRONMENT_EXPORT_VALUE: "<path>/gmx_workflow/forcefields/",
},
]
},
_WE.GLOBAL_VARIABLES: {
"forcefield": "<path>/gmx_workflow/forcefields/amber14sb_OL15.ff",
"output_dir": attach_root_path("tests/junk/integration"),
"file_base": PATHS_EXAMPLEDATA.GROMACS_PROTEIN_FILE_BASE,
},
},
_WE.STEPS: [
{
_SBE.STEPID: "01_pdb2gmx",
_SBE.STEP_TYPE: "pdb2gmx",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-ignh"],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-water": "tip4p",
"-ff": "amber14sb_OL15",
},
},
_SBE.SETTINGS_ADDITIONAL: {"forcefield": "{forcefield}"},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: attach_root_path(
PATHS_EXAMPLEDATA.GROMACS_HOLO_STRUCTURE
),
_SBE.INPUT_EXTENSION: "pdb",
}
]
},
},
{
_SBE.STEPID: "02_editconf",
_SBE.STEP_TYPE: "editconf",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-d": "1.5",
"-bt": "dodecahedron",
},
},
_SBE.SETTINGS_ADDITIONAL: {"forcefield": "{forcefield}"},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "gro",
}
]
},
},
{
_SBE.STEPID: "03_solvate",
_SBE.STEP_TYPE: "solvate",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-cs": "tip4p"},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "02_editconf",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "top",
},
]
},
},
{
_SBE.STEPID: "04_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": False,
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "03_solvate",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "{file_base}/ions.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "03_solvate",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "05_genion",
_SBE.STEP_TYPE: "genion",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-neutral"],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-pname": "NA",
"-nname": "CL",
},
},
_SBE.SETTINGS_ADDITIONAL: {
"pipe_input": "SOL",
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "04_grompp",
_SBE.INPUT_EXTENSION: "tpr",
},
{
_SBE.INPUT_SOURCE: "04_grompp",
_SBE.INPUT_EXTENSION: "top",
},
]
},
},
{
_SBE.STEPID: "06_grompp_eminim",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": False,
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "{file_base}/minim.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "07_eminim_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "06_grompp_eminim",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
},
{
_SBE.STEPID: "08_nvt_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-n": "index.ndx"},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": True,
"make_ndx_command": "auto",
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "07_eminim_mdrun",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "{file_base}/nvt_equil.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "09_nvt_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
                        _SBE.SETTINGS_ADDITIONAL: {},
                    },
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "08_nvt_grompp",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
},
{
_SBE.STEPID: "10_npt_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {"-n": "index.ndx"},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": True,
"make_ndx_command": "auto",
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "09_nvt_mdrun",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "{file_base}/npt_equil.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "11_npt_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {},
},
_SBE.SETTINGS_ADDITIONAL: {},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "10_npt_grompp",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
},
{
_SBE.STEPID: "12_prod_md_grompp",
_SBE.STEP_TYPE: "grompp",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-n": "index.ndx",
},
},
_SBE.SETTINGS_ADDITIONAL: {
"-r": False,
"fields": {"nsteps": "5000"},
"make_ndx_command": "auto",
"fields": {"nsteps": "5000"},
"make_ndx_command": "auto",
},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "11_npt_mdrun",
_SBE.INPUT_EXTENSION: "gro",
},
{
_SBE.INPUT_SOURCE: "05_genion",
_SBE.INPUT_EXTENSION: "top",
},
{
_SBE.INPUT_SOURCE: "{file_base}/md.mdp",
_SBE.INPUT_EXTENSION: "mdp",
},
{
_SBE.INPUT_SOURCE: "01_pdb2gmx",
_SBE.INPUT_EXTENSION: "itp",
},
]
},
},
{
_SBE.STEPID: "13_prod_mdrun",
_SBE.STEP_TYPE: "mdrun",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
"-nb": "gpu",
"-bonded": "gpu",
"-pme": "gpu",
"-c": "structure.pdb",
},
}
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "12_prod_md_grompp",
_SBE.INPUT_EXTENSION: "tpr",
}
]
},
_SBE.WRITEOUT: [
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "xtc"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.xtc",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "log"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.log",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "gro"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.gro",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "tpr"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1.tpr",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
},
],
},
{
_SBE.STEPID: "14_trjconv",
_SBE.STEP_TYPE: "trjconv",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2020.3-fosscuda-2019a"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: ["-center"]
},
_SBE.SETTINGS_ADDITIONAL: {"pipe_input": "echo -ne 1 0"},
},
_SBE.INPUT: {
_SBE.INPUT_GENERIC: [
{
_SBE.INPUT_SOURCE: "13_prod_mdrun",
_SBE.INPUT_EXTENSION: "xtc",
},
{
_SBE.INPUT_SOURCE: "13_prod_mdrun",
_SBE.INPUT_EXTENSION: "tpr",
},
]
},
_SBE.WRITEOUT: [
{
_SBE.INPUT_GENERIC: {_SBE.WRITEOUT_GENERIC_KEY: "xtc"},
_SBE.WRITEOUT_DESTINATION: {
_SBE.WRITEOUT_DESTINATION_RESOURCE: "{output_dir}/md_0_1_trjconv.xtc",
_SBE.STEP_TYPE: "file",
_SBE.WRITEOUT_DESTINATION_FORMAT: "TXT",
},
}
],
},
],
}
export_unit_test_env_vars()
wflow = WorkFlow(**conf)
wflow.initialize()
self.assertEqual(len(wflow.steps), 14)
wflow.execute()
out_path = os.path.join(self._test_dir, "md_0_1_0.xtc")
stat_inf = os.stat(out_path)
self.assertGreater(stat_inf.st_size, 324000)
```
#### File: tests/io/test_embedder.py
```python
import unittest
from icolos.core.workflow_steps.io.embedder import StepEmbedding
from icolos.utils.enums.step_enums import StepBaseEnum, StepEmbeddingEnum
from tests.tests_paths import PATHS_EXAMPLEDATA
_SBE = StepBaseEnum
_SEE = StepEmbeddingEnum()
class Test_Embedder(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
self._SMI_path = PATHS_EXAMPLEDATA.SMALL_MOLECULES_SMI_PATH
@classmethod
def tearDownClass(cls):
pass
def test_embed_with_RDkit_no_protonation(self):
step_conf = {
_SBE.STEPID: "01_embed_molecule",
_SBE.STEP_TYPE: _SBE.STEP_EMBEDDING,
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_SEE.RDKIT_PROTONATE: False,
_SEE.METHOD: _SEE.METHOD_RDKIT,
}
}
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: self._SMI_path,
_SBE.INPUT_SOURCE_TYPE: _SBE.INPUT_SOURCE_TYPE_FILE,
_SBE.INPUT_FORMAT: _SBE.FORMAT_SMI,
}
]
},
}
init_step = StepEmbedding(**step_conf)
init_step.generate_input()
init_step.execute()
self.assertEqual(len(init_step.get_compounds()), 2)
self.assertEqual(len(init_step.get_compounds()[0]), 1)
self.assertEqual(len(init_step.get_compounds()[0][0]), 0)
self.assertListEqual(
list(
init_step.get_compounds()[0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[-3.676505807445281, -0.44491027005777944, 0.9478288681339868],
)
self.assertListEqual(
list(
init_step.get_compounds()[1][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[-2.1635386441070907, 0.781887672700409, 2.383883775746168],
)
self.assertEqual(
11, init_step.get_compounds()[0][0].get_molecule().GetNumAtoms()
)
def test_embed_with_RDkit_protonation(self):
step_conf = {
_SBE.STEPID: "01_embed_molecule",
_SBE.STEP_TYPE: _SBE.STEP_EMBEDDING,
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_SEE.RDKIT_PROTONATE: True,
_SEE.METHOD: _SEE.METHOD_RDKIT,
}
}
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: self._SMI_path,
_SBE.INPUT_SOURCE_TYPE: _SBE.INPUT_SOURCE_TYPE_FILE,
_SBE.INPUT_FORMAT: _SBE.FORMAT_SMI,
}
]
},
}
init_step = StepEmbedding(**step_conf)
init_step.generate_input()
init_step.execute()
self.assertEqual(len(init_step.get_compounds()), 2)
self.assertEqual(len(init_step.get_compounds()[0]), 1)
self.assertEqual(len(init_step.get_compounds()[0][0]), 0)
self.assertListEqual(
list(
init_step.get_compounds()[0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[-3.676505807445281, -0.44491027005777944, 0.9478288681339868],
)
self.assertListEqual(
list(
init_step.get_compounds()[1][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[-2.1635386441070907, 0.781887672700409, 2.383883775746168],
)
self.assertEqual(
20, init_step.get_compounds()[0][0].get_molecule().GetNumAtoms()
)
self.assertListEqual(
list(
init_step.get_compounds()[0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[18]
),
[3.294854134599972, -0.7589589232493622, -0.4334701745138959],
)
```
#### File: tests/pmx/test_box_water_ions.py
```python
import unittest
import os
from icolos.core.workflow_steps.pmx.box_water_ions import StepPMXBoxWaterIons
from icolos.core.composite_agents.workflow import WorkFlow
from icolos.utils.enums.step_enums import StepBaseEnum
from tests.tests_paths import (
PATHS_EXAMPLEDATA,
create_test_dir,
export_unit_test_env_vars,
get_ligands_as_compounds_with_conformers,
MAIN_CONFIG,
)
from icolos.utils.general.files_paths import attach_root_path
from icolos.core.containers.perturbation_map import PerturbationMap
_SBE = StepBaseEnum
class Test_PMXBoxWaterIons(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/pmx/test_box_water_ions")
create_test_dir(PATHS_EXAMPLEDATA.BOX_WATER_IONS_TEST_DIR, cls._test_dir)
export_unit_test_env_vars()
def setUp(self):
# initialise the map object for the two test ligands
self.compounds = get_ligands_as_compounds_with_conformers(
PATHS_EXAMPLEDATA.PMX_TNKS_LIGANDS
)
p_map = PerturbationMap(compounds=self.compounds)
p_map.parse_map_file(file_path=PATHS_EXAMPLEDATA.PMX_TNKS_MAP)
self.p_map = p_map
# def tearDown(self):
# shutil.rmtree(self._test_dir)
def test_box_water_ions(self):
conf = {
_SBE.STEPID: "01_PMX_BOX_WATER_IONS",
_SBE.STEP_TYPE: _SBE.STEP_PMX_BOX_WATER_IONS,
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load GROMACS/2021-fosscuda-2019a-PLUMED-2.7.1-Python-3.7.2",
_SBE.EXEC_PARALLELIZATION: {
_SBE.EXEC_PARALLELIZATION_CORES: 8,
_SBE.EXEC_PARALLELIZATION_MAXLENSUBLIST: 1,
},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ADDITIONAL: {},
},
}
step = StepPMXBoxWaterIons(**conf)
step.data.compounds = self.compounds
step.work_dir = self._test_dir
step._workflow_object = WorkFlow()
step._workflow_object.workflow_data.perturbation_map = self.p_map
step.execute()
stat_inf = os.stat(
os.path.join(self._test_dir, "0ec09ef_4afa8f9/ligand/tpr.tpr")
)
self.assertGreater(stat_inf.st_size, 147300)
stat_inf = os.stat(
os.path.join(self._test_dir, "0ec09ef_4afa8f9/complex/tpr.tpr")
)
self.assertGreater(stat_inf.st_size, 1245800)
```
#### File: tests/prediction/test_model_building.py
```python
import json
import unittest
import os
from icolos.core.workflow_steps.prediction.model_building import StepModelBuilder
from icolos.utils.enums.program_parameters import ModelBuilderEnum
from icolos.utils.enums.step_enums import StepBaseEnum, StepModelBuilderEnum
from tests.tests_paths import PATHS_EXAMPLEDATA, load_SDF_docked, MAIN_CONFIG
from icolos.utils.general.files_paths import attach_root_path
_SBE = StepBaseEnum
_SME = ModelBuilderEnum()
_SMBE = StepModelBuilderEnum()
class Test_Model_Building(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/model_building")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
def setUp(self):
self._example_JSON = PATHS_EXAMPLEDATA.MODEL_BUILDER_EXAMPLE_JSON
self._compounds = load_SDF_docked(
PATHS_EXAMPLEDATA.MODEL_BUILDER_TEST_INPUT_SDF
)
@classmethod
def tearDownClass(cls):
pass
def test_build_model(self):
step_conf = {
_SBE.STEPID: "01_model_building",
_SBE.STEP_TYPE: _SBE.STEP_PREDICTION,
_SBE.EXEC: {
_SBE.EXEC_BINARYLOCATION: " ".join(
[
MAIN_CONFIG["OPTUNA_AZ"]["ENVIRONMENT_PYTHON"],
MAIN_CONFIG["OPTUNA_AZ"]["ENTRY_POINT_LOCATION"],
]
)
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_SME.CONFIG: self._example_JSON,
_SME.BEST_BUILDCONFIG_OUTPATH: os.path.join(
self._test_dir, "buildconfig.json"
),
_SME.BEST_MODEL_OUTPATH: os.path.join(
self._test_dir, "best_model_trial.pkl"
),
_SME.MERGED_MODEL_OUTPATH: os.path.join(
self._test_dir, "production_model.pkl"
),
}
},
_SBE.SETTINGS_ADDITIONAL: {
_SMBE.DATA: {
_SMBE.DATA_INPUT_COLUMN: "original_smiles",
_SMBE.DATA_RESPONSE_COLUMN: _SBE.ANNOTATION_TAG_DOCKING_SCORE,
}
},
},
}
model_step = StepModelBuilder(**step_conf)
model_step.data.compounds = self._compounds
model_step.execute()
        # check that the input data has been written as expected
out_path = os.path.join(self._test_dir, "best_param.json")
container = model_step.data.generic.get_files_by_extension(ext="json")[0]
with open(out_path, "w") as f:
json.dump(container.get_data(), f, indent=4)
stat_inf = os.stat(out_path)
self.assertEqual(_SMBE.TMP_OUTPUT_BEST_PARAMETERS, container.get_file_name())
self.assertGreater(stat_inf.st_size, 800)
        # check that a model has been produced
        # note that the model's size strongly depends on the underlying algorithm / hyper-parameters chosen
out_path = os.path.join(self._test_dir, "production_model.pkl")
data = model_step.data.generic.get_files_by_extension(ext="pkl")[0].get_data()
with open(out_path, "wb") as f:
f.write(data)
stat_inf = os.stat(out_path)
self.assertGreater(stat_inf.st_size, 5000)
```
#### File: tests/prediction/test_predictor.py
```python
import unittest
import os
from icolos.core.containers.compound import Compound, Enumeration
from icolos.core.workflow_steps.prediction.predictor import StepPredictor
from icolos.utils.enums.step_enums import StepBaseEnum, StepPredictorEnum
from tests.tests_paths import PATHS_EXAMPLEDATA, get_mol_as_Conformer
from icolos.utils.general.files_paths import attach_root_path
_SBE = StepBaseEnum
_SPE = StepPredictorEnum()
class Test_Predictor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/Prediction")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
def setUp(self):
self._example_model_path = attach_root_path(PATHS_EXAMPLEDATA.EPSA_MODEL_PATH)
self._example_mol_path = attach_root_path(
PATHS_EXAMPLEDATA.EPSA_EXAMPLE_MOLECULE
)
@classmethod
def tearDownClass(cls):
pass
def test_predict_ePSA_with_descriptors(self):
step_conf = {
_SBE.STEPID: "01_predict_ePSA",
_SBE.STEP_TYPE: _SBE.STEP_PREDICTION,
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {}},
_SBE.SETTINGS_ADDITIONAL: {
_SPE.MODEL_PATH: self._example_model_path,
_SPE.FEATURES: [
"bf_weighted_volume_boltzfactor_dmso",
"bf_weighted_area_boltzfactor_dmso",
"bf_weighted_HB_acc_boltzfactor_dmso",
"bf_weighted_HB_don_boltzfactor_dmso",
"bf_weighted_sigma2_boltzfactor_dmso",
"bf_weighted_Gsolv_meoh_boltzfactor_dmso",
],
_SPE.NAME_PREDICTED: "pred_ePSA",
},
},
}
pred_step = StepPredictor(**step_conf)
pred_step.get_compounds().append(Compound())
pred_step.get_compounds()[0].add_enumeration(Enumeration(), auto_update=True)
conformer = get_mol_as_Conformer(self._example_mol_path)
pred_step.data.compounds[0][0].add_conformers(conformer, auto_update=True)
pred_step.execute()
self.assertEqual(len(pred_step.get_compounds()), 1)
self.assertEqual(len(pred_step.get_compounds()[0]), 1)
self.assertEqual(len(pred_step.get_compounds()[0][0]), 1)
# check SDF write-out (including ePSA prediction as tag)
out_path = os.path.join(self._test_dir, "ePSA_predicted_annotated.sdf")
pred_step.write_conformers(out_path)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 4448)
```
#### File: tests/schrodinger/test_desmond_setup.py
```python
from icolos.core.containers.generic import GenericData
import unittest
from icolos.core.workflow_steps.schrodinger.desmond_preprocessor import StepDesmondSetup
from icolos.utils.general.files_paths import attach_root_path
import os
from tests.tests_paths import PATHS_EXAMPLEDATA
from icolos.utils.enums.step_enums import StepBaseEnum, StepDesmondEnum
_SBE = StepBaseEnum
_SDE = StepDesmondEnum()
class Test_Desmond_Setup(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/schrodinger")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
def setUp(self):
with open(attach_root_path(PATHS_EXAMPLEDATA.DESMOND_SETUP_PDB), "r") as f:
self.pdb = f.read()
def test_desmond_preprocess(self):
step_conf = {
_SBE.STEPID: "test_desmond_setup",
_SBE.STEP_TYPE: "desmond_preprocess",
_SBE.EXEC: {
_SBE.EXEC_PREFIXEXECUTION: "module load schrodinger/2021-1-js-aws"
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {},
_SBE.SETTINGS_ADDITIONAL: {_SDE.MSJ_FIELDS: {}},
},
}
step_desmond_preprocess = StepDesmondSetup(**step_conf)
step_desmond_preprocess.data.generic.add_file(
GenericData(file_name="structure.pdb", file_data=self.pdb, argument=True)
)
step_desmond_preprocess.execute()
out_path = os.path.join(self._test_dir, "setup.cms")
step_desmond_preprocess.data.generic.write_out_all_files(self._test_dir)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 22560500)
```
#### File: tests/step_utils/test_input_merger.py
```python
import unittest
from icolos.core.step_utils.input_merger import InputMerger, StepMerge
from icolos.core.containers.compound import Conformer, Enumeration, Compound
from icolos.utils.enums.step_enums import StepBaseEnum
_SBE = StepBaseEnum
class Test_InputMerger(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
# comp1 has 2 enumerations, one with 2 and one with 3 conformers
comp1 = Compound(name="test_molecule", compound_number=0)
comp1_enum1 = Enumeration(smile="abc", molecule=None, enumeration_id=1)
comp1_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2 = Enumeration(smile="def", molecule=None, enumeration_id=2)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1.add_enumeration(comp1_enum1, auto_update=False)
comp1.add_enumeration(comp1_enum2, auto_update=False)
# comp2 has 3 enumerations, one with 1, one with 3 and one with 4 conformers
comp2 = Compound(name="test_molecule_new", compound_number=0)
comp2_enum1 = Enumeration(smile="kk", molecule=None, enumeration_id=0)
comp2_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum2 = Enumeration(smile="abc", molecule=None, enumeration_id=1)
comp2_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum3 = Enumeration(smile="xyz", molecule=None, enumeration_id=2)
comp2_enum3.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum3.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum3.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum3.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2.add_enumeration(comp2_enum1, auto_update=False)
comp2.add_enumeration(comp2_enum2, auto_update=False)
comp2.add_enumeration(comp2_enum3, auto_update=False)
# comp3 has 1 enumeration, with 2 conformers (and a different number and name)
comp3 = Compound(name="test_molecule", compound_number=1)
comp3_enum1 = Enumeration(smile="abc", molecule=None, enumeration_id=0)
comp3_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp3_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp3_enum2 = Enumeration(smile="def", molecule=None, enumeration_id=1)
comp3_enum2.add_conformer(Conformer(conformer_id=0), auto_update=False)
comp3_enum2.add_conformer(Conformer(conformer_id=0), auto_update=False)
comp3_enum2.add_conformer(Conformer(conformer_id=0), auto_update=False)
comp3.add_enumeration(comp3_enum1, auto_update=False)
comp3.add_enumeration(comp3_enum2, auto_update=False)
self.list_compounds = [comp1, comp2, comp3]
@classmethod
def tearDownClass(cls):
pass
def test_merging_by_name_compound(self):
conf = {
_SBE.INPUT_MERGE_COMPOUNDS: True,
_SBE.INPUT_MERGE_COMPOUNDS_BY: _SBE.INPUT_MERGE_BY_NAME,
_SBE.INPUT_MERGE_ENUMERATIONS: False,
}
conf = StepMerge(**conf)
merger = InputMerger(conf)
list_compounds = merger.merge(self.list_compounds)
self.assertEqual(len(list_compounds), 2)
self.assertEqual(len(list_compounds[0].get_enumerations()), 4)
self.assertEqual(len(list_compounds[1].get_enumerations()), 3)
self.assertListEqual(
[c.get_name() for c in list_compounds],
["test_molecule", "test_molecule_new"],
)
self.assertListEqual(
[
conf.get_index_string()
for c in list_compounds
for e in c.get_enumerations()
for conf in e.get_conformers()
],
[
"0:0:0",
"0:0:1",
"0:1:0",
"0:1:1",
"0:1:2",
"0:2:0",
"0:2:1",
"0:3:0",
"0:3:1",
"0:3:2",
"1:0:0",
"1:0:1",
"1:1:0",
"1:1:1",
"1:1:2",
"1:2:0",
"1:2:1",
"1:2:2",
"1:2:3",
],
)
def test_merging_by_id_compound(self):
conf = {
_SBE.INPUT_MERGE_COMPOUNDS: True,
_SBE.INPUT_MERGE_COMPOUNDS_BY: _SBE.INPUT_MERGE_BY_ID,
_SBE.INPUT_MERGE_ENUMERATIONS: False,
}
conf = StepMerge(**conf)
merger = InputMerger(conf)
list_compounds = merger.merge(self.list_compounds)
self.assertEqual(len(list_compounds), 2)
self.assertEqual(len(list_compounds[0].get_enumerations()), 5)
self.assertEqual(len(list_compounds[1].get_enumerations()), 2)
self.assertListEqual([c.get_name() for c in list_compounds], ["0", "1"])
self.assertListEqual(
[
conf.get_index_string()
for c in list_compounds
for e in c.get_enumerations()
for conf in e.get_conformers()
],
[
"0:0:0",
"0:0:1",
"0:1:0",
"0:1:1",
"0:1:2",
"0:2:0",
"0:2:1",
"0:3:0",
"0:3:1",
"0:3:2",
"0:4:0",
"0:4:1",
"0:4:2",
"0:4:3",
"1:0:0",
"1:0:1",
"1:1:0",
"1:1:1",
"1:1:2",
],
)
def test_merging_by_name_compound_enumeration_smile(self):
conf = {
_SBE.INPUT_MERGE_COMPOUNDS: True,
_SBE.INPUT_MERGE_COMPOUNDS_BY: _SBE.INPUT_MERGE_BY_NAME,
_SBE.INPUT_MERGE_ENUMERATIONS: True,
_SBE.INPUT_MERGE_ENUMERATIONS_BY: _SBE.INPUT_MERGE_BY_SMILE,
}
conf = StepMerge(**conf)
merger = InputMerger(conf)
list_compounds = merger.merge(self.list_compounds)
self.assertEqual(len(list_compounds), 2)
self.assertEqual(len(list_compounds[0].get_enumerations()), 2)
self.assertEqual(len(list_compounds[1].get_enumerations()), 3)
self.assertListEqual(
[c.get_name() for c in list_compounds],
["test_molecule", "test_molecule_new"],
)
self.assertListEqual(
[
conf.get_index_string()
for c in list_compounds
for e in c.get_enumerations()
for conf in e.get_conformers()
],
[
"0:0:0",
"0:0:1",
"0:0:2",
"0:0:3",
"0:1:0",
"0:1:1",
"0:1:2",
"0:1:3",
"0:1:4",
"0:1:5",
"1:0:0",
"1:0:1",
"1:1:0",
"1:1:1",
"1:1:2",
"1:2:0",
"1:2:1",
"1:2:2",
"1:2:3",
],
)
self.assertListEqual(
[e.get_smile() for c in list_compounds for e in c.get_enumerations()],
["abc", "def", "kk", "abc", "xyz"],
)
def test_merging_by_name_compound_enumeration_id(self):
conf = {
_SBE.INPUT_MERGE_COMPOUNDS: True,
_SBE.INPUT_MERGE_COMPOUNDS_BY: _SBE.INPUT_MERGE_BY_NAME,
_SBE.INPUT_MERGE_ENUMERATIONS: True,
_SBE.INPUT_MERGE_ENUMERATIONS_BY: _SBE.INPUT_MERGE_BY_ID,
}
conf = StepMerge(**conf)
merger = InputMerger(conf)
list_compounds = merger.merge(self.list_compounds)
self.assertEqual(len(list_compounds), 2)
self.assertEqual(len(list_compounds[0].get_enumerations()), 3)
self.assertEqual(len(list_compounds[1].get_enumerations()), 3)
self.assertListEqual(
[c.get_name() for c in list_compounds],
["test_molecule", "test_molecule_new"],
)
self.assertListEqual(
[
conf.get_index_string()
for c in list_compounds
for e in c.get_enumerations()
for conf in e.get_conformers()
],
[
"0:0:0",
"0:0:1",
"0:0:2",
"0:0:3",
"0:0:4",
"0:1:0",
"0:1:1",
"0:1:2",
"0:2:0",
"0:2:1",
"1:0:0",
"1:0:1",
"1:1:0",
"1:1:1",
"1:1:2",
"1:2:0",
"1:2:1",
"1:2:2",
"1:2:3",
],
)
self.assertListEqual(
[e.get_smile() for c in list_compounds for e in c.get_enumerations()],
["abc", "def", "abc", "kk", "abc", "xyz"],
)
```
#### File: tests/step_utils/test_input_preparator.py
```python
import os
import unittest
from icolos.core.composite_agents.workflow import WorkFlow
from icolos.core.step_utils.input_preparator import (
InputPreparator,
StepInputParameters,
StepInputSource,
)
from icolos.core.containers.compound import Conformer, Enumeration, Compound
from icolos.core.workflow_steps.step import StepBase
from icolos.utils.enums.step_enums import StepBaseEnum
from icolos.utils.general.files_paths import attach_root_path
from tests.tests_paths import PATHS_EXAMPLEDATA
_SBE = StepBaseEnum
class Test_InputPreparator(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/InputPreparator")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
def setUp(self):
# comp1 has 2 enumerations, one with 2 and one with 3 conformers
comp1 = Compound(name="test_molecule", compound_number=0)
comp1_enum1 = Enumeration(smile="abc", molecule=None, enumeration_id=1)
comp1_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2 = Enumeration(smile="def", molecule=None, enumeration_id=2)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1.add_enumeration(comp1_enum1, auto_update=False)
comp1.add_enumeration(comp1_enum2, auto_update=False)
source1 = StepInputSource(
source="mol1:cccccc1",
source_type=_SBE.INPUT_SOURCE_TYPE_STRING,
source_field="new_string",
)
source2 = StepInputSource(
source="prev_step", source_type=_SBE.INPUT_SOURCE_TYPE_STEP
)
source3 = StepInputSource(
source="mock_step",
source_type=_SBE.INPUT_SOURCE_TYPE_STEP,
source_field="old_input_field",
target_field="new_input_field",
)
source4 = StepInputSource(
source="mol2:cccc1", source_type=_SBE.INPUT_SOURCE_TYPE_STRING
)
source5 = StepInputSource(
source=attach_root_path(PATHS_EXAMPLEDATA.PARACETAMOL_COSMO),
source_type=_SBE.INPUT_SOURCE_TYPE_PATH,
source_field="cosmo",
target_field="cosmo",
)
source6 = StepInputSource(
source=attach_root_path(PATHS_EXAMPLEDATA.PARACETAMOL_COSMO),
source_type=_SBE.INPUT_SOURCE_TYPE_FILE,
source_field="cosmo_filepath",
target_field="cosmo_test_file",
)
source7 = StepInputSource(
source=PATHS_EXAMPLEDATA.PANTHER_NEGATIVE_IMAGE,
extension="mol2",
)
self.params = StepInputParameters(
compounds=[source1, source4, source2], generic=[source7]
)
blank_params = StepInputParameters(compounds=[], generic=[])
mock_step = StepBase(step_id="mock_step", type=None, input=self.params)
prev_step = StepBase(step_id="prev_step", type=None, input=blank_params)
prev_step.data.compounds = [comp1]
workflow = WorkFlow()
workflow.add_step(prev_step)
workflow.add_step(mock_step)
self.workflow = workflow
@classmethod
def tearDownClass(cls):
pass
def test_input_preparation(self):
preparator = InputPreparator(workflow=self.workflow, logger=None)
data, work_dir = preparator.generate_input(
step_input=self.params, step_type=_SBE.STEP_SHAEP
)
self.assertEqual(len(data.compounds), 3)
self.assertEqual(len(data.generic.get_all_files()), 1)
print(data.generic.get_all_files())
with open(PATHS_EXAMPLEDATA.PANTHER_NEGATIVE_IMAGE, "r") as f:
file = f.read()
self.assertEqual(
data.generic.get_file_by_name("1uyd_negative_image.mol2").get_data(), file
)
self.assertEqual(len(data.compounds[1]), 1)
self.assertEqual((len(data.compounds[2][1])), 3)
```
#### File: tests/step_utils/test_run_variables_resolver.py
```python
import unittest
from icolos.core.containers.compound import Conformer, Enumeration, Compound
from icolos.core.step_utils.run_variables_resolver import RunVariablesResolver
from icolos.utils.enums.step_enums import StepBaseEnum
_SBE = StepBaseEnum
class Test_RunVariablesResolver(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.resolver = RunVariablesResolver()
def setUp(self):
# comp1 has 2 enumerations, one with 2 and one with 3 conformers
comp1 = Compound(name="test_molecule", compound_number=0)
comp1_enum1 = Enumeration(
smile="abc", molecule=None, enumeration_id=1, compound_object=comp1
)
comp1_enum1.add_conformer(
Conformer(conformer_id=0, enumeration_object=comp1_enum1), auto_update=True
)
comp1_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2 = Enumeration(smile="def", molecule=None, enumeration_id=2)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp1.add_enumeration(comp1_enum1, auto_update=False)
comp1.add_enumeration(comp1_enum2, auto_update=False)
# comp2 has 3 enumerations, one with 1, one with 3 and one with 4 conformers
comp2 = Compound(name="test_molecule_new", compound_number=0)
comp2_enum1 = Enumeration(smile="kk", molecule=None, enumeration_id=0)
comp2_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum2 = Enumeration(smile="abc", molecule=None, enumeration_id=1)
comp2_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum2.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum3 = Enumeration(smile="xyz", molecule=None, enumeration_id=2)
comp2_enum3.add_conformer(
Conformer(conformer_id=0, enumeration_object=comp2_enum3), auto_update=True
)
comp2_enum3.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum3.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2_enum3.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp2.add_enumeration(comp2_enum1, auto_update=False)
comp2.add_enumeration(comp2_enum2, auto_update=False)
comp2.add_enumeration(comp2_enum3, auto_update=False)
# comp3 has 1 enumeration, with 2 conformers (and a different number and name)
comp3 = Compound(name="test_molecule", compound_number=1)
comp3_enum1 = Enumeration(smile="abc", molecule=None, enumeration_id=0)
comp3_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp3_enum1.add_conformer(Conformer(conformer_id=0), auto_update=True)
comp3_enum2 = Enumeration(
smile="def", molecule=None, enumeration_id=1, compound_object=comp3
)
comp3_enum2.add_conformer(Conformer(conformer_id=0), auto_update=False)
comp3_enum2.add_conformer(Conformer(conformer_id=0), auto_update=False)
comp3_enum2.add_conformer(Conformer(conformer_id=0), auto_update=False)
comp3.add_enumeration(comp3_enum1, auto_update=False)
comp3.add_enumeration(comp3_enum2, auto_update=False)
self.list_compounds = [comp1, comp2, comp3]
@classmethod
def tearDownClass(cls):
pass
def test_compound_replacements(self):
inp = "/a/path/to/nowhere/[compound_id]/[compound_id]/compound_id/whatever/[compound_name]"
self.assertEqual(
self.resolver.resolve_compound_level(inp, self.list_compounds[0]),
"/a/path/to/nowhere/0/0/compound_id/whatever/test_molecule",
)
self.assertEqual(
self.resolver.resolve_compound_level(inp, self.list_compounds[1]),
"/a/path/to/nowhere/0/0/compound_id/whatever/test_molecule_new",
)
self.assertEqual(
self.resolver.resolve_compound_level(inp, self.list_compounds[2]),
"/a/path/to/nowhere/1/1/compound_id/whatever/test_molecule",
)
        # test what happens when no replacement is done
inp = "/a/string/withouttreplacement"
self.assertEqual(
self.resolver.resolve_compound_level(inp, self.list_compounds[0]), inp
)
def test_enumeration_replacements(self):
inp = "/a/path/to/nowhere/[compound_id]/[enumeration_id]/[enumeration_string]/whatever/[enumeration_id]"
self.assertEqual(
self.resolver.resolve_enumeration_level(inp, self.list_compounds[0][0]),
"/a/path/to/nowhere/[compound_id]/1/0:1/whatever/1",
)
self.assertEqual(
self.resolver.resolve_enumeration_level(inp, self.list_compounds[0][1]),
"/a/path/to/nowhere/[compound_id]/2/:2/whatever/2",
)
self.assertEqual(
self.resolver.resolve_enumeration_level(inp, self.list_compounds[2][1]),
"/a/path/to/nowhere/[compound_id]/1/1:1/whatever/1",
)
        # test what happens when no replacement is done
inp = "/a/string/withouttreplacement"
self.assertEqual(
self.resolver.resolve_enumeration_level(inp, self.list_compounds[0][0]), inp
)
def test_conformer_replacements(self):
inp = "/a/path/[conformer_string]to/nowhere/[compound_id]/[conformer_id]/[enumeration_string]/whatever/[conformer_id]"
self.assertEqual(
self.resolver.resolve_conformer_level(inp, self.list_compounds[0][0][0]),
"/a/path/0:1:0to/nowhere/[compound_id]/0/[enumeration_string]/whatever/0",
)
self.assertEqual(
self.resolver.resolve_conformer_level(inp, self.list_compounds[0][0][1]),
"/a/path/0:1:1to/nowhere/[compound_id]/1/[enumeration_string]/whatever/1",
)
self.assertEqual(
self.resolver.resolve_conformer_level(inp, self.list_compounds[2][0][1]),
"/a/path/:0:1to/nowhere/[compound_id]/1/[enumeration_string]/whatever/1",
)
self.assertEqual(
self.resolver.resolve_conformer_level(inp, self.list_compounds[1][2][0]),
"/a/path/:2:0to/nowhere/[compound_id]/0/[enumeration_string]/whatever/0",
)
        # test what happens when no replacement is done
inp = "/a/string/withouttreplacement"
self.assertEqual(
self.resolver.resolve_conformer_level(inp, self.list_compounds[0][0][0]),
inp,
)
def test_resolve(self):
inp = "/a/path/[conformer_string]to/nowhere/[compound_id]/[conformer_id]/[enumeration_string]/whatever/[compound_name]"
self.assertEqual(
self.resolver.resolve(inp, self.list_compounds[0][0][0]),
"/a/path/0:1:0to/nowhere/0/0/0:1/whatever/test_molecule",
)
self.assertEqual(
self.resolver.resolve(inp, self.list_compounds[0][0]),
"/a/path/[conformer_string]to/nowhere/0/[conformer_id]/0:1/whatever/test_molecule",
)
self.assertEqual(
self.resolver.resolve(inp, self.list_compounds[0]),
"/a/path/[conformer_string]to/nowhere/0/[conformer_id]/[enumeration_string]/whatever/test_molecule",
)
# fails for cases where the linking conformer -> enumeration -> compound is not established
try:
self.resolver.resolve(inp, self.list_compounds[2][0][1])
except Exception as e:
self.assertEqual(
e.__str__(), "'NoneType' object has no attribute 'get_compound_number'"
)
        # test what happens when no replacement is done
inp = "/a/string/withouttreplacement"
self.assertEqual(self.resolver.resolve(inp, self.list_compounds[0][0][0]), inp)
```
#### File: tests/step_utils/test_structconvert.py
```python
import os
import unittest
from icolos.core.step_utils.structconvert import StructConvert
from icolos.utils.enums.program_parameters import SchrodingerExecutablesEnum
from icolos.utils.enums.step_enums import StepBaseEnum
from icolos.utils.general.files_paths import attach_root_path, remove_folder
from tests.tests_paths import PATHS_EXAMPLEDATA
_SBE = StepBaseEnum
_SEE = SchrodingerExecutablesEnum()
class Test_Structconvert(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/structconvert")
remove_folder(cls._test_dir)
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
def setUp(self):
pass
def test_sdf2pdb(self):
executor = StructConvert(prefix_execution=_SEE.SCHRODINGER_MODULE)
output_path = os.path.join(self._test_dir, "output_small_molecule.pdb")
executor.sdf2pdb(
sdf_file=PATHS_EXAMPLEDATA.SMALL_MOLECULE_SDF_PATH, pdb_file=output_path
)
stat_inf = os.stat(output_path)
self.assertEqual(stat_inf.st_size, 2209)
```
#### File: tests/XTB/test_XTB_confgen.py
```python
import unittest
import os
from icolos.core.workflow_steps.confgen.xtb import StepXTB
from icolos.utils.enums.step_enums import StepBaseEnum
from icolos.utils.enums.program_parameters import XTBEnum
from tests.tests_paths import (
PATHS_EXAMPLEDATA,
MAIN_CONFIG,
export_unit_test_env_vars,
get_mol_as_Compound,
get_ligands_as_compounds_with_conformers,
get_mol_as_Conformer,
)
from icolos.utils.general.files_paths import attach_root_path
import time
_SBE = StepBaseEnum
_CE = XTBEnum()
class Test_XTB_confgen(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._test_dir = attach_root_path("tests/junk/XTB")
if not os.path.isdir(cls._test_dir):
os.makedirs(cls._test_dir)
export_unit_test_env_vars()
def setUp(self):
self._paracetamol_molecule = get_mol_as_Compound(
PATHS_EXAMPLEDATA.PARACETAMOL_PATH
)
self._aspirin_molecule = get_mol_as_Compound(PATHS_EXAMPLEDATA.ASPIRIN_PATH)
self._medium_molecules = get_ligands_as_compounds_with_conformers(
PATHS_EXAMPLEDATA.SMALL_MOLECULES_SDF_PATH
)
@classmethod
def tearDownClass(cls):
pass
def test_coordinate_generation(self):
step_conf = {
_SBE.STEPID: "01_conf_genXTB",
_SBE.STEP_TYPE: _SBE.STEP_XTB,
_SBE.EXEC: {
_SBE.EXEC_BINARYLOCATION: attach_root_path(
os.path.join(MAIN_CONFIG["XTBHOME"])
),
_SBE.EXEC_PARALLELIZATION: {_SBE.EXEC_PARALLELIZATION_CORES: 7},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_CE.XTB_OPT: "vtight",
_CE.XTB_GBSA: "h2o",
},
}
},
}
xtb_step = StepXTB(**step_conf)
xtb_step.data.compounds = [self._paracetamol_molecule]
confs = get_mol_as_Conformer(
attach_root_path(PATHS_EXAMPLEDATA.CLUSTERING_11CONFS)
)
xtb_step.data.compounds[0][0].add_conformers(confs, auto_update=True)
self.assertListEqual(
list(
xtb_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[0.8785, 0.6004, -0.2173],
)
xtb_step.execute()
self.assertListEqual(
list(
xtb_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[0.8852, 0.6805, -0.1339],
)
# check number of conformers returned (only one Compound with only one Enumeration)
self.assertEqual(len(xtb_step.get_compounds()[0][0]), 2)
# check SDF write-out (including energy-as-tag annotation)
out_path = os.path.join(
self._test_dir, "XTB_conformers_from_OMEGA_paracetamol.sdf"
)
xtb_step.write_conformers(out_path)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 4414)
def test_single_core_execution(self):
step_conf = {
_SBE.STEPID: "01_conf_genXTB",
_SBE.STEP_TYPE: _SBE.STEP_XTB,
_SBE.EXEC: {
_SBE.EXEC_BINARYLOCATION: attach_root_path(
os.path.join(MAIN_CONFIG["XTBHOME"])
),
_SBE.EXEC_PARALLELIZATION: {_SBE.EXEC_PARALLELIZATION_CORES: 1},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_CE.XTB_OPT: "vtight",
_CE.XTB_GBSA: "h2o",
},
}
},
}
xtb_step = StepXTB(**step_conf)
xtb_step.data.compounds = self._medium_molecules
self.assertListEqual(
list(
xtb_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[1.8851, -1.0363, -0.1124],
)
xtb_step.execute()
self.assertListEqual(
list(
xtb_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[1.8526, -0.9638, -0.1394],
)
# check number of conformers returned (only one Compound with only one Enumeration)
self.assertEqual(len(xtb_step.get_compounds()[0][0]), 1)
# check SDF write-out (including energy-as-tag annotation)
out_path = os.path.join(
self._test_dir, "XTB_conformers_from_OMEGA_paracetamol.sdf"
)
xtb_step.write_conformers(out_path)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 6874)
def test_parallel_execution(self):
step_conf = {
_SBE.STEPID: "01_conf_genXTB",
_SBE.STEP_TYPE: _SBE.STEP_XTB,
_SBE.EXEC: {
_SBE.EXEC_BINARYLOCATION: attach_root_path(
os.path.join(MAIN_CONFIG["XTBHOME"])
),
_SBE.EXEC_PARALLELIZATION: {_SBE.EXEC_PARALLELIZATION_CORES: 8},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {
_SBE.SETTINGS_ARGUMENTS_FLAGS: [],
_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {
_CE.XTB_OPT: "vtight",
_CE.XTB_GBSA: "h2o",
},
}
},
}
xtb_step = StepXTB(**step_conf)
xtb_step.data.compounds = self._medium_molecules
self.assertListEqual(
list(
xtb_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[1.8851, -1.0363, -0.1124],
)
t1 = time.time()
xtb_step.execute()
t2 = time.time()
self.assertLess(t2 - t1, 4)
self.assertListEqual(
list(
xtb_step.get_compounds()[0][0][0]
.get_molecule()
.GetConformer(0)
.GetPositions()[0]
),
[1.8526, -0.9638, -0.1394],
)
# check number of conformers returned (only one Compound with only one Enumeration)
self.assertEqual(len(xtb_step.get_compounds()[0][0]), 1)
# check SDF write-out (including energy-as-tag annotation)
out_path = os.path.join(
self._test_dir, "XTB_conformers_from_OMEGA_paracetamol.sdf"
)
xtb_step.write_conformers(out_path)
stat_inf = os.stat(out_path)
self.assertEqual(stat_inf.st_size, 6874)
``` |
{
"source": "jharsono/baby-activity-logger",
"score": 3
} |
#### File: baby_activity_logger/alert_button/alert_button.py
```python
import pygame
from pygame import *
import sys
from gpiozero import Button
from time import sleep
import os
# Path to the sound file played while the alert is active
WAVFILE = '/home/pi/projects/baby-activity-logger/baby_activity_logger/alert_button/alert.wav'
class AlertButton:
def __init__(self, gpio_pin):
self.alert_on = False
self.play_button = Button(gpio_pin)
pygame.display.init()
screen = pygame.display.set_mode((1,1))
mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=4096)
pygame.init()
# screen=pygame.display.set_mode((400,400),0,32)
def toggle_alert(self):
self.alert_on = not self.alert_on
def play_alert(self):
s = pygame.mixer.Sound(WAVFILE)
ch = s.play()
while ch.get_busy():
pygame.time.delay(100)
    def run(self):
        # Register the GPIO callback once; gpiozero calls toggle_alert on every press.
        self.play_button.when_pressed = self.toggle_alert
        while True:
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        pygame.quit()
                        sys.exit()
            # pygame.display.update()
            # Play the alert sound repeatedly until the button toggles it off.
            while self.alert_on:
                self.play_alert()
                sleep(1)
```
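A minimal, hedged sketch of how the class above might be driven; the GPIO pin number, the import path, and the standalone entry point are assumptions for illustration only.
```python
# Hypothetical entry point for AlertButton; GPIO pin 17 is an assumed wiring choice.
from baby_activity_logger.alert_button.alert_button import AlertButton

if __name__ == '__main__':
    button = AlertButton(gpio_pin=17)
    button.run()  # blocks forever, toggling the alert sound on each button press
```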
#### File: baby_activity_logger/gcal_api_client/gcal_api_client.py
```python
import pickle
import os
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from datetime import datetime, timedelta
class GcalApiClient:
def __init__(self, path_to_secret, path_to_token):
self.path_to_secret = '../%s' % path_to_secret
self.path_to_token = '../%s' % path_to_token
self.service = None
self.calendar_id = None
self.set_credentials()
self.last_sleep = self.get_last_sleep()
def set_credentials(self):
scopes = ['https://www.googleapis.com/auth/calendar']
try:
credentials = pickle.load(open(os.path.abspath(os.path.join(
os.path.dirname(__file__), self.path_to_token)), "rb"))
        except Exception:
flow = InstalledAppFlow.from_client_secrets_file(os.path.abspath(os.path.join(
os.path.dirname(__file__), self.path_to_secret)), scopes=scopes)
credentials = flow.run_console()
pickle.dump(credentials, open(os.path.abspath(os.path.join(
os.path.dirname(__file__), self.path_to_token)), "wb"))
self.service = build("calendar", "v3", credentials=credentials)
result = self.service.calendarList().list().execute()
self.calendar_id = result['items'][0]['id']
def create_event(self, summary):
start_time = datetime.now()
end_time = start_time + timedelta(minutes=15)
timezone = 'America/Los_Angeles'
event_data = {
'summary': summary,
'start': {
'dateTime': start_time.strftime("%Y-%m-%dT%H:%M:%S"),
'timeZone': timezone,
},
'end': {
'dateTime': end_time.strftime("%Y-%m-%dT%H:%M:%S"),
'timeZone': timezone,
},
}
print('Added %s' % summary)
try:
new_event = self.service.events().insert(
calendarId=self.calendar_id,
body=event_data).execute()
if summary == 'sleep':
sleep_obj = {
'id': new_event['id'],
'start': new_event['start']['dateTime']
}
print(sleep_obj)
self.last_sleep = sleep_obj
return new_event
        except Exception:
return False
def get_last_sleep(self):
last_sleep_query = self.service.events().list(
calendarId=self.calendar_id,
q='sleep',
singleEvents=True,
orderBy="startTime"
).execute()
last_sleep_item = last_sleep_query['items'][-1]
print(last_sleep_item)
last_sleep_obj = {
'id': last_sleep_item['id'],
'start': last_sleep_item['start']['dateTime']
}
return last_sleep_obj
def end_sleep(self):
print('ending sleep id %s' % self.last_sleep['id'])
end_time = datetime.now()
start_time = self.last_sleep['start']
event_data = {
'end': {
'dateTime': end_time.strftime("%Y-%m-%dT%H:%M:%S"),
},
'start': {
'dateTime': start_time,
},
}
try:
event = self.service.events().patch(
calendarId=self.calendar_id,
eventId=self.last_sleep['id'],
body=event_data).execute()
new_end_time = event.get('end')['dateTime']
return 'Sleep ended at %s' % new_end_time
        except Exception:
return False
# Last sleep scheduled task
def set_last_sleep(self):
try:
last_sleep = self.get_last_sleep()
self.last_sleep = last_sleep
return last_sleep
        except Exception:
print('error setting last sleep')
return False
```
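A short usage sketch for GcalApiClient, hedged: the credential file names and the call order are assumptions inferred only from the constructor and methods shown above.
```python
# Hypothetical driver; 'client_secret.json' and 'token.pickle' are placeholder paths.
from baby_activity_logger.gcal_api_client.gcal_api_client import GcalApiClient

client = GcalApiClient('client_secret.json', 'token.pickle')
client.create_event('sleep')   # insert a 15-minute 'sleep' event starting now
print(client.last_sleep)       # cached {'id': ..., 'start': ...} of the latest sleep
print(client.end_sleep())      # patch the open sleep event so it ends now
```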
#### File: baby_activity_logger/led_helper/led_helper.py
```python
import RPi.GPIO as GPIO
class LEDHelper:
def __init__(self, red_pin, green_pin, blue_pin):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(red_pin, GPIO.OUT)
GPIO.setup(green_pin, GPIO.OUT)
GPIO.setup(blue_pin, GPIO.OUT)
self.red = GPIO.PWM(red_pin, 1000)
self.green = GPIO.PWM(green_pin, 1000)
self.blue = GPIO.PWM(blue_pin, 1000)
self.red.start(0)
self.green.start(0)
self.blue.start(0)
def set_color(self, rgb=[]):
self.red.start(0)
self.green.start(0)
self.blue.start(0)
# The line below should allow you to use web-like RGB values from 0-255,
# but currently causes flickering with 470ohm resistors
# rgb = [(x / 255.0) * 100 for x in rgb]
self.red.ChangeDutyCycle(rgb[0])
self.green.ChangeDutyCycle(rgb[1])
self.blue.ChangeDutyCycle(rgb[2])
def off(self):
self.red.stop()
self.blue.stop()
self.green.stop()
def set_fetch_status(self):
self.set_color([1, 0, 1])
def set_success_status(self):
self.set_color([0, 1, 0])
def set_fail_status(self):
self.set_color([1, 0, 0])
``` |
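A brief, hedged sketch of LEDHelper on its own; the BCM pin numbers are placeholders, and the 0/1 duty-cycle values mirror the status helpers defined above.
```python
# Assumed wiring: BCM pins 17/27/22 for the red/green/blue legs of the LED.
from time import sleep
from baby_activity_logger.led_helper.led_helper import LEDHelper

led = LEDHelper(red_pin=17, green_pin=27, blue_pin=22)
led.set_fetch_status()    # dim magenta while a request is in flight
sleep(1)
led.set_success_status()  # dim green on success
sleep(1)
led.off()
```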
{
"source": "jhart99/a6tools",
"score": 2
} |
#### File: a6tools/test/test_rda.py
```python
import unittest
import rda
import a6
class TestEscaper(unittest.TestCase):
def test_escaper(self):
self.assertEqual(rda.escaper(b''), bytes())
self.assertEqual(rda.escaper(bytes([0x00])), bytes([0x00]))
self.assertEqual(rda.escaper(bytes([0x11])), bytes([0x5C,0xEE]))
self.assertEqual(rda.escaper(bytes([0x13])), bytes([0x5C,0xEC]))
self.assertEqual(rda.escaper(bytes([0x5C])), bytes([0x5C,0xA3]))
def test_unescaper(self):
self.assertEqual(a6.unescaper(b''), bytes())
self.assertEqual(a6.unescaper(bytes([0x00])), bytes([0x00]))
self.assertEqual(a6.unescaper(bytes([0x5C, 0xEE])), bytes([0x11]))
self.assertEqual(a6.unescaper(bytes([0x5C, 0xEC])), bytes([0x13]))
self.assertEqual(a6.unescaper(bytes([0x5C, 0xA3])), bytes([0x5C]))
class TestRdaDebugFrame(unittest.TestCase):
def test_compute_check(self):
self.assertEqual(a6.compute_check(b''), bytes([0x00]))
self.assertEqual(a6.compute_check(bytes([0x00])), bytes([0x00]))
self.assertEqual(a6.compute_check(bytes([0x00])), bytes([0x00]))
self.assertEqual(a6.compute_check(bytes([0x00,0xff])), bytes([0xff]))
self.assertEqual(a6.compute_check(bytes([0x01,0x02])), bytes([0x03]))
self.assertEqual(a6.compute_check(bytes([0x01,0x02,0xff])), bytes([0xfc]))
self.assertEqual(a6.compute_check(bytes([0xff,0x80,0x01])), bytes([0x7e]))
def test_rda_debug_frame(self):
self.assertEqual(a6.rda_debug_frame(bytes([0xFF]), bytes([0x00]), bytes([0x10,0x00,0x00,0x82,0x01])),
bytes([0xad,0x00,0x07,0xff,0x00,0x10,0x00,0x00,0x82,0x01, 0x6c]))
self.assertEqual(a6.rda_debug_frame(bytes([0xFF]), bytes([0x02]), bytes([0x10,0x00,0x00,0x82,0x01])),
bytes([0xad,0x00,0x07,0xff,0x02,0x10,0x00,0x00,0x82,0x01, 0x6e]))
def test_read_word(self):
self.assertEqual(a6.read_word(0x82000010, 1), bytes([0xad,0x00,0x07,0xff,0x02,0x10,0x00,0x00,0x82,0x01, 0x6e]))
def test_write_block(self):
self.assertEqual(a6.write_block(0x82000010, bytes.fromhex('aabbccdd')), bytes.fromhex('ad000aff8310000082aabbccddee'))
class TestA6Commands(unittest.TestCase):
def test_h2p_command(self):
self.assertEqual(a6.h2p_command(0x00), bytes.fromhex('ad0007ff8405000000007e'))
self.assertEqual(a6.h2p_command(0xa5), bytes.fromhex('ad0007ff8405000000a5db'))
def test_set_uart_to_host(self):
self.assertEqual(a6.set_uart_to_host(), bytes.fromhex('ad0007ff840300000080f8'))
def test_read_uart_to_host(self):
self.assertEqual(a6.read_uart_to_host(), bytes.fromhex('ad0007ff040300000001f9'))
def test_ate_command(self):
self.assertEqual(a6.ate_command('AT+DMOCONNECT', 0x8201ff9c), bytes.fromhex('ad0016ff839cff01820000b7'))
def test_cps_command(self):
self.assertEqual(a6.cps_command(bytes.fromhex('002b'), 0x8201ff9c), bytes.fromhex('ad0007ff040300000001f9'))
class TestATCommands(unittest.TestCase):
def test_get_freqerr(self):
self.assertEqual(a6.parse_freq_err_resp("_OnCmd_GETFREQERR the compesation value[-860]"), -860)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhart99/workflow-client-python",
"score": 2
} |
#### File: workflow-client-python/example/diamond.py
```python
from pprint import pprint
from kubernetes import client, config
from kubernetes_workflow.apis import WorkflowV1Api
from kubernetes_workflow.models import (
DagV1Workflow, DagV1WorkflowStep, DagV1WorkflowSpec)
def create_workflow_object():
"""
Creates the diamond DAG from the workflow examples
Looks something like this where we flow from left to right
one -> (two, three) -> four
"""
container = client.V1Container(
name="busybox",
image="gcr.io/google-containers/busybox",
command=["sh", "-c",
"echo Starting on: $(date); sleep 5; \
echo Goodbye cruel world at: $(date)"])
stepone = DagV1WorkflowStep(
name="stepone",
job_template=client.V1beta1JobTemplateSpec(
spec=client.V1JobSpec(
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={"workflow": "stepone"}),
spec=client.V1PodSpec(
containers=[container],
restart_policy="Never")))))
steptwo = DagV1WorkflowStep(
name="steptwo",
dependencies=['stepone'],
job_template=client.V1beta1JobTemplateSpec(
spec=client.V1JobSpec(
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={"workflow": "steptwo"}),
spec=client.V1PodSpec(
containers=[container],
restart_policy="Never")))))
stepthree = DagV1WorkflowStep(
name="stepthree",
dependencies=['stepone'],
job_template=client.V1beta1JobTemplateSpec(
spec=client.V1JobSpec(
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={"workflow": "stepthree"}),
spec=client.V1PodSpec(
containers=[container],
restart_policy="Never")))))
stepfour = DagV1WorkflowStep(
name="stepfour",
dependencies=['steptwo', 'stepthree'],
job_template=client.V1beta1JobTemplateSpec(
spec=client.V1JobSpec(
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={"workflow": "stepfour"}),
spec=client.V1PodSpec(
containers=[container],
restart_policy="Never")))))
workflow = DagV1Workflow(metadata=client.V1ObjectMeta(name="diamond"),
spec=DagV1WorkflowSpec(
selector=client.V1LabelSelector(
match_labels={"workflow": "diamond"}),
steps=[stepone, steptwo,
stepthree, stepfour]))
return workflow
def main():
"""
The flow of configuring the API interface, creating the workflow
and submitting the Workflow
"""
config.load_kube_config()
coa = WorkflowV1Api()
workflow = create_workflow_object()
api_response = coa.create_namespaced_workflow("default", workflow)
pprint(api_response)
if __name__ == '__main__':
main()
```
#### File: kubernetes_workflow/apis/workflow_v1_api.py
```python
from kubernetes import client
class WorkflowV1Api(client.CustomObjectsApi):
"""
Kubernetes workflow Api
"""
def create_namespaced_workflow(self, namespace, body, **kwargs):
"""
create a workflow
This method makes a workflow
:param str namespace: object name and auth scope, such as for teams
and projects (required)
:param body: required
"""
api_response = self.create_namespaced_custom_object(
"dag.example.com", "v1", namespace, "workflows", body, **kwargs)
return api_response
def delete_namespaced_workflow(self, namespace, name, body, **kwargs):
"""
delete a workflow
This method deletes a workflow
:param str namespace: object name and auth scope, such as for teams
and projects (required)
:param name: required
:param body: required
"""
api_response = self.delete_namespaced_custom_object(
"dag.example.com", "v1", namespace, "workflows",
name, body, **kwargs)
return api_response
    def get_namespaced_workflow(self, namespace, name, **kwargs):
        """
        get a workflow
        This method gets a workflow
        :param str namespace: object name and auth scope, such as for teams
        and projects (required)
        :param name: required
        """
        api_response = self.get_namespaced_custom_object(
            "dag.example.com", "v1", namespace, "workflows", name, **kwargs)
        return api_response
def list_namespaced_workflow(self, namespace, **kwargs):
"""
list workflows
This method lists all workflows in a namespace
:param str namespace: object name and auth scope, such as for teams
and projects (required)
"""
api_response = self.list_namespaced_custom_object(
"dag.example.com", "v1", namespace, "workflows", **kwargs)
return api_response
def replace_namespaced_workflow(self, namespace, name, body, **kwargs):
"""
replace workflow
This method replaces a workflow in a namespace
:param str namespace: object name and auth scope, such as for teams
        and projects (required)
        :param name: required
        :param body: required
        """
api_response = self.replace_namespaced_custom_object(
"dag.example.com", "v1", namespace, "workflows",
name, body, **kwargs)
return api_response
``` |
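The diamond.py example earlier shows workflow creation; below is a hedged sketch of listing and deleting workflows through the same facade. The V1DeleteOptions body and the response layout are assumptions based on the underlying CustomObjectsApi.
```python
# Hedged sketch: list and delete workflows via WorkflowV1Api.
from kubernetes import client, config
from kubernetes_workflow.apis import WorkflowV1Api

config.load_kube_config()
api = WorkflowV1Api()

# List every workflow in the default namespace and print its name.
for item in api.list_namespaced_workflow("default").get("items", []):
    print(item["metadata"]["name"])

# Delete the "diamond" workflow created by the example above.
api.delete_namespaced_workflow("default", "diamond", body=client.V1DeleteOptions())
```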
{
"source": "jhartford/RoBO",
"score": 3
} |
#### File: RoBO/examples/example_bohamiann.py
```python
import logging
import numpy as np
from hpolib.benchmarks.synthetic_functions import Branin
from robo.fmin import bohamiann
logging.basicConfig(level=logging.INFO)
def objective_function(x):
y = np.sin(3 * x[0]) * 4 * (x[0] - 1) * (x[0] + 2)
return y
# Defining the bounds and dimensions of the input space
lower = np.array([0])
upper = np.array([6])
# Start Bayesian optimization to optimize the objective function
results = bohamiann(objective_function, lower, upper, num_iterations=20)
print(results["x_opt"])
print(results["f_opt"])
```
#### File: RoBO/examples/example_dngo.py
```python
import lasagne
import numpy as np
import matplotlib.pyplot as plt
from robo.initial_design.init_random_uniform import init_random_uniform
from robo.models.dngo import DNGO
from robo.util.normalization import zero_mean_unit_var_normalization, zero_mean_unit_var_unnormalization
def f(x):
return np.sinc(x * 10 - 5).sum(axis=1)[:, None]
rng = np.random.RandomState(42)
X = init_random_uniform(np.zeros(1), np.ones(1), 20, rng)
y = f(X)[:, 0]
model = DNGO()
model.train(X, y)
predictions = lasagne.layers.get_output(model.network,
zero_mean_unit_var_normalization(X, model.X_mean, model.X_std)[0],
deterministic=True).eval()
predictions = zero_mean_unit_var_unnormalization(predictions, model.y_mean, model.y_std)
X_test = np.linspace(0, 1, 100)[:, None]
X_test_norm = zero_mean_unit_var_normalization(X_test, model.X_mean, model.X_std)[0]
# Get features from the net
layers = lasagne.layers.get_all_layers(model.network)
basis_funcs = lasagne.layers.get_output(layers[:-1], X_test_norm)[-1].eval()
fvals = f(X_test)[:, 0]
m, v = model.predict(X_test)
colormap = plt.cm.gist_ncar
plt.gca().set_color_cycle([colormap(i) for i in np.linspace(0, 0.9, min(50, model.n_units_3))])
# use a separate loop variable so the objective function f() is not shadowed
for idx in range(min(50, model.n_units_3)):
    plt.plot(X_test[:, 0], basis_funcs[:, idx])
plt.grid()
plt.xlabel(r"Input $x$")
plt.ylabel(r"Basisfunction $\theta(x)$")
plt.show()
plt.plot(X, y, "ro")
plt.plot(X, predictions, "g+")
plt.grid()
plt.plot(X_test[:, 0], fvals, "k--")
plt.plot(X_test[:, 0], m, "blue")
plt.fill_between(X_test[:, 0], m + np.sqrt(v), m - np.sqrt(v), color="orange", alpha=0.4)
plt.xlim(0, 1)
plt.show()
```
#### File: robo/acquisition_functions/integrated_acquisition.py
```python
import logging
import numpy as np
from copy import deepcopy
from robo.acquisition_functions.base_acquisition import BaseAcquisitionFunction
logger = logging.getLogger(__name__)
class IntegratedAcquisition(BaseAcquisitionFunction):
def __init__(self, model, acquisition_func,
X_lower, X_upper, cost_model=None, is_dngo=False):
'''
        Meta acquisition function that marginalises the wrapped
        acquisition function over the GP hyperparameters.
Parameters
----------
model: Model object
The model of the objective function, it has to be an instance of
GaussianProcessMCMC or GPyModelMCMC.
acquisition_func: BaseAcquisitionFunction object
            The acquisition function that will be integrated.
        cost_model: Model object
            If the acquisition function also takes the cost into account, we
have to specify here the model for the cost function. cost_model
has to be an instance of GaussianProcessMCMC or GPyModelMCMC.
is_dngo: boolean
If you use DNGO as model set this flag to true in order to
apply first the basis functions to the input data point
'''
self.model = model
self.is_dngo = is_dngo
        # Also save the cost model if the acquisition function needs it
if cost_model is not None:
self.cost_model = cost_model
        # Keep an extra acquisition function object for each model
self.estimators = []
for _ in range(self.model.n_hypers):
            # Copy the acquisition function for this model
estimator = deepcopy(acquisition_func)
estimator.model = None
if cost_model is not None:
estimator.cost_model = None
self.estimators.append(estimator)
super(IntegratedAcquisition, self).__init__(model, X_lower, X_upper)
def update(self, model, cost_model=None, **kwargs):
"""
        Updates each acquisition function object if the models
have changed
Parameters
----------
model: Model object
The model of the objective function, it has to be an instance of
GaussianProcessMCMC or GPyModelMCMC.
cost_model: Model object
            If the acquisition function also takes the cost into account, we
have to specify here the model for the cost function. cost_model
has to be an instance of GaussianProcessMCMC or GPyModelMCMC.
"""
self.model = model
if cost_model is not None:
self.cost_model = cost_model
for i in range(self.model.n_hypers):
if cost_model is not None:
self.estimators[i].update(self.model.models[i],
self.cost_model.models[i],
**kwargs)
else:
self.estimators[i].update(self.model.models[i], **kwargs)
def compute(self, X, derivative=False):
"""
        Integrates the acquisition function over the GP's hyperparameters by
        averaging the acquisition value for X over all hyperparameter samples.
Parameters
----------
        X: np.ndarray(1, D), The input point where the acquisition function
            should be evaluated. The dimensionality of X is (N, D), with N as
the number of points to evaluate at and D is the number of
dimensions of one X.
derivative: Boolean
If is set to true also the derivative of the acquisition_functions
function at X is returned
Returns
-------
np.ndarray(1,1)
            Integrated acquisition value of X
np.ndarray(1,D)
            Derivative of the acquisition value at X (only if derivative=True)
"""
acquisition_values = np.zeros([self.model.n_hypers])
if self.is_dngo:
import lasagne
X_ = (X - self.model.X_mean) / self.model.X_std
# Get features from the net
layers = lasagne.layers.get_all_layers(self.model.network)
theta = lasagne.layers.get_output(layers[:-1], X_)[-1].eval()
else:
theta = X
        # Integrate over the acquisition values
for i in range(self.model.n_hypers):
acquisition_values[i] = self.estimators[i](theta,
derivative=derivative)
return np.array([[acquisition_values.mean()]])
```
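A schematic sketch of how the wrapper above is wired together, using tiny stand-in classes so the snippet is self-contained; the fake model and acquisition function, and the assumption that BaseAcquisitionFunction's constructor simply stores its arguments, are illustrative and not part of RoBO.
```python
import numpy as np
from robo.acquisition_functions.integrated_acquisition import IntegratedAcquisition

# Stand-in for a trained MCMC model: two hyperparameter samples, one "GP" each.
class _FakeGP:
    def predict(self, X):
        return np.sum(X, axis=1), np.ones(X.shape[0])

class _FakeMCMCModel:
    def __init__(self):
        self.n_hypers = 2
        self.models = [_FakeGP(), _FakeGP()]

# Stand-in acquisition function that simply scores the posterior mean.
class _MeanAcquisition:
    def __init__(self):
        self.model = None
    def update(self, model, **kwargs):
        self.model = model
    def __call__(self, X, derivative=False):
        mean, _ = self.model.predict(X)
        return mean

mcmc_model = _FakeMCMCModel()
integrated = IntegratedAcquisition(mcmc_model, _MeanAcquisition(),
                                   X_lower=np.zeros(2), X_upper=np.ones(2))
integrated.update(mcmc_model)                      # refresh per-sample estimators
print(integrated.compute(np.array([[0.2, 0.7]])))  # averaged value, shape (1, 1)
```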
#### File: test/test_maximizer/test_maximizers_one_dim.py
```python
import unittest
import numpy as np
from robo.maximizers.direct import Direct
from robo.maximizers.grid_search import GridSearch
def objective_function(x):
y = (0.5 - x) ** 2
return y
class TestMaximizers1D(unittest.TestCase):
def setUp(self):
self.lower = np.array([0])
self.upper = np.array([1])
def test_direct(self):
maximizer = Direct(objective_function, self.lower, self.upper)
x = maximizer.maximize()
assert x.shape[0] == 1
assert len(x.shape) == 1
assert np.all(x >= self.lower)
assert np.all(x <= self.upper)
def test_grid_search(self):
maximizer = GridSearch(objective_function, self.lower, self.upper)
x = maximizer.maximize()
assert x.shape[0] == 1
assert len(x.shape) == 1
assert np.all(x >= self.lower)
assert np.all(x <= self.upper)
if __name__ == "__main__":
unittest.main()
```
#### File: test/test_maximizer/test_maximizers_two_dim.py
```python
import unittest
import numpy as np
from robo.maximizers.cmaes import CMAES
from robo.maximizers.direct import Direct
def objective_function(x):
y = np.sum((0.5 - x) ** 2, axis=1)
return y
class TestMaximizers2D(unittest.TestCase):
def setUp(self):
self.lower = np.array([0, 0])
self.upper = np.array([1, 1])
def test_direct(self):
maximizer = Direct(objective_function, self.lower, self.upper)
x = maximizer.maximize()
assert x.shape[0] == 2
assert len(x.shape) == 1
assert np.all(x >= self.lower)
assert np.all(x <= self.upper)
def test_cmaes(self):
maximizer = CMAES(objective_function, self.lower, self.upper)
x = maximizer.maximize()
assert x.shape[0] == 2
assert len(x.shape) == 1
assert np.all(x >= self.lower)
assert np.all(x <= self.upper)
if __name__ == "__main__":
unittest.main()
```
#### File: test/test_util/test_posterior_optimization.py
```python
import unittest
import numpy as np
from robo.util.posterior_optimization import posterior_mean_optimization, posterior_mean_plus_std_optimization
from test.dummy_model import DemoQuadraticModel
class TestPosteriorOptimization(unittest.TestCase):
def setUp(self):
X = np.random.randn(5, 2)
y = np.sum((0.5 - X) ** 2, axis=1)
self.model = DemoQuadraticModel()
self.model.train(X, y)
self.lower = np.array([0, 0])
self.upper = np.array([1, 1])
self.opt = np.array([0.5, 0.5])
def test_posterior_mean_optimization(self):
x = posterior_mean_optimization(self.model, self.lower, self.upper, method="cma", n_restarts=1)
np.testing.assert_almost_equal(x, self.opt, decimal=5)
x = posterior_mean_optimization(self.model, self.lower, self.upper, method="scipy", with_gradients=False)
np.testing.assert_almost_equal(x, self.opt, decimal=5)
def test_posterior_mean_plus_std_optimization(self):
x = posterior_mean_plus_std_optimization(self.model, self.lower, self.upper, method="cma", n_restarts=1)
np.testing.assert_almost_equal(x, self.opt, decimal=5)
x = posterior_mean_optimization(self.model, self.lower, self.upper, method="scipy", with_gradients=False)
np.testing.assert_almost_equal(x, self.opt, decimal=5)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jhartika/audioapinat",
"score": 3
} |
#### File: jhartika/audioapinat/gmmwithdeltas2.py
```python
import numpy as np
import os
import json
from sklearn.mixture import GaussianMixture
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
def main():
target_dir = "gmm"
metadata_filepath = "data/ext/metadata.json"
train_file_dir = "data/train_files"
train_file_names = os.listdir(train_file_dir)
#load metadata json
with open(metadata_filepath, 'r') as f:
metadata = json.load(f)
n_classes = len(metadata['LABELS'])
train_data = [[] for i in range(n_classes)]
delta_train_files = []
train_files = []
for file_name in train_file_names :
parts = file_name.split('_')
if(len(parts) == 3) :
delta_train_files.append(file_name)
if(len(parts) == 2) :
train_files.append(file_name)
for (file_name, delta_file_name) in zip(train_files, delta_train_files) :
parts = file_name.split('_')#Get speaker from filename
# if(len(parts) != 3) : #data without deltas has 2 parts
# continue
speaker = parts[0]
data = np.load(f'{train_file_dir}/{file_name}')
delta_data = np.load(f'{train_file_dir}/{delta_file_name}')
#print(np.shape(data))
length = min(len(data), len(delta_data))
print(np.shape(data[:len(data)-1]))
print(np.shape(delta_data))
data=np.concatenate((data[:length], delta_data[:length]), axis=1)
train_data[metadata['LABELS'][speaker]].append(data)
gmm = GaussianMixture(n_components=2, tol=1e-5, max_iter=200, n_init=1, verbose=1)
delta_gmms = []
for i in range(n_classes) :
train_data[i]=np.concatenate(train_data[i][:])
print(np.shape(train_data[i]))
gmm.fit(train_data[i])
joblib.dump(gmm, f'{target_dir}/delta_gmm_{i}.pkl')
for i in range(0, n_classes) :
delta_gmm = joblib.load(f'{target_dir}/delta_gmm_{i}.pkl')
delta_gmms.append(delta_gmm)
# for i in range(0, n_classes) :
# for j in range(0, len(train_data[i])) :
# traain_data[i].append(train_data[i][j])
# #old
train_data_file = "data/ext/train_data.npy"
train_labels_file = "data/ext/train_labels.npy"
# test_data_file = "data/ext/test_data.npy"
# test_labels_file = "data/ext/test_labels.npy"
train_data = np.load(train_data_file)
train_labels = np.load(train_labels_file)
# test_data = np.load(test_data_file)
# test_labels = np.load(test_labels_file)
# """ Fitting of the GMMs """
# #number of different speakers
# gmm = GaussianMixture(n_components=1, tol=1e-3, max_iter=200, n_init=1, verbose=1)
gmms = []
for i in range(0, n_classes) :
speaker_train_data = train_data[train_labels==i]
print(np.shape(speaker_train_data))
gmm.fit(speaker_train_data)
joblib.dump(gmm, f'{target_dir}/gmm_{i}.pkl')
for i in range(0, n_classes) :
gmm = joblib.load(f'{target_dir}/gmm_{i}.pkl')
gmms.append(gmm)
""" Predict using the GMMs """
metadata_filepath = "data/ext/metadata.json"
test_file_dir = "data/test"
test_file_names = os.listdir(test_file_dir)
#load metadata json
with open(metadata_filepath, 'r') as f:
metadata = json.load(f)
labels = []
preds = []
file_names = []
delta_file_names = []
for file_name in test_file_names :
parts = file_name.split('_')
if(len(parts) == 3) :
delta_file_names.append(file_name)
if(len(parts) == 2) :
file_names.append(file_name)
#Make prediction per file in test_file_dir
#for file_name in test_file_names :
for (file_name, delta_file_name) in zip(file_names, delta_file_names) :
parts = file_name.split('_')#Get speaker from filename
# if(len(parts) != 3) : #data without deltas has 2 parts
# continue
data = np.load(f'{test_file_dir}/{file_name}')
delta_data = np.load(f'{test_file_dir}/{delta_file_name}')
length = min(len(data), len(delta_data))
print(np.shape(data[:len(data)-1]))
print(np.shape(delta_data))
data=np.concatenate((data[:length], delta_data[:length]), axis=1)
testscores = np.zeros((len(data), n_classes))
#Score each sample in a file with all GMMs
for i in range(0, n_classes) :
testscores[:, i] = delta_gmms[i].score_samples(data)
# testscores = np.zeros((len(data)+len(delta_data), n_classes))
# #Score each sample in a file with all GMMs
# for sample in data :
# for i in range(0, n_classes) :
# testscores[0:len(data), i] = gmms[i].score_samples(data)
# testscores[len(data):, i] = delta_gmms[i].score_samples(delta_data)
#Predict label(highest scoring GMM index) for each sample
predictions = np.argmax(testscores, axis=1)
#Majority vote between predictions for the file
prediction = np.bincount(predictions).argmax()
#Gather predictions and correct labels for accuracy score
preds.append(prediction)
label = metadata['LABELS'][parts[0]]#Get label matching speaker
labels.append(label)
print(f'pred:{prediction}, label:{label}')
#Print accuracy score
print(accuracy_score(labels, preds))
if __name__ == '__main__':
main()
``` |
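The core preprocessing step in the script above is aligning the static and delta feature arrays to a common length and stacking them column-wise; here is a small self-contained sketch of that step, with random arrays standing in for the per-speaker .npy files.
```python
import numpy as np

def stack_with_deltas(features, delta_features):
    # Truncate both arrays to the shorter one, then concatenate along the
    # feature axis, mirroring the per-file concatenation in the script above.
    length = min(len(features), len(delta_features))
    return np.concatenate((features[:length], delta_features[:length]), axis=1)

# Assumed shapes: 13 MFCCs per frame plus their 13 deltas.
mfcc = np.random.randn(120, 13)
deltas = np.random.randn(119, 13)
print(stack_with_deltas(mfcc, deltas).shape)  # (119, 26)
```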
{
"source": "jhart-r7/b2-sdk-python",
"score": 2
} |
#### File: b2-sdk-python/b2sdk/session.py
```python
import functools
from b2sdk.exception import (InvalidAuthToken, Unauthorized)
from b2sdk.raw_api import ALL_CAPABILITIES
class B2Session(object):
"""
A *magic* facade that supplies the correct api_url and account_auth_token
to methods of underlying raw_api and reauthorizes if necessary
"""
def __init__(self, api, raw_api):
self._api = api # for reauthorization
self.raw_api = raw_api
def __getattr__(self, name):
f = getattr(self.raw_api, name)
@functools.wraps(f)
def wrapper(*args, **kwargs):
auth_failure_encountered = False
# download_by_name uses different URLs
url_factory = kwargs.pop('url_factory', self._api.account_info.get_api_url)
            while True:
api_url = url_factory()
account_auth_token = self._api.account_info.get_account_auth_token()
try:
return f(api_url, account_auth_token, *args, **kwargs)
except InvalidAuthToken:
if not auth_failure_encountered:
auth_failure_encountered = True
reauthorization_success = self._api.authorize_automatically()
if reauthorization_success:
continue
# TODO: exception chaining could be added here
# to help debug reauthorization failures
raise
except Unauthorized as e:
raise self._add_app_key_info_to_unauthorized(e)
return wrapper
def _add_app_key_info_to_unauthorized(self, unauthorized):
"""
Takes an Unauthorized error and adds information from the application key
about why it might have failed.
"""
# What's allowed?
allowed = self._api.account_info.get_allowed()
capabilities = allowed['capabilities']
bucket_name = allowed['bucketName']
name_prefix = allowed['namePrefix']
# Make a list of messages about the application key restrictions
key_messages = []
if set(capabilities) != set(ALL_CAPABILITIES):
key_messages.append("with capabilities '" + ','.join(capabilities) + "'")
if bucket_name is not None:
key_messages.append("restricted to bucket '" + bucket_name + "'")
if name_prefix is not None:
key_messages.append("restricted to files that start with '" + name_prefix + "'")
if not key_messages:
key_messages.append('with no restrictions')
# Make a new message
new_message = unauthorized.message
if new_message == '':
new_message = 'unauthorized'
new_message += ' for application key ' + ', '.join(key_messages)
return Unauthorized(new_message, unauthorized.code)
```
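To make the facade's behaviour concrete, here is a self-contained sketch with fake stand-ins (not part of b2sdk) showing how B2Session injects api_url and account_auth_token into every raw-API call; the get_thing method is purely hypothetical.
```python
from b2sdk.session import B2Session

class _FakeAccountInfo:
    def get_api_url(self):
        return 'https://api.example.com'
    def get_account_auth_token(self):
        return 'token123'

class _FakeApi:
    account_info = _FakeAccountInfo()

class _FakeRawApi:
    # Every raw-API method takes (api_url, account_auth_token, ...) first.
    def get_thing(self, api_url, account_auth_token, thing_id):
        return (api_url, account_auth_token, thing_id)

session = B2Session(_FakeApi(), _FakeRawApi())
# The caller passes only thing_id; the session supplies the first two arguments.
print(session.get_thing('abc'))  # ('https://api.example.com', 'token123', 'abc')
```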
#### File: b2sdk/sync/exception.py
```python
from ..exception import B2Error
class EnvironmentEncodingError(B2Error):
"""
    Raised when a file name cannot be decoded with system encoding
"""
def __init__(self, filename, encoding):
"""
:param filename: an encoded file name
:type filename: str, bytes
:param encoding: file name encoding
:type encoding: str
"""
super(EnvironmentEncodingError, self).__init__()
self.filename = filename
self.encoding = encoding
def __str__(self):
return """file name %s cannot be decoded with system encoding (%s).
We think this is an environment error which you should workaround by
setting your system encoding properly, for example like this:
export LANG=en_US.UTF-8""" % (
self.filename,
self.encoding,
)
```
#### File: b2sdk/sync/policy_manager.py
```python
from .policy import DownAndDeletePolicy, DownAndKeepDaysPolicy, DownPolicy
from .policy import UpAndDeletePolicy, UpAndKeepDaysPolicy, UpPolicy
class SyncPolicyManager(object):
"""
Policy manager, implements a logic to get a correct policy class
and create a policy object based on various parameters
"""
def __init__(self):
        self.policies = {}  # placeholder; not used by the methods below
def get_policy(
self, sync_type, source_file, source_folder, dest_file, dest_folder, now_millis, args
):
"""
Return policy object
:param sync_type: synchronization type
:type sync_type: str
:param source_file: source file name
:type source_file: str
:param source_folder: a source folder path
:type source_folder: str
:param dest_file: destination file name
:type dest_file: str
:param dest_folder: a destination folder path
:type dest_folder: str
:param now_millis: current time in milliseconds
:type now_millis: int
:param args: an object which holds command line arguments
:return: a policy object
"""
policy_class = self.get_policy_class(sync_type, args)
return policy_class(source_file, source_folder, dest_file, dest_folder, now_millis, args)
def get_policy_class(self, sync_type, args):
"""
Get policy class by a given sync type
:param sync_type: synchronization type
:type sync_type: str
:param args: an object which holds command line arguments
:return: a policy class
"""
if sync_type == 'local-to-b2':
if args.delete:
return UpAndDeletePolicy
elif args.keepDays:
return UpAndKeepDaysPolicy
else:
return UpPolicy
elif sync_type == 'b2-to-local':
if args.delete:
return DownAndDeletePolicy
elif args.keepDays:
return DownAndKeepDaysPolicy
else:
return DownPolicy
assert False, 'invalid sync type: %s, args: %s' % (sync_type, str(args))
POLICY_MANAGER = SyncPolicyManager()
```
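A quick illustration of the class-selection logic above; since get_policy_class only reads the delete and keepDays attributes, a SimpleNamespace stands in for the parsed command-line arguments.
```python
from types import SimpleNamespace
from b2sdk.sync.policy_manager import POLICY_MANAGER

args = SimpleNamespace(delete=False, keepDays=30)
print(POLICY_MANAGER.get_policy_class('local-to-b2', args).__name__)
# -> UpAndKeepDaysPolicy

args = SimpleNamespace(delete=True, keepDays=None)
print(POLICY_MANAGER.get_policy_class('b2-to-local', args).__name__)
# -> DownAndDeletePolicy
```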
#### File: b2-sdk-python/b2sdk/utils.py
```python
from __future__ import division, print_function
import hashlib
import os
import platform
import re
import shutil
import tempfile
from logfury.v0_1 import DefaultTraceAbstractMeta, DefaultTraceMeta, limit_trace_arguments, disable_trace, trace_call
import six
try:
import concurrent.futures as futures
except ImportError:
import futures
def interruptible_get_result(future):
"""
Waits for the result of a future in a way that can be interrupted
by a KeyboardInterrupt.
This is not necessary in Python 3, but is needed for Python 2.
:param future: a future to get result of
:type future: Future
"""
while True:
try:
return future.result(timeout=1.0)
except futures.TimeoutError:
pass
def b2_url_encode(s):
"""
URL-encodes a unicode string to be sent to B2 in an HTTP header.
:param s: a unicode string to encode
:type s: str
:return: URL-encoded string
:rtype: str
"""
return six.moves.urllib.parse.quote(s.encode('utf-8'))
def b2_url_decode(s):
"""
Decodes a Unicode string returned from B2 in an HTTP header.
:param s: a unicode string to decode
:type s: str
:return: a Python unicode string.
:rtype: str
"""
result = six.moves.urllib.parse.unquote_plus(s)
if six.PY2:
# The behavior of unquote_plus is different in python 2 and 3.
# In Python 3, it decodes the UTF-8, while in Python 2 it does not.
result = result.decode('utf-8')
return result
def choose_part_ranges(content_length, minimum_part_size):
"""
Returns a list of (offset, length) for the parts of a large file.
:param content_length: content length value
:type content_length: int
:param minimum_part_size: a minimum file part size
:type minimum_part_size: int
:rtype: list
"""
# If the file is at least twice the minimum part size, we are guaranteed
# to be able to break it into multiple parts that are all at least
# the minimum part size.
assert minimum_part_size * 2 <= content_length
# How many parts can we make?
part_count = min(content_length // minimum_part_size, 10000)
assert 2 <= part_count
# All of the parts, except the last, are the same size. The
# last one may be bigger.
part_size = content_length // part_count
last_part_size = content_length - (part_size * (part_count - 1))
assert minimum_part_size <= last_part_size
# Make all of the parts except the last
parts = [(i * part_size, part_size) for i in six.moves.range(part_count - 1)]
# Add the last part
start_of_last = (part_count - 1) * part_size
last_part = (start_of_last, content_length - start_of_last)
parts.append(last_part)
return parts
def hex_sha1_of_stream(input_stream, content_length):
"""
Returns the 40-character hex SHA1 checksum of the first content_length
bytes in the input stream.
:param input_stream: stream object, which exposes read() method
:param content_length: expected length of the stream
:type content_length: int
:rtype: str
"""
remaining = content_length
block_size = 1024 * 1024
digest = hashlib.sha1()
while remaining != 0:
to_read = min(remaining, block_size)
data = input_stream.read(to_read)
if len(data) != to_read:
raise ValueError(
'content_length(%s) is more than the size of the file' % content_length
)
digest.update(data)
remaining -= to_read
return digest.hexdigest()
def hex_sha1_of_bytes(data):
"""
Returns the 40-character hex SHA1 checksum of the data
:param data: an array of bytes
:type data: bytes
:rtype: str
"""
return hashlib.sha1(data).hexdigest()
def validate_b2_file_name(name):
"""
Raises a ValueError if the name is not a valid B2 file name.
:param name: a string to check
:type name: str
"""
if not isinstance(name, six.string_types):
raise ValueError('file name must be a string, not bytes')
name_utf8 = name.encode('utf-8')
if len(name_utf8) < 1:
raise ValueError('file name too short (0 utf-8 bytes)')
if 1000 < len(name_utf8):
raise ValueError('file name too long (more than 1000 utf-8 bytes)')
if name[0] == '/':
raise ValueError("file names must not start with '/'")
if name[-1] == '/':
raise ValueError("file names must not end with '/'")
if '\\' in name:
raise ValueError("file names must not contain '\\'")
if '//' in name:
raise ValueError("file names must not contain '//'")
if chr(127) in name:
raise ValueError("file names must not contain DEL")
if any(250 < len(segment) for segment in name_utf8.split(six.b('/'))):
raise ValueError("file names segments (between '/') can be at most 250 utf-8 bytes")
def is_file_readable(local_path, reporter=None):
"""
Check if the local file has read permissions
:param local_path: a file path
:type local_path: str
:param reporter: reporter object to put errors on
:rtype: bool
"""
if not os.path.exists(local_path):
if reporter is not None:
reporter.local_access_error(local_path)
return False
elif not os.access(local_path, os.R_OK):
if reporter is not None:
reporter.local_permission_error(local_path)
return False
return True
def fix_windows_path_limit(path):
"""
Prefix paths when running on Windows to overcome 260 character path length limit
See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
:param path: a path to prefix
:type path: str
:return: a prefixed path
:rtype: str
"""
if platform.system() == 'Windows':
if path.startswith('\\\\'):
# UNC network path
return '\\\\?\\UNC\\' + path[2:]
elif os.path.isabs(path):
# local absolute path
return '\\\\?\\' + path
else:
# relative path, don't alter
return path
else:
return path
class BytesIoContextManager(object):
"""
A simple wrapper for a BytesIO that makes it look like
a file-like object that can be a context manager.
"""
def __init__(self, byte_data):
"""
        :param byte_data: a byte stream
"""
self.byte_data = byte_data
def __enter__(self):
return six.BytesIO(self.byte_data)
def __exit__(self, type_, value, traceback):
return None # don't hide exception
class TempDir(object):
"""
Context manager that creates and destroys a temporary directory.
"""
def __enter__(self):
"""
Returns the unicode path to the temp dir.
"""
self.dirpath = six.u(tempfile.mkdtemp())
return self.dirpath
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.dirpath)
return None # do not hide exception
def _pick_scale_and_suffix(x):
# suffixes for different scales
suffixes = ' kMGTP'
# We want to use the biggest suffix that makes sense.
ref_digits = str(int(x))
index = (len(ref_digits) - 1) // 3
suffix = suffixes[index]
if suffix == ' ':
suffix = ''
scale = 1000**index
return (scale, suffix)
def format_and_scale_number(x, unit):
"""
Picks a good scale for representing a number and formats it.
:param x: a number
:type x: int
:param unit: an arbitrary unit name
:type unit: str
:return: scaled and formatted number
:rtype: str
"""
# simple case for small numbers
if x < 1000:
return '%d %s' % (x, unit)
# pick a scale
(scale, suffix) = _pick_scale_and_suffix(x)
# decide how many digits after the decimal to display
scaled = x / scale
if scaled < 10.0:
fmt = '%1.2f %s%s'
elif scaled < 100.0:
fmt = '%1.1f %s%s'
else:
fmt = '%1.0f %s%s'
# format it
return fmt % (scaled, suffix, unit)
def format_and_scale_fraction(numerator, denominator, unit):
"""
Picks a good scale for representing a fraction, and formats it.
:param numerator: a numerator of a fraction
:type numerator: int
:param denominator: a denominator of a fraction
:type denominator: int
:param unit: an arbitrary unit name
:type unit: str
:return: scaled and formatted fraction
:rtype: str
"""
# simple case for small numbers
if denominator < 1000:
return '%d / %d %s' % (numerator, denominator, unit)
# pick a scale
(scale, suffix) = _pick_scale_and_suffix(denominator)
# decide how many digits after the decimal to display
scaled_denominator = denominator / scale
if scaled_denominator < 10.0:
fmt = '%1.2f / %1.2f %s%s'
elif scaled_denominator < 100.0:
fmt = '%1.1f / %1.1f %s%s'
else:
fmt = '%1.0f / %1.0f %s%s'
# format it
scaled_numerator = numerator / scale
return fmt % (scaled_numerator, scaled_denominator, suffix, unit)
_CAMELCASE_TO_UNDERSCORE_RE = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
def camelcase_to_underscore(input_):
"""
Convert camel cased string to string with underscores
:param input_: an input string
:type input_: str
:return: string with underscores
:rtype: str
"""
return _CAMELCASE_TO_UNDERSCORE_RE.sub(r'_\1', input_).lower()
class B2TraceMeta(DefaultTraceMeta):
"""
Traces all public method calls, except for ones with names that begin with `get_`
"""
pass
class B2TraceMetaAbstract(DefaultTraceAbstractMeta):
"""
Default class for tracers, to be set as
a metaclass for abstract base classes
"""
pass
assert disable_trace
assert limit_trace_arguments
assert trace_call
``` |
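A few doctest-style calls to the pure helpers defined above; the expected values in the comments were worked out by hand from the code.
```python
from b2sdk.utils import (
    camelcase_to_underscore, choose_part_ranges,
    format_and_scale_fraction, format_and_scale_number,
)

print(choose_part_ranges(content_length=400, minimum_part_size=100))
# -> [(0, 100), (100, 100), (200, 100), (300, 100)]
print(format_and_scale_number(1234567, 'B'))         # -> '1.23 MB'
print(format_and_scale_fraction(250, 1000000, 'B'))  # -> '0.00 / 1.00 MB'
print(camelcase_to_underscore('bucketName'))         # -> 'bucket_name'
```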
{
"source": "jhart-r7/propsd-client-python",
"score": 2
} |
#### File: propsd-client-python/propsd/__init__.py
```python
import requests
import json
import logging
import objectpath
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.memory import MemoryJobStore
log = logging.getLogger(__name__)
class Client(object):
"""The propsd client
Keyword Args:
propsd_server (str): The server hostname/ip address (default localhost)
propsd_port (int): The server port (default 9100)
"""
def __init__(self, propsd_server='localhost', propsd_port=9100):
self.propsd_server = propsd_server
self.propsd_port = propsd_port
self.__update_callbacks = []
self.__update_properties_previous = {}
self.__update_scheduler = BackgroundScheduler({
'apscheduler.jobstores.default': {
'class': 'apscheduler.jobstores.memory:MemoryJobStore',
},
'apscheduler.executors.default': {
'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
'max_workers': '1'
},
'apscheduler.job_defaults.coalesce': 'true',
'apscheduler.job_defaults.max_instances': '1',
'apscheduler.timezone': 'UTC',
})
self.__update_job = self.__update_scheduler.add_job(
self.__update_properties,
'interval',
seconds=1,
id='update-check-job')
self.__update_scheduler.start()
def get(self, key):
"""Gets a specific property
Args:
key (str): The key to retrieve
Returns:
str: The property value, or None.
"""
try:
response = requests.get("http://%s:%d/v1/conqueso/api/roles/default/properties/%s" % (self.propsd_server, self.propsd_port, key))
return response.text
        except Exception:
            log.warning("Could not retrieve property value")
def properties(self):
"""Gets all propsd properties
Returns:
dict: The complete propsd property set
"""
try:
response = requests.get("http://%s:%d/v1/properties" % (self.propsd_server, self.propsd_port))
return json.loads(response.text)
        except Exception:
            log.warning("Could not retrieve property value")
def status(self):
"""Gets the status of the propsd service
Returns:
dict: A dictionary containing the status parameters.
"""
response = requests.get("http://%s:%d/v1/status" % (self.propsd_server, self.propsd_port))
return json.loads(response.text)
def health(self):
"""Gets the health of the propsd service
Returns:
dict: A dictionary containing the health parameters.
"""
response = requests.get("http://%s:%d/v1/health" % (self.propsd_server, self.propsd_port))
return json.loads(response.text)
def subscribe(self, search, callback):
"""Subscribe to document changes
Args:
search (str): The objectpatch search string
callback (object): The function to call
"""
self.__update_callbacks.append({'search': search, 'callback': callback})
def shutdown(self):
"""Shuts down the propsd client
"""
self.__update_scheduler.shutdown()
def __update_properties(self):
properties = self.properties()
for item in self.__update_callbacks:
search = item['search']
thistree = objectpath.Tree(properties)
thisresult = thistree.execute(search)
thattree = objectpath.Tree(self.__update_properties_previous)
thatresult = thattree.execute(search)
if thisresult != thatresult:
item['callback'](search, properties, thisresult)
self.__update_properties_previous = properties
``` |
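A hedged usage sketch for the propsd client above; the server address, the property key, and the objectpath query are placeholders for illustration.
```python
from propsd import Client

client = Client(propsd_server='localhost', propsd_port=9100)
print(client.status())                  # service status document
print(client.get('some.property.key'))  # single Conqueso-style property (placeholder key)

def on_change(search, properties, result):
    print('Value for %s changed to %r' % (search, result))

# Call on_change whenever the queried part of the property document changes.
client.subscribe('$.instance', on_change)
# ... later, stop the background polling scheduler cleanly:
client.shutdown()
```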
{
"source": "jhartz/iochannels",
"score": 3
} |
#### File: iochannels/support/readline_support.py
```python
import atexit
from typing import Callable, List, Optional, Sequence, cast
class InputCompleter:
"""
Class used to handle autocomplete on an input via the readline module.
Inspired by rlcompleter from the python standard library.
This class can be set to have either a list of autocomplete options, or only a single
autocomplete option.
There's a difference between using "set_option" with a list with only one option, and using
"set_single_option". If the user hasn't entered anything yet, and hits TAB:
- If a list was set using "set_options", then no match is returned.
- If a single option was set using "set_single_option", then that option is returned.
Note that having a single option be returned when the user hasn't entered any text can cause
odd issues if the user enters some text, then a space, then hits TAB.
"""
def __init__(self) -> None:
self.options = None # type: Optional[List[str]]
self.matches = None # type: Optional[List[str]]
self.single_option = None # type: Optional[str]
def set_options(self, options: Optional[Sequence[str]]) -> None:
self.options = options
self.matches = None
self.single_option = None
def set_single_option(self, option: str) -> None:
self.options = None
self.matches = None
self.single_option = option
def __call__(self, text: str, state: int) -> Optional[str]:
"""
Get the next possible completion for "text".
:param text: The text that the user has entered so far.
:param state: The index of the item in the results list.
:return: The item matched by text and state, or None.
"""
if self.options is not None:
return self._get_option(text, state)
elif self.single_option is not None:
return self._get_single_option(text, state)
else:
# readline not currently turned on; maybe the user actually wants a tab character
if state == 0:
_readline.insert_text("\t")
_readline.redisplay()
return ""
def _get_option(self, text: str, state: int) -> Optional[str]:
if not text.strip():
return None
if state == 0 or self.matches is None:
self.matches = [s for s in self.options if s and s.startswith(text)]
try:
return self.matches[state]
except IndexError:
return None
def _get_single_option(self, text: str, state: int) -> Optional[str]:
if state == 0 and self.single_option.startswith(text):
return self.single_option
return None
try:
import readline as _readline
global_readline_completer = InputCompleter() # type: Optional[InputCompleter]
_readline.set_completer(cast(Callable[[str, int], str], global_readline_completer))
_readline.parse_and_bind("tab: complete")
atexit.register(lambda: _readline.set_completer(None))
except ImportError:
_readline = None
global_readline_completer = None
``` |
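A small sketch of how the completer above can wrap a single prompt; prompt_with_choices is a hypothetical helper, the import path is inferred from the file location, and passing None to set_options restores the pass-through tab behaviour shown in __call__.
```python
from support.readline_support import global_readline_completer

def prompt_with_choices(prompt, choices):
    # Offer tab-completion for this one prompt only; global_readline_completer
    # is None when the readline module is unavailable on this platform.
    if global_readline_completer is not None:
        global_readline_completer.set_options(choices)
    try:
        return input(prompt)
    finally:
        if global_readline_completer is not None:
            global_readline_completer.set_options(None)

# answer = prompt_with_choices("Continue? ", ["yes", "no"])
```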
{
"source": "j-hase/reda",
"score": 2
} |
#### File: reda/importers/eit_fzj.py
```python
import functools
import os
import numpy as np
import pandas as pd
import scipy.io as sio
import reda.importers.eit_version_2010 as eit_version_2010
import reda.importers.eit_version_2013 as eit_version_2013
import reda.importers.eit_version_2017 as eit_version_2017
import reda.importers.eit_version_2018a as eit_version_2018a
import reda.importers.eit_version_20200609 as eit_version_20200609
from reda.importers.utils.decorators import enable_result_transforms
from reda.configs.configManager import ConfigManager
# data file formats differ slightly between versions. Version numbers do not
# follow a consistent naming scheme. Therefore we introduce this dict to map
# the version numbers found in the .mat files to the reda python modules.
mat_version_importers = {
# this is the file version used for the 160 channel multiplexer system
'FZJ-EZ-2018A': eit_version_2018a,
'FZJ-EZ-2017': eit_version_2017,
'FZJ-EZ-09.11.2010': eit_version_2010,
'FZJ-EZ-14.02.2013': eit_version_2013,
'EZ-2020-06-09': eit_version_20200609,
}
def _get_file_version(filename):
"""High level import function that tries to determine the specific version
of the data format used.
Parameters
----------
filename: string
File path to a .mat Matlab file, as produced by the various
versions of the emmt_pp.exe postprocessing program.
Returns
-------
version: string
a sanitized version of the file format version
"""
mat = sio.loadmat(filename, squeeze_me=True)
version = mat['MP']['Version'].item()
del(mat)
return version
def MD_ConfigsPermutate(df_md):
"""Given a MD DataFrame, return a Nx4 array which permutes the current
injection dipoles.
"""
g_current_injections = df_md.groupby(['a', 'b'])
ab = np.array(list(g_current_injections.groups.keys()))
config_mgr = ConfigManager(nr_of_electrodes=ab.max())
config_mgr.gen_configs_permutate(ab, silent=True)
return config_mgr.configs
def get_mnu0_data(filename, configs, return_3p=False, **kwargs):
"""Import data post-processed as 3P data (NMU0), i.e., measured towards
common ground.
Parameters
----------
filename : string (usually: eit_data_mnu0.mat)
filename of matlab file
configs : Nx4 numpy.ndarray|filename|function
4P measurement configurations (ABMN) to generate out of the data. If
this parameter is a callable, it is called with the MD DataFrame as its
sole parameter and is expected to return an Nx4 numpy.ndarray
return_3p : bool, optional
also return 3P data
Keyword Arguments
-----------------
multiplexer_group : int|None, optional
For the multiplexer system (version 2018a) the multiplexer group MUST
be specified to import data. This is a number between 1 and 4.
Returns
-------
data_emd_4p : pandas.DataFrame
The generated 4P data
data_md_raw : pandas.DataFrame|None
MD data (sometimes this data is not imported, then we return None here)
data_emd_3p : pandas.DataFrame
The imported 3P data (only if return_3p==True)
"""
if not os.path.isfile(filename):
raise IOError('Data file not found! {}'.format(filename))
version = _get_file_version(filename)
importer = mat_version_importers.get(version, None)
if importer is not None:
mat = sio.loadmat(filename, squeeze_me=True)
data_md_raw = importer._extract_md(mat, **kwargs)
data_emd_3p = importer._extract_emd(mat, **kwargs)
# check configs
if callable(configs):
configs_abmn = configs(data_md_raw)
else:
configs_abmn = configs
if data_emd_3p is not None:
data_emd_4p = compute_quadrupoles(
data_emd_3p, configs_abmn, data_md_raw)
else:
data_emd_4p = None
else:
raise Exception(
'The file version "{}" is not supported yet.'.format(
version)
)
if return_3p:
return data_emd_4p, data_md_raw, data_emd_3p
else:
return data_emd_4p, data_md_raw
def get_md_data(filename, **kwargs):
"""Import data and return the MD (i.e., injection) data
Parameters
----------
filename : string (usually: eit_data_mnu0.mat)
filename of matlab file
Keyword Arguments
-----------------
multiplexer_group : int|None, optional
For the multiplexer system (version 2018a) the multiplexer group MUST
be specified to import data. This is a number between 1 and 4.
Returns
-------
data_md_raw : pandas.DataFrame|None
MD data (sometimes this data is not imported, then we return None here)
"""
if not os.path.isfile(filename):
raise IOError('Data file not found! {}'.format(filename))
version = _get_file_version(filename)
importer = mat_version_importers.get(version, None)
if importer is not None:
mat = sio.loadmat(filename, squeeze_me=True)
data_md_raw = importer._extract_md(mat, **kwargs)
return data_md_raw
else:
raise Exception('emmt_pp version not found: {}'.format(version))
def get_adc_data(filename, **kwargs):
"""Import data and return the adc-related data from the MD (i.e.,
injection) structure
Parameters
----------
filename : string (usually: eit_data_mnu0.mat)
filename of matlab file
Keyword Arguments
-----------------
multiplexer_group : int|None, optional
For the multiplexer system (version 2018a) the multiplexer group MUST
be specified to import data. This is a number between 1 and 4.
Returns
-------
data_adc_raw : pandas.DataFrame|None
adc-MD data (sometimes this data is not imported, then we return None
here)
"""
if not os.path.isfile(filename):
raise IOError('Data file not found! {}'.format(filename))
version = _get_file_version(filename)
importer = mat_version_importers.get(version, None)
if importer is not None:
mat = sio.loadmat(filename, squeeze_me=True)
data_md_raw = importer._extract_adc_data(mat, **kwargs)
return data_md_raw
else:
raise Exception('emmt_pp version not found: {}'.format(version))
@enable_result_transforms
@functools.wraps(get_mnu0_data)
def read_3p_data(*args, **kwargs):
# this is a wrapper that conforms to the importer standards
results = get_mnu0_data(*args, **kwargs)
df_emd = results[0]
return df_emd, None, None
def compute_quadrupoles(df_emd, config_file, df_md=None):
"""
Parameters
----------
df_emd : pandas.DataFrame
The EMD data, as imported from the .mat file (3P-data)
config_file : string|numpy.ndarray
filename of the configuration file, or an Nx4 array. The configuration
file contains N rows with 4 columns each (A, B, M, N)
df_md : pandas.DataFrame (optional)
The MD data
Returns
-------
"""
# 'configs' can be a numpy array or a filename
if not isinstance(config_file, np.ndarray):
configs = np.loadtxt(config_file).astype(int)
else:
configs = config_file
configs = np.atleast_2d(configs)
# construct four-point measurements via superposition
print('Constructing four-point measurements')
quadpole_list = []
index = 0
for Ar, Br, M, N in configs:
# print('constructing', Ar, Br, M, N)
# the order of A and B doesn't concern us
A = np.min((Ar, Br))
B = np.max((Ar, Br))
# first choice: correct ordering
query_M = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, M
))
query_N = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
A, B, N
))
if query_M.size == 0 or query_N.size == 0:
continue
index += 1
# keep these columns as they are (no subtracting)
keep_cols_all = [
'datetime',
'frequency',
'a', 'b',
'Zg1', 'Zg2', 'Zg3',
'Zg',
'Is',
'Il',
'Iab',
'Ileakage',
]
# only keep those are actually there
keep_cols = [x for x in keep_cols_all if x in query_M.columns]
df4 = pd.DataFrame()
diff_cols = ['Zt', ]
df4[keep_cols] = query_M[keep_cols]
for col in diff_cols:
df4[col] = query_M[col].values - query_N[col].values
df4['m'] = query_M['p'].values
df4['n'] = query_N['p'].values
quadpole_list.append(df4)
if quadpole_list:
dfn = pd.concat(quadpole_list)
Rsign = np.sign(np.real(dfn['Zt']))
dfn['r'] = Rsign * np.abs(dfn['Zt'])
if 'Iab' in dfn.columns:
dfn['Vmn'] = dfn['r'] * dfn['Iab']
dfn['rpha'] = np.arctan2(
np.imag(dfn['Zt'].values),
np.real(dfn['Zt'].values)
) * 1e3
# Depending on the specific analysis software version, some columns are
# located in the md struct and need to be merged to the dfn
check_md_columns = [
'Zg',
'Iab',
'Ileakage',
]
for column in check_md_columns:
if(column not in dfn.columns and df_md is not None and
column in df_md.columns):
print('Adding column {} from MD'.format(column))
# import IPython
# IPython.embed()
dfn = pd.merge(
dfn,
df_md[['a', 'b', 'frequency', column]],
on=['a', 'b', 'frequency']
)
else:
dfn = pd.DataFrame()
return dfn.sort_values(['frequency', 'a', 'b', 'm', 'n'])
def apply_correction_factors(df, correction_data):
"""Apply correction factors for a pseudo-2D measurement setup. See Weigand
and Kemna, 2017, Biogeosciences, for detailed information.
Parameters
----------
df : :py:class:`pandas.DataFrame`
DataFrame containing the data
correction_data : string|iterable of strings|:py:class:`numpy.ndarray`
Correction data, either as a filename, a list of filenames to be
merged, or directly as a numpy array
Returns
-------
df : :py:class:`pandas.DataFrame`
Corrected data
corr_data : :py:class:`numpy.ndarray`
Correction factors used
"""
if isinstance(correction_data, (list, tuple)):
corr_data_raw = np.vstack(
[np.loadtxt(x) for x in correction_data]
)
elif isinstance(correction_data, np.ndarray):
corr_data_raw = correction_data
else:
# assume only one data file
corr_data_raw = np.loadtxt(correction_data)
assert corr_data_raw.shape[1] in (3, 5)
# if required, convert from CRTomo electrode denotations in (a,b,m,n) style
if corr_data_raw.shape[1] == 3:
A = (corr_data_raw[:, 0] / 1e4).astype(int)
B = (corr_data_raw[:, 0] % 1e4).astype(int)
M = (corr_data_raw[:, 1] / 1e4).astype(int)
N = (corr_data_raw[:, 1] % 1e4).astype(int)
corr_data = np.vstack((A, B, M, N, corr_data_raw[:, 2])).T
else:
corr_data = corr_data_raw
corr_data[:, 0:2] = np.sort(corr_data[:, 0:2], axis=1)
corr_data[:, 2:4] = np.sort(corr_data[:, 2:4], axis=1)
# if 'frequency' not in df.columns:
# raise Exception(
# 'No frequency data found. Are you sure this is a seit data set?'
# )
df = df.reset_index()
gf = df.groupby(['a', 'b', 'm', 'n'])
for key, item in gf.indices.items():
# print('key', key)
# print(item)
item_norm = np.hstack((np.sort(key[0:2]), np.sort(key[2:4])))
# print(item_norm)
index = np.where(
(corr_data[:, 0] == item_norm[0]) &
(corr_data[:, 1] == item_norm[1]) &
(corr_data[:, 2] == item_norm[2]) &
(corr_data[:, 3] == item_norm[3])
)[0]
# print(index, corr_data[index])
if len(index) == 0:
print(key)
raise Exception(
'No correction factor found for this configuration'
)
factor = corr_data[index, 4]
# apply correction factor
for col in ('r', 'Zt', 'Vmn', 'rho_a'):
if col in df.columns:
df.iloc[item, df.columns.get_loc(col)] *= factor
# add the correction factor to the DataFrame
if 'corr_fac' not in df.columns:
df['corr_fac'] = np.nan
df.iloc[item, df.columns.get_loc('corr_fac')] = factor
return df, corr_data
```
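The importer above accepts either an explicit Nx4 ABMN configuration array or a callable such as `MD_ConfigsPermutate` that derives the configurations from the MD data. A minimal usage sketch, assuming a local file named `eit_data_mnu0.mat` exists (the filename is illustrative only):
```python
import numpy as np
from reda.importers.eit_fzj import get_mnu0_data, MD_ConfigsPermutate

# let the importer permute the injection dipoles found in the MD data
data_emd_4p, data_md_raw = get_mnu0_data('eit_data_mnu0.mat', MD_ConfigsPermutate)

# or pass an explicit Nx4 ABMN configuration array
configs = np.array([[1, 2, 3, 4], [2, 3, 4, 5]])
data_emd_4p, data_md_raw = get_mnu0_data('eit_data_mnu0.mat', configs)
```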
#### File: reda/plotters/histograms.py
```python
import pandas as pd
import reda.utils.mpl
# import pylab as plt
# import matplotlib as mpl
# mpl.rcParams['font.size'] = 8.0
import numpy as np
import reda.main.units as units
plt, mpl = reda.utils.mpl.setup()
def _get_nr_bins(count):
"""depending on the number of data points, compute a best guess for an
optimal number of bins
https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width
"""
if count <= 30:
# use the square-root choice, used by Excel and Co
k = np.ceil(np.sqrt(count))
else:
# use Sturges' formula
k = np.ceil(np.log2(count)) + 1
return int(k)
def plot_histograms(ertobj, keys, **kwargs):
"""Generate histograms for one or more keys in the given container.
Parameters
----------
ertobj : container instance or :class:`pandas.DataFrame`
data object which contains the data.
keys : str or list of strings
which keys (column names) to plot
merge : bool, optional
if True, then generate only one figure with all key-plots as columns
(default True)
log10plot : bool, optional
default: True
extra_dims : list, optional
?
nr_of_bins : None|int
if an int is given, use this as the number of bins, otherwise use a
heuristic.
Examples
--------
>>> from reda.plotters import plot_histograms
>>> from reda.testing import ERTContainer
>>> figs_dict = plot_histograms(ERTContainer, "r", merge=False)
Generating histogram plot for key: r
Returns
-------
figures : dict
dictionary with the generated histogram figures
"""
# you can either provide a DataFrame or an ERT object
if isinstance(ertobj, pd.DataFrame):
df = ertobj
else:
df = ertobj.data
if df.shape[0] == 0:
raise Exception('No data present, cannot plot')
if isinstance(keys, str):
keys = [keys, ]
figures = {}
merge_figs = kwargs.get('merge', True)
if merge_figs:
nr_x = 2
nr_y = len(keys)
size_x = 15 / 2.54
size_y = 5 * nr_y / 2.54
fig, axes_all = plt.subplots(nr_y, nr_x, figsize=(size_x, size_y))
axes_all = np.atleast_2d(axes_all)
for row_nr, key in enumerate(keys):
print('Generating histogram plot for key: {0}'.format(key))
subdata_raw = df[key].values
subdata = subdata_raw[~np.isnan(subdata_raw)]
subdata = subdata[np.isfinite(subdata)]
nr_of_bins = kwargs.get('nr_of_bins', _get_nr_bins(subdata.size))
subdata_log10_with_nan = np.log10(subdata[subdata > 0])
subdata_log10 = subdata_log10_with_nan[~np.isnan(
subdata_log10_with_nan)
]
subdata_log10 = subdata_log10[np.isfinite(subdata_log10)]
if merge_figs:
axes = axes_all[row_nr].squeeze()
else:
fig, axes = plt.subplots(1, 2, figsize=(10 / 2.54, 5 / 2.54))
label = units.get_label(key)
if mpl.rcParams['text.usetex']:
label = label.replace('_', '-')
ax = axes[0]
ax.hist(
subdata,
nr_of_bins,
)
ax.set_xlabel(
label
)
ax.set_ylabel('count')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
ax.tick_params(axis='both', which='major', labelsize=6)
ax.tick_params(axis='both', which='minor', labelsize=6)
if subdata_log10.size > 0:
ax = axes[1]
ax.hist(
subdata_log10,
nr_of_bins,
)
ax.set_xlabel(r'$log_{10}($' + label + ')')
ax.set_ylabel('count')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
else:
pass
# del(axes[1])
fig.tight_layout()
if not merge_figs:
figures[key] = fig
if merge_figs:
figures['all'] = fig
return figures
def plot_histograms_extra_dims(dataobj, keys, primary_dim=None, **kwargs):
"""Produce histograms grouped by one extra dimensions. Produce additional
figures for all extra dimensions.
The dimension to spread out along subplots is called the primary extra
dimension.
Extra dimensions are:
* timesteps
* frequency
If only "timesteps" are present, timesteps will be plotted as subplots.
If only "frequencies" are present, frequencies will be plotted as subplots.
If more than one extra dimension is present, multiple figures will be
generated.
Test cases:
* no extra dims present (primary_dim=None, timestep, frequency)
* timestep (primary_dim=None, timestep, frequency)
* frequency (primary_dim=None, timestep, frequency)
* timestep + frequency (primary_dim=None, timestep, frequency)
check nr of returned figures.
Parameters
----------
dataobj : :py:class:`pandas.DataFrame` or reda container
The data container/data frame which holds the data
keys : string|list|tuple|iterable
The keys (columns) of the dataobj to plot
primary_dim : string
primary extra dimension to plot along subplots. If None, the first
extra dimension found in the data set is used, in the following order:
timestep, frequency.
subquery : str, optional
if provided, apply this query statement to the data before plotting
log10 : bool
Plot only log10 transformation of data (default: False)
lin_and_log10 : bool
Plot both linear and log10 representation of data (default: False)
Nx : int, optional
Number of subplots in x direction
Returns
-------
dim_name : str
name of secondary dimensions, i.e. the dimensions for which separate
figures were created
figures : dict
dict containing the generated figures. The keys of the dict correspond
to the secondary dimension grouping keys
Examples
--------
>>> import reda.testing.containers
>>> ert = reda.testing.containers.ERTContainer_nr
>>> import reda.plotters.histograms as RH
>>> dim_name, fig = RH.plot_histograms_extra_dims(ert, ['r', ])
>>> import reda.testing.containers
>>> ert = reda.testing.containers.ERTContainer_nr
>>> import reda.plotters.histograms as RH
>>> dim_name, fig = RH.plot_histograms_extra_dims(ert, ['r', 'a'])
"""
if isinstance(dataobj, pd.DataFrame):
df_raw = dataobj
else:
df_raw = dataobj.data
if kwargs.get('subquery', False):
df = df_raw.query(kwargs.get('subquery'))
else:
df = df_raw
# define some labels
edim_labels = {
'timestep': (
'time',
''
),
'frequency': (
'frequency',
'Hz',
),
}
# prepare data columns to plot
if isinstance(keys, str):
keys = [keys, ]
columns = keys
N_c = len(columns)
# create log10 plots?
if kwargs.get('lin_and_log10', False):
transformers = ['lin', 'log10']
N_trafo = 2
elif kwargs.get('log10', False):
transformers = ['log10', ]
N_trafo = 1
else:
transformers = ['lin', ]
N_trafo = 1
# available extra dimensions
extra_dims = ('timestep', 'frequency')
# select dimension to plot into subplots
if primary_dim is None or primary_dim not in df.columns:
for extra_dim in extra_dims:
if extra_dim in df.columns:
primary_dim = extra_dim
# now primary_dim is either None (i.e., we don't have any extra dims to
# group), or it contains a valid column to group
# now prepare the secondary dimensions for which we create extra figures
secondary_dimensions = []
for extra_dim in extra_dims:
if extra_dim in df.columns and extra_dim != primary_dim:
secondary_dimensions.append(extra_dim)
# group secondary dimensions, or create a tuple with all the data
if secondary_dimensions:
group_secondary = df.groupby(secondary_dimensions)
else:
group_secondary = (('all', df), )
figures = {}
for sec_g_name, sec_g in group_secondary:
# group over primary dimension
if primary_dim is None:
group_primary = (('all', sec_g), )
N_primary = 1
else:
group_primary = sec_g.groupby(primary_dim)
N_primary = group_primary.ngroups
# determine layout of Figure
Nx_max = kwargs.get('Nx', 4)
N = N_primary * N_c * N_trafo
Nx = min(Nx_max, N)
Ny = int(np.ceil(N / Nx))
size_x = 5 * Nx / 2.54
size_y = 5 * Ny / 2.54
fig, axes = plt.subplots(
Ny, Nx,
figsize=(size_x, size_y),
sharex=True,
sharey=True
)
axes = np.atleast_2d(axes)
index = 0
for p_name, pgroup in group_primary:
for column in columns:
for transformer in transformers:
# print('{0}-{1}-{2}'.format(ts_name, column, transformer))
subdata_raw = pgroup[column].values
subdata = subdata_raw[~np.isnan(subdata_raw)]
subdata = subdata[np.isfinite(subdata)]
if transformer == 'log10':
subdata_log10_with_nan = np.log10(subdata[subdata > 0])
subdata_log10 = subdata_log10_with_nan[~np.isnan(
subdata_log10_with_nan)
]
subdata_log10 = subdata_log10[
np.isfinite(subdata_log10)
]
subdata = subdata_log10
ax = axes.flat[index]
ax.hist(
subdata,
_get_nr_bins(subdata.size),
)
ax.set_xlabel(
units.get_label(column)
)
ax.set_ylabel('count')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(3))
ax.tick_params(axis='both', which='major', labelsize=6)
ax.tick_params(axis='both', which='minor', labelsize=6)
# depending on the type of pname, change the format
p_name_fmt = '{}'
from numbers import Number
if isinstance(p_name, Number):
p_name_fmt = '{:.4f}'
title_str = '{}: ' + p_name_fmt + ' {}'
ax.set_title(
title_str.format(
edim_labels[primary_dim][0],
p_name,
edim_labels[primary_dim][1],
),
fontsize=7.0,
)
index += 1
# remove some labels
for ax in axes[:, 1:].flat:
ax.set_ylabel('')
for ax in axes[:-1, :].flat:
ax.set_xlabel('')
fig.tight_layout()
for ax in axes.flat[index:]:
ax.set_visible(False)
figures[sec_g_name] = fig
return '_'.join(secondary_dimensions), figures
```
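As a quick sanity check of the `_get_nr_bins` heuristic above (the import path is assumed from the file header): 25 samples fall under the square-root choice, 100 samples under Sturges' formula.
```python
from reda.plotters.histograms import _get_nr_bins

assert _get_nr_bins(25) == 5    # square-root choice: ceil(sqrt(25)) = 5
assert _get_nr_bins(100) == 8   # Sturges' formula: ceil(log2(100)) + 1 = 8
```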
#### File: reda/testing/test_container_ert.py
```python
import pandas as pd
import reda
def test_init():
"""test initializing an empty ERT container"""
container = reda.ERT()
def test_init_with_data():
"""test initializing an ERT container and provide good data"""
df = pd.DataFrame(
[
# normals
(0, 1, 2, 4, 3, 1.1),
(0, 1, 2, 5, 4, 1.2),
(0, 1, 2, 6, 5, 1.3),
(0, 1, 2, 7, 6, 1.4),
(0, 2, 3, 5, 4, 1.5),
(0, 2, 3, 6, 5, 1.6),
(0, 2, 3, 7, 6, 1.7),
(0, 3, 4, 6, 5, 1.8),
(0, 3, 4, 7, 6, 1.9),
(0, 4, 5, 7, 6, 2.0),
],
columns=['timestep', 'a', 'b', 'm', 'n', 'r'],
)
container_good = reda.ERT(data=df)
assert container_good.data.shape[0] == df.shape[0]
```
#### File: reda/utils/pseudo_positions.py
```python
import numpy as np
def get_xy_simple_dipole_dipole(dataframe, spacing=1, indices=None):
"""For each configuration indicated by the numerical index array, compute
(x,z) pseudo locations based on the paper from XX.
All positions are computed for indices=None.
"""
if indices is None:
indices = slice(None)
abmn = dataframe.iloc[
indices, dataframe.columns.get_indexer(['a', 'b', 'm', 'n'])
].values
posx = np.mean(abmn[:, 0:4], axis=1)
posz = np.abs(
np.min(abmn[:, 0:2], axis=1) - np.max(abmn[:, 2:4], axis=1)
) * -0.192
# scale the positions with the electrode spacing
posx *= spacing
posz *= spacing
print(abmn.shape, posx.shape)
print('posxz', np.vstack((abmn.T, posx, posz)).T)
return posx, posz
``` |
{
"source": "jhashekhar/CRAFT-pytorch",
"score": 2
} |
#### File: jhashekhar/CRAFT-pytorch/test.py
```python
import sys
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torchvision
import torchvision.transforms.functional as TF
from PIL import Image
import cv2
from skimage import io
import numpy as np
import craft_pytorch.craft_utils as craft_utils
import craft_pytorch.imgproc as imgproc
import craft_pytorch.file_utils as file_utils
import json
import zipfile
from craft_pytorch.craft import CRAFT
from collections import OrderedDict
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
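# Illustrative note (not part of the original file): copyStateDict strips the
# leading "module." prefix that nn.DataParallel adds to checkpoint keys, so a
# hypothetical key "module.conv1.weight" becomes "conv1.weight".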
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
parser = argparse.ArgumentParser(description='CRAFT Text Detection')
parser.add_argument('--trained_model', default='weights/craft_mlt_25k.pth', type=str, help='pretrained model')
parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser.add_argument('--cuda', default=False, type=str2bool, help='Use cuda for inference')
parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
args = parser.parse_args()
""" For test images in a folder """
result_folder = './result/'
if not os.path.isdir(result_folder):
os.mkdir(result_folder)
current_dir = os.path.join(os.getcwd(), 'uploads')
image_path = current_dir + '/' + os.listdir(current_dir)[0]
print(image_path)
def test_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
# resize
img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
# forward pass
y, feature = net(x)
# make score and link map
score_text = y[0,:,:,0].cpu().data.numpy()
score_link = y[0,:,:,1].cpu().data.numpy()
# Post-processing
boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, poly)
# coordinate adjustment
boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
polys = craft_utils.adjustResultCoordinates(polys, ratio_w, ratio_h)
for k in range(len(polys)):
if polys[k] is None: polys[k] = boxes[k]
# render results (optional)
render_img = score_text.copy()
render_img = np.hstack((render_img, score_link))
ret_score_text = imgproc.cvt2HeatmapImg(render_img)
return boxes, polys, ret_score_text
def labels_and_images(img_path, bboxes_list):
# Crop the bounding-box regions out of the image and save them into a folder
list_of_images = []
count = 0
for i in range(len(bboxes_list)):
img = bboxes_list[i]
x1, y1, x2, y2 = img[0], img[1], img[2], img[7]
# Reading and saving image as tensors
img = Image.open(img_path)
x = TF.to_tensor(img)
# :, second:last, first:third
x_t = x[:, y1:y2, x1:x2]
dest_dir = '/Users/xanthate/github/flask-tut/bboxes'
torchvision.utils.save_image(x_t, '{}/bboxes_{}.jpg'.format(dest_dir, i))
list_of_images.append('bboxes_{}.jpg'.format(i))
count += 1
return list_of_images
if __name__ == '__main__':
# load net
net = CRAFT() # initialize
print('Loading weights from checkpoint (' + args.trained_model + ')')
net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))
net.eval()
# load data
print("Test image {:s}".format(image_path), end='\r')
image = imgproc.loadImage(image_path)
bboxes, polys, score_text = test_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly)
# save score text
###################filename, file_ext = os.path.splitext(os.path.basename(image_path))
bboxes_list = []
for box in bboxes:
x = np.array(box).astype(np.int32).reshape((-1))
x = x.tolist()
bboxes_list.append(x)
print("Length of bboxes_list: ", len(bboxes_list))
loi = labels_and_images(image_path, bboxes_list)
#print(bboxes_list, type(bboxes_list), type(bboxes_list[0]))
#file_utils.saveResult(image_path, image[:,:,::-1], polys, dirname=result_folder)
``` |
{
"source": "JhashX/tytus",
"score": 3
} |
#### File: team27/G-27/funcionalidad.py
```python
from environment import *
fp = {
'FUNCTION':'LA FUNCION',
'PROCEDURE': 'EL PROCEDIMIENTO'
}
"""
DBType enum class:
_____________________________________________________________
Needed to provide easy access to the types defined for a database.
"""
from enum import Enum
class DBType(Enum):
smallint = 0
integer = 1
bigint = 2
decimal = 3
numeric = 4
real = 5
double_precision = 6
money = 7
ch_varying = 8
varchar=9
character = 10
char = 11
text = 12
timestamp_wtz = 13
timestamp_tz = 14
date = 15
time_wtz = 16
time_tz = 17
interval = 18
boolean = 19
"""
"tipos" dictionary:
_____________________________________________________________
Provides quick access to the default value that an uninitialized variable must take,
since Python does not allow declaring empty variables.
"""
tipos = {
DBType.smallint: 0,
DBType.integer: 0,
DBType.bigint: 0,
DBType.decimal: 0,
DBType.numeric: 0,
DBType.real: 0,
DBType.double_precision: 0,
DBType.money: 0,
DBType.ch_varying: "\"\"",
DBType.varchar: "\"\"",
DBType.character: "\"\"",
DBType.char: "\"\"",
DBType.text: "\"\"",
DBType.timestamp_tz: "\"1900-01-01 00:00:00\"",
DBType.timestamp_wtz: "\"1900-01-01 00:00:00\"",
DBType.date: "\"1900-01-01 00:00:00\"",
DBType.time_wtz: "\"1900-01-01 00:00:00\"",
DBType.time_tz: "\"1900-01-01 00:00:00\"",
DBType.interval: "\"1900-01-01 00:00:00\"",
DBType.boolean: True
}
"""
declare method:
_____________________________________________________________
Generates the 3-address code for a variable declaration.
identificador: Identifier of the variable
tipo: DBType enum that specifies the type of the variable.
valor: Expression assigned to the variable; if there is none, None is passed.
"""
def declare(identificador, tipo, valor):
temporal = getTemp()
temporales[identificador] = temporal
id = temporal
if valor != None:
if isinstance(valor, str):
return id + '=' + valor + '\n'
return id + '=' + str(valor) + '\n'
default = tipos.get(tipo,'None')
if isinstance(default, str):
return id + '=' + default + '\n'
return id + '=' + str(default) + '\n'
"""
______________________________________________________________
Generates the 3-address code for assigning a value to an already existing variable.
identificador: id of the variable to look up
valor: Value to assign; it must be a string
"""
def assign(identificador, valor):
id = temporales[identificador]
if isinstance(valor,str):
return id + '=' + valor +'\n'
"""
______________________________________________________________
Generates a new temporary in string form.
"""
def getTemp():
global tempCount
id = 'T' + str(tempCount)
tempCount += 1
return id
"""
______________________________________________________________
Generates a new label in string form.
"""
def getLabel():
global labelCount
id = 'L' + str(labelCount)
labelCount += 1
return id
"""
______________________________________________________________
Translates an expression given as a dictionary with the keys:
-left, right and data
where left and right are dictionaries and data is the operator.
"""
def traduct(raiz):
if(raiz != None):
l = None
r = None
if isinstance( raiz, dict):
valoriz = raiz['left']
valorder = raiz['right']
if valoriz != None:
if 'c3d' in valoriz:
l = raiz['left']
elif(raiz['left']):
l = traduct(raiz['left'])
if valorder != None:
if 'c3d' in valorder:
r = raiz['right']
elif(raiz['right']):
r = traduct(raiz['right'])
data = raiz['data']
value = {}
if(data == '+'):#addition
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '+' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '-'):#subtraction
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '-' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '*'):#multiplication
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '*' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '/'):#division
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '/' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '%'):#modulo
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '%' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '^'):#power
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '**' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '<'):#less than
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '<' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '>'):#greater than
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '>' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '<='):#less than or equal
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '<=' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '>='):#greater than or equal
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '>=' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '='):#equal
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '==' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '!='):#not equal
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '!=' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '<>'):#different (not equal)
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + '!=' + r['temp'] + '\n'
value['temp'] = temp
elif(data == 'OR'):#or
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' or ' + r['temp'] + '\n'
value['temp'] = temp
elif(data == 'AND'):#and
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' and ' + r['temp'] + '\n'
value['temp'] = temp
elif(data == 'NOT'):#not
temp = getTemp()
value['c3d'] = l['c3d'] + temp + '= not ' + l['temp'] + '\n'
value['temp'] = temp
elif(data == '||'):#concatenation
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' + ' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '&'):#bitwise and
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' & ' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '|'):#bitwise or
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' | ' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '#'):#bitwise xor
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' # ' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '~'):#bitwise not
temp = getTemp()
value['c3d'] = l['c3d'] + temp + '= ~' + l['temp'] + '\n'
value['temp'] = temp
elif(data == '<<'):#bitwise shift left
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' << ' + r['temp'] + '\n'
value['temp'] = temp
elif(data == '>>'):#bitwise shift right
temp = getTemp()
value['c3d'] = l['c3d'] + r['c3d'] + temp + '=' + l['temp'] + ' >> ' + r['temp'] + '\n'
value['temp'] = temp
else:
value['c3d'] = ""
if isinstance(data, str):
temporal = data
if data in temporales:
temporal = temporales[data]
value['temp'] = temporal
elif isinstance(data,dict):
value = data
else:
value['temp'] = str(data)
return value
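# Illustrative sketch (not in the original source): translating the expression
# 3 + 5 * 2, represented as nested {'left', 'right', 'data'} dictionaries,
# yields 3-address code roughly like
#   T0=5*2
#   T1=3+T0
# where the temporary names come from getTemp() and therefore depend on the
# global tempCount at the time of the call.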
"""
______________________________________________________________
"""
def funcion(diccionarioFuncion, codigo3D):
arregloFunciones.append(diccionarioFuncion) #Add to the general metadata of the functions.
id = diccionarioFuncion['id']
codigoGenerado = '@with_goto\n'
codigoGenerado += "def " + id + '('
for v in diccionarioFuncion['parametros']:
codigoGenerado += v + ','
if codigoGenerado[len(codigoGenerado)-1] == ',':
codigoGenerado = codigoGenerado[:-1]
codigoGenerado += '):\n'
codigo3D = '\n' + codigo3D
codigo3D = codigo3D.replace('\n', '\n\t')
codigo3D = codigo3D[:-1]
temporales.clear()
return codigoGenerado +"\t#INICIA DECLARE"+ codigo3D
"""
______________________________________________________________
"""
def call(id, listaParámetros):
c3d = ""
paramsTemp = []
for v in listaParámetros:
aux = traduct(v)
paramsTemp.append(aux['temp'])
c3d += aux['c3d']
temporal = getTemp()
c3d += temporal + '=' + id + '('
for temp in paramsTemp:
c3d += temp + ','
if c3d[len(c3d)-1] == ',':
c3d = c3d[:-1]
c3d += ')\n'
retorno = {'c3d':c3d,'temp':temporal}
return retorno
"""
______________________________________________________________
"""
def callNative(id, listaParámetros):
c3d = ""
paramsTemp = []
for v in listaParámetros:
aux = traduct(v)
paramsTemp.append(aux['temp'])
c3d += aux['c3d']
temporal = getTemp()
c3d += temporal + '=' + str(get_lower(id)) + '('
for temp in paramsTemp:
c3d += temp + ','
c3d = c3d[:-1]
c3d += ')\n'
return {'c3d':c3d,'temp':temporal}
"""
______________________________________________________________
"""
def returnF (exp):
ret = traduct(exp)
res = ret['c3d'] + '\nreturn ' + ret['temp'] + '\n'
return res
"""
______________________________________________________________
"""
def assignQ(identificador,valor):
id = temporales[identificador]
if isinstance(valor,str):
return '\n' +id + '= parser.parse( \'' + valor +'\')\n'
"""
______________________________________________________________
"""
def resFinal(funciones, codigo):
resultado = 'from goto import with_goto\nfrom parser_ import Parser\nfrom libraries.bstring_functions import *\n'
resultado += 'from libraries.datetime_functions import *\nfrom libraries.math_functions import *\nfrom libraries.trigonometric_functions import *\n\n'
resultado += 'parser = Parser()\n'
for f in funciones:
resultado += f +'\n'
resultado += codigo
funciones = []
return resultado
"""
DICTIONARY FOR THE METHODS
______________________________________________________________
"""
dict_Func = {
'LENGTH':'length',
'SUBSTRING':'substring',
'TRIM':'trim',
'MD5':'md5',
'SHA256':'sha256',
'SUBSTR':'substr',
'GET_BYTE':'get_byte',
'SET_BYTE':'set_byte',
'CONVERT':'convert',
'DECODE':'decode',
'ENCODE':'encode',
'NOW':'now',
'EXTRACT':'extract',
'DATE_PART':'date_part',
'CURRENT_DATE':'current_date',
'CURRENT_TIME':'current_time',
'ABSOLUTE':'absolute',
'CBRT':'cbrt',
'CEIL':'ceil',
'CEILING':'ceiling',
'DEGREES':'degrees',
'DIV':'div',
'EXP':'exp',
'FACTORIAL':'factorial',
'FLOOR':'floor',
'GCD':'gcd',
'LN':'ln',
'LOG':'log',
'PI':'pi',
'POWER':'power',
'RADIANS':'radians',
'SIGN':'sign',
'SQRT':'sqrt',
'TRUNC':'trunc',
'RANDOM':'random',
'ACOS':'acos',
'ACOSD':'acosd',
'ASIN':'asin',
'ASIND':'asind',
'ATAN':'atan',
'ATAND':'atand',
'ATAN2':'atan2',
'ATAN2D':'atan2d',
'COS':'cos',
'COSD':'cosd',
'COT':'cot',
'COTD':'cotd',
'SIN':'sin',
'SIND':'sind',
'TAN':'tan',
'TAND':'tand',
'SINH':'sinh',
'COSH':'cosh',
'TANH':'tanh',
'ASINH':'asinh',
'ACOSH':'acosh',
'ATANH':'atanh',
'ABS':'abs'
}
"""
DICTIONARY FOR THE METHODS
______________________________________________________________
Function to convert a method name string from uppercase to lowercase
"""
def get_lower(name_func):
return dict_Func.get(name_func, name_func)
"""
______________________________________________________________
"""
def deleteProcFunc(tipo, id):
for v in arregloFunciones:
if v['tipo'] == tipo and v['id'] == id:
v['estado'] = 'ELIMINADO'
return 'SELECT "SE HA ELIMINADO ' + fp[tipo] + ' CON ÉXITO.";'
return 'SELECT "ERROR NO SE HA ENCONTRADO' + fp[tipo] + ' QUE DESEA ELIMINAR";'
"""
______________________________________________________________
"""
def AddTs(id, tipo, operacion):
#use the existing temporary if id is in temporales; otherwise default to 'None'
if id in temporales:
variable = {'id':id, 'tipo':tipo, 'temporal': temporales[id],'operacion':operacion}
else:
variable = {'id':id, 'tipo':tipo, 'temporal': 'None','operacion':operacion}
funcionAux.append(variable)
"""
_______________________________________________________________
"""
def modifyTs(id,valor, operacion):
encontrada = {}
for i in range(len(funcionAux)):
if id == funcionAux[i]['id']:
encontrada['id'] = funcionAux[i]['id']
encontrada['tipo'] = funcionAux[i]['tipo']
encontrada['temporal'] = valor
encontrada['operacion'] = operacion
funcionAux.append(encontrada)
break
"""
_______________________________________________________________
"""
def genTable(functionId):
aux = []
for v in funcionAux:
aux.append(v)
arregloF.append({'id':functionId,'valor':aux})
funcionAux.clear()
``` |
{
"source": "jhasid/oneNeuron_pypi",
"score": 3
} |
#### File: src/oneNeuron/pereceptron.py
```python
import numpy as np
import logging
from tqdm import tqdm
class Perceptron:
def __init__(self,eta,epochs): #eta is learning rate
self.weights = np.random.randn(3) * 1e-4 #making small weight init
logging.info(f"initial weights before training :\n {self.weights}")
self.eta = eta
self.epochs = epochs
def activationfunction(self,inputs,weights):
z = np.dot(inputs,weights) # z = inputs · weights (matrix product)
logging.info("#"*10)
#logging.info(f"activation z vale :\n{z}")
#logging.info("#"*10)
return np.where(z > 0,1,0)
def fit(self,x,y):
self.x = x
self.y = y
x_with_bias = np.c_[self.x,-np.ones((len(self.x),1))] #concatenation of x and bias to give matrix
logging.info(f"x with bais : \n{x_with_bias}")
#for epoch in range(self.epochs):
for epoch in tqdm(range(self.epochs),total=self.epochs,desc="training the model"):
logging.info("#"*10)
logging.info(f"for epoch :{epoch}")
logging.info("#"*10)
y_hat = self.activationfunction(x_with_bias,self.weights) # z value 0 or 1 #forward propagation
logging.info(f"predicted value after forward pass :\n{y_hat}")
logging.info(f"expected value after forward pass :\n{y}")
self.error = self.y - y_hat #error = y - y_hat
logging.info(f"error :\n{self.error}")
self.weights = self.weights + self.eta * np.dot(x_with_bias.T,self.error) # new weights = old weights + eta * (x_with_bias.T dot error) #backward propagation
logging.info(f"update weights after epoch :\n{epoch}/{self.epochs} : {self.weights}")
logging.info("#"*10)
def predict(self,x):
x_with_bias = np.c_[x,-np.ones((len(x),1))]
return self.activationfunction(x_with_bias,self.weights) # x with updated weights
def total_loss(self):
total_loss = np.sum(self.error)
logging.info(f"total loss :{total_loss}")
return total_loss
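# Hedged usage sketch (not part of the original module): training on an OR
# truth table; the learning rate and epoch count below are illustrative only.
#
# import numpy as np
# X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# y = np.array([0, 1, 1, 1])
# model = Perceptron(eta=0.3, epochs=10)
# model.fit(X, y)
# print(model.predict(X), model.total_loss())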
``` |
{
"source": "jhaskinsjr/ussim",
"score": 2
} |
#### File: lime/implementation/lsu.py
```python
import os
import sys
import argparse
import logging
import functools
import struct
import service
import toolbox
import components.simplecache
import riscv.execute
import riscv.syscall.linux
def fetch_block(service, state, addr):
_blockaddr = state.get('l1dc').blockaddr(addr)
_blocksize = state.get('l1dc').nbytesperblock
state.get('pending_fetch').append(_blockaddr)
service.tx({'event': {
'arrival': 1 + state.get('cycle'),
'l2': {
'cmd': 'peek',
'addr': _blockaddr,
'size': _blocksize,
},
}})
toolbox.report_stats(service, state, 'flat', 'l1dc.misses')
def do_l1dc(service, state, addr, size, data=None):
service.tx({'info': 'addr : {}'.format(addr)})
_ante = None
_post = None
if state.get('l1dc').fits(addr, size):
_data = state.get('l1dc').peek(addr, size)
# service.tx({'info': '_data : {}'.format(_data)})
if not _data:
if len(state.get('pending_fetch')): return # only 1 pending fetch at a time is primitive, but good enough for now
fetch_block(service, state, addr)
return
else:
_blockaddr = state.get('l1dc').blockaddr(addr)
_blocksize = state.get('l1dc').nbytesperblock
_size = _blockaddr + _blocksize - addr
_ante = state.get('l1dc').peek(addr, _size)
if not _ante:
if len(state.get('pending_fetch')): return # only 1 pending fetch at a time is primitive, but good enough for now
fetch_block(service, state, addr)
return
_post = state.get('l1dc').peek(addr + _size, size - _size)
if not _post:
if len(state.get('pending_fetch')): return # only 1 pending fetch at a time is primitive, but good enough for now
fetch_block(service, state, addr + _size)
return
# NOTE: In an L1DC with only a single block, an incoming _post would
# always displace _ante, and an incoming _ante would always displace
# _post... but an L1DC with only a single block would not be very
# useful in practice, so no effort will be made to handle that scenario.
# Like: No effort AT ALL.
_data = _ante + _post
assert len(_data) == size
if data:
# STORE
service.tx({'result': {
'arrival': 2 + state.get('cycle'),
'l1dc': {
'addr': addr,
'size': size,
},
}})
if _ante:
assert _post
state.get('l1dc').poke(addr, _ante)
state.get('l1dc').poke(addr + size, _post)
else:
state.get('l1dc').poke(addr, data)
# writethrough
service.tx({'event': {
'arrival': 1 + state.get('cycle'),
'l2': {
'cmd': 'poke',
'addr': addr,
'size': len(data),
'data': data
}
}})
else:
# LOAD
service.tx({'result': {
'arrival': 2 + state.get('cycle'), # must not arrive in commit the same cycle as the LOAD instruction
'l1dc': {
'addr': addr,
'size': size,
'data': _data,
},
}})
state.get('executing').pop(0)
if len(state.get('pending_fetch')): state.get('pending_fetch').pop(0)
toolbox.report_stats(service, state, 'flat', 'l1dc.accesses')
def do_unimplemented(service, state, insn):
logging.info('Unimplemented: {}'.format(state.get('insn')))
service.tx({'undefined': insn})
def do_load(service, state, insn):
do_l1dc(service, state, insn.get('operands').get('addr'), insn.get('nbytes'))
def do_store(service, state, insn):
_data = insn.get('operands').get('data')
_data = {
'SD': _data,
'SW': _data[:4],
'SH': _data[:2],
'SB': _data[:1],
}.get(insn.get('cmd'))
do_l1dc(service, state, insn.get('operands').get('addr'), insn.get('nbytes'), _data)
def do_execute(service, state):
# NOTE: simpliying to only one in-flight LOAD/STORE at a time
_insn = (state.get('executing')[0] if len(state.get('executing')) else (state.get('pending_execute')[0] if len(state.get('pending_execute')) else None))
if not _insn: return
if not len(state.get('executing')): state.get('executing').append(state.get('pending_execute').pop(0))
service.tx({'info': '_insn : {}'.format(_insn)})
{
'LD': do_load,
'LW': do_load,
'LH': do_load,
'LB': do_load,
'LWU': do_load,
'LHU': do_load,
'LBU': do_load,
'SD': do_store,
'SW': do_store,
'SH': do_store,
'SB': do_store,
}.get(_insn.get('cmd'), do_unimplemented)(service, state, _insn)
def do_tick(service, state, results, events):
for _l2 in filter(lambda x: x, map(lambda y: y.get('l2'), results)):
_addr = _l2.get('addr')
if _addr == state.get('operands').get('l2'):
state.get('operands').update({'l2': _l2.get('data')})
elif _addr in state.get('pending_fetch'):
service.tx({'info': '_l2 : {}'.format(_l2)})
state.get('l1dc').poke(_addr, _l2.get('data'))
for _insn in map(lambda y: y.get('lsu'), filter(lambda x: x.get('lsu'), events)):
state.get('pending_execute').append(_insn.get('insn'))
# TODO: should this commit event be done in alu like everything else?
service.tx({'event': {
'arrival': 1 + state.get('cycle'),
'commit': {
'insn': _insn.get('insn'),
}
}})
do_execute(service, state)
if '__main__' == __name__:
parser = argparse.ArgumentParser(description='μService-SIMulator: Load-Store Unit')
parser.add_argument('--debug', '-D', dest='debug', action='store_true', help='output debug messages')
parser.add_argument('--quiet', '-Q', dest='quiet', action='store_true', help='suppress status messages')
parser.add_argument('--log', type=str, dest='log', default='/tmp', help='logging output directory (absolute path!)')
parser.add_argument('launcher', help='host:port of μService-SIMulator launcher')
args = parser.parse_args()
logging.basicConfig(
filename=os.path.join(args.log, '{}.log'.format(os.path.basename(__file__))),
format='%(message)s',
level=(logging.DEBUG if args.debug else logging.INFO),
)
logging.debug('args : {}'.format(args))
if not args.quiet: print('Starting {}...'.format(sys.argv[0]))
_launcher = {x:y for x, y in zip(['host', 'port'], args.launcher.split(':'))}
_launcher['port'] = int(_launcher['port'])
logging.debug('_launcher : {}'.format(_launcher))
state = {
'service': 'lsu',
'cycle': 0,
'l1dc': None,
'pending_fetch': [],
'active': True,
'running': False,
'ack': True,
'pending_execute': [],
'executing': [],
'operands': {},
'config': {
'l1dc.nsets': 2**4,
'l1dc.nways': 2**1,
'l1dc.nbytesperblock': 2**4,
},
}
state.update({'l1dc': components.simplecache.SimpleCache(
state.get('config').get('l1dc.nsets'),
state.get('config').get('l1dc.nways'),
state.get('config').get('l1dc.nbytesperblock'),
)})
_service = service.Service(state.get('service'), _launcher.get('host'), _launcher.get('port'))
while state.get('active'):
state.update({'ack': True})
msg = _service.rx()
# _service.tx({'info': {'msg': msg, 'msg.size()': len(msg)}})
# print('msg : {}'.format(msg))
for k, v in msg.items():
if {'text': 'bye'} == {k: v}:
state.update({'active': False})
state.update({'running': False})
elif {'text': 'run'} == {k: v}:
state.update({'running': True})
state.update({'ack': False})
elif 'tick' == k:
state.update({'cycle': v.get('cycle')})
_results = v.get('results')
_events = v.get('events')
do_tick(_service, state, _results, _events)
if state.get('ack') and state.get('running'): _service.tx({'ack': {'cycle': state.get('cycle')}})
if not args.quiet: print('Shutting down {}...'.format(sys.argv[0]))
```
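The load-store unit above communicates with the rest of the simulator through dict-shaped messages: a cache miss in `fetch_block` emits an `event` asking the L2 to `peek` a whole block, and the satisfied LOAD later emits a `result` carrying the requested bytes. A sketch of the shapes involved, with illustrative addresses, sizes, and cycle numbers:
```python
# event emitted by fetch_block() on an L1DC miss (block-aligned address)
event = {'event': {'arrival': 101, 'l2': {'cmd': 'peek', 'addr': 0x1000, 'size': 16}}}

# result emitted by do_l1dc() once the LOAD can be served from the L1DC
result = {'result': {'arrival': 102, 'l1dc': {'addr': 0x1008, 'size': 8, 'data': [0] * 8}}}
```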
#### File: riscv/constants/__init__.py
```python
JUMPS = ['JALR', 'JAL']
BRANCHES = ['BEQ', 'BNE', 'BLT', 'BGE', 'BLTU', 'BGEU']
LOADS = ['LD', 'LW', 'LH', 'LB', 'LWU', 'LHU', 'LBU']
STORES = ['SD', 'SW', 'SH', 'SB']
def register_mask(wbits):
return {
64: (2 ** 64) - 1,
32: (2 ** 32) - 1,
16: (2 ** 16) - 1,
8: (2 ** 8) - 1,
}.get(wbits, None)
def integer_to_list_of_bytes(v, wbits, byte_order):
return list((v & register_mask(wbits)).to_bytes((wbits // 8), byte_order))
```
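A quick worked example of the two helpers above (the import path is assumed from the file header):
```python
from riscv.constants import register_mask, integer_to_list_of_bytes

assert register_mask(32) == 0xFFFFFFFF
assert integer_to_list_of_bytes(1, 32, 'little') == [1, 0, 0, 0]
assert integer_to_list_of_bytes(-1, 16, 'little') == [0xFF, 0xFF]
```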
#### File: riscv/decode/__init__.py
```python
import functools
import struct
def compressed_unimplemented_instruction(word, **kwargs):
return {
'cmd': 'Undefined',
'word': word,
'size': 2,
}
def uncompressed_unimplemented_instruction(word, **kwargs):
return {
'cmd': 'Undefined',
'word': word,
'size': 4,
}
def c_j(word, **kwargs):
# C.J performs an unconditional control transfer. The offset is
# sign-extended and added to the pc to form the jump target address.
# C.J can therefore target a ±2 KiB range. C.J expands to jal x0,
# offset[11:1].
return {
'cmd': 'JAL',
'imm': kwargs.get('imm'),
'rd': 0,
'word': word,
'size': 2,
}
def c_jr(word):
return {
'cmd': 'JALR',
'imm': 0,
'rs1': compressed_rs1_or_rd(word),
'rd': 0,
'word': word,
'size': 2,
}
def c_beqz(word, **kwargs):
# BEQZ performs conditional control transfers. The offset is
# sign-extended and added to the pc to form the branch target address.
# It can therefore target a ±256 B range. C.BEQZ takes the branch if
# the value in register rs1' is zero. It expands to
# beq rs1', x0, offset[8:1].
return {
'cmd': 'BEQ',
'imm': kwargs.get('imm'),
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': 0,
'taken': None,
'word': word,
'size': 2,
}
def c_bnez(word, **kwargs):
# C.BNEZ performs conditional control transfers. The offset is
# sign-extended and added to the pc to form the branch target address.
# It can therefore target a ±256 B range. C.BNEZ takes the branch if
# the value in register rs1' is nonzero. It expands to
# bne rs1', x0, offset[8:1].
return {
'cmd': 'BNE',
'imm': kwargs.get('imm'),
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': 0,
'taken': None,
'word': word,
'size': 2,
}
def c_mv(word):
# C.MV copies the value in register rs2 into register rd. C.MV expands into add rd, x0, rs2;
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.106)
return {
'cmd': 'ADD',
'rs1': 0,
'rs2': compressed_rs2(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_lui(word, **kwargs):
# C.LUI loads the non-zero 6-bit immediate field into bits 17–12 of the
# destination register, clears the bottom 12 bits, and sign-extends bit
# 17 into all higher bits of the destination. C.LUI expands into
# lui rd, nzimm[17:12]. C.LUI is only valid when rd≠{x0, x2}, and when
# the immediate is not equal to zero.
#
# C.LUI nzimm[17] dest≠{0, 2} nzimm[16:12] C1
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.104)
return {
'cmd': 'LUI',
'rd': compressed_rs1_or_rd(word),
'imm': kwargs.get('imm'),
'word': word,
'size': 2,
}
def c_ldsp(word):
# C.LDSP is an RV64C/RV128C-only instruction that loads a 64-bit value from memory
# into register rd. It computes its effective address by adding the zero-extended
# offset, scaled by 8, to the stack pointer, x2. It expands to ld rd, offset[8:3](x2);
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.99)
#
# 011 uimm[5] rd≠0 uimm[4:3|8:6] 10 C.LDSP (RV64/128; RES, rd=0);
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.111)
_b080706 = (word >> 2) & 0b111
_b0403 = (word >> 5) & 0b11
_b05 = (word >> 12) & 0b1
_imm = (_b080706 << 6) | (_b05 << 5) | (_b0403 << 3)
return {
'cmd': 'LD',
'rs1': 2,
'imm': _imm,
'rd': compressed_rs1_or_rd(word),
'nbytes': 8,
'word': word,
'size': 2,
}
def c_lw(word, **kwargs):
# C.LW loads a 32-bit value from memory into register rd'. It computes
# an effective address by adding the zero-extended offset, scaled by 4,
# to the base address in register rs1 ′. It expands to
# lw rd', offset[6:2](rs1').
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.101)
return {
'cmd': 'LW',
'rs1': compressed_quadrant_00_rs1_prime(word),
'imm': kwargs.get('imm'),
'rd': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'nbytes': 4,
'word': word,
'size': 2,
}
def c_ld(word, **kwargs):
# C.LD is an RV64C/RV128C-only instruction that loads a 64-bit value from memory
# into register rd'. It computes an effective address by adding the zero-extended
# offset, scaled by 8, to the base address in register rs1'. It expands to ld rd',
# offset[7:3](rs1').
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.101)
return {
'cmd': 'LD',
'rs1': compressed_quadrant_00_rs1_prime(word),
'imm': kwargs.get('imm'),
'rd': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'nbytes': 8,
'word': word,
'size': 2,
}
def c_sd(word, **kwargs):
# C.SD is an RV64C/RV128C-only instruction that stores a 64-bit value in
# register rs2' to memory. It computes an effective address by adding the
# zero-extended offset, scaled by 8, to the base address in register rs1'.
# It expands to sd rs2', offset[7:3](rs1')
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.102)
return {
'cmd': 'SD',
'rs1': compressed_quadrant_00_rs1_prime(word),
'rs2': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'imm': kwargs.get('imm'),
'nbytes': 8,
'word': word,
'size': 2,
}
def c_sw(word, **kwargs):
# C.SW stores a 32-bit value in register rs2' to memory. It computes an
# effective address by adding the zero-extended offset, scaled by 4, to
# the base address in register rs1'. It expands to sw rs2', offset[6:2](rs1')
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.102)
return {
'cmd': 'SW',
'rs1': compressed_quadrant_00_rs1_prime(word),
'rs2': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'imm': kwargs.get('imm'),
'nbytes': 4,
'word': word,
'size': 2,
}
def c_addi4spn(word, **kwargs):
# C.ADDI4SPN is a CIW-format instruction that adds a zero-extended non-zero
# immediate, scaledby 4, to the stack pointer, x2, and writes the result to rd'.
# This instruction is used to generate pointers to stack-allocated variables,
# and expands to addi rd', x2, nzuimm[9:2].
return {
'cmd': 'ADDI',
'imm': kwargs.get('imm'),
'rs1': 2,
'rd': compressed_quadrant_00_rs2_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_addi16sp(word, **kwargs):
# C.ADDI16SP is used to adjust the stack pointer in procedure prologues and
# epilogues. It expands into addi x2, x2, nzimm[9:4]. C.ADDI16SP is only
# valid when nzimm≠0; the code point with nzimm=0 is reserved.
return {
'cmd': 'ADDI',
'imm': kwargs.get('imm'),
'rs1': 2,
'rd': 2,
'word': word,
'size': 2,
}
def c_sdsp(word, **kwargs):
# C.SDSP is an RV64C/RV128C-only instruction that stores a 64-bit value in
# register rs2 to memory. It computes an effective address by adding the
# zero-extended offset, scaled by 8, to the stack pointer, x2. It expands to
# sd rs2, offset[8:3](x2).
return {
'cmd': 'SD',
'imm': kwargs.get('imm'),
'rs1': 2,
'rs2': compressed_rs2(word),
'nbytes': 8,
'word': word,
'size': 2,
}
def c_addi(word, **kwargs):
# C.ADDI adds the non-zero sign-extended 6-bit immediate to the value in
# register rd then writes the result to rd. C.ADDI expands into
# addi rd, rd, nzimm[5:0]. C.ADDI is only valid when rd≠x0. The code point
# with both rd=x0 and nzimm=0 encodes the C.NOP instruction; the remaining
# code points with either rd=x0 or nzimm=0 encode HINTs.
return {
'cmd': 'ADDI',
'imm': kwargs.get('imm'),
'rs1': compressed_rs1_or_rd(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_addiw(word, **kwargs):
# C.ADDIW is an RV64C/RV128C-only instruction that performs the same
# computation but produces a 32-bit result, then sign-extends result to 64
# bits. C.ADDIW expands into addiw rd, rd, imm[5:0]. The immediate can be
# zero for C.ADDIW, where this corresponds to sext.w rd. C.ADDIW is only
# valid when rd̸=x0; the code points with rd=x0 are reserved.
return {
'cmd': 'ADDIW',
'imm': kwargs.get('imm'),
'rs1': compressed_rs1_or_rd(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_nop(word):
return {
'cmd': 'NOP',
'word': word,
'size': 2,
}
def c_add(word):
# C.ADD adds the values in registers rd and rs2 and writes the result to
# register rd. C.ADD expands into add rd, rd, rs2. C.ADD is only valid when
# rs2̸=x0; the code points with rs2=x0 correspond to the C.JALR and C.EBREAK
# instructions. The code points with rs2̸=x0 and rd=x0 are HINTs.
return {
'cmd': 'ADD',
'rs1': compressed_rs1_or_rd(word),
'rs2': compressed_rs2(word),
'rd': compressed_rs1_or_rd(word),
'word': word,
'size': 2,
}
def c_sub(word):
# C.SUB subtracts the value in register rs2' from the value in register rd',
# then writes the result to register rd'. C.SUB expands into
# sub rd', rd', rs2'.
return {
'cmd': 'SUB',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_xor(word):
# C.XOR computes the bitwise XOR of the values in registers rd'
# and rs2', then writes the result to register rd'. C.XOR expands
# into xor rd', rd', rs2'.
return {
'cmd': 'XOR',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_or(word):
# C.OR computes the bitwise OR of the values in registers rd'
# and rs2', then writes the result to register rd'. C.OR expands
# into or rd', rd', rs2'.
return {
'cmd': 'OR',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_and(word):
# C.AND computes the bitwise AND of the values in registers rd'
# and rs2', then writes the result to register rd'. C.AND expands
# into and rd', rd', rs2'.
return {
'cmd': 'AND',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_subw(word):
# C.SUBW is an RV64C/RV128C-only instruction that subtracts the value
# in register rs2' from the value in register rd', then sign-extends
# the lower 32 bits of the difference before writing the result to
# register rd'. C.SUBW expands into subw rd', rd', rs2 ′.
return {
'cmd': 'SUBW',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_addw(word):
# C.ADDW is an RV64C/RV128C-only instruction that adds the values
# in registers rd' and rs2', then sign-extends the lower 32 bits of
# the sum before writing the result to register rd'. C.ADDW
# expands into addw rd', rd', rs2'
return {
'cmd': 'ADDW',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rs2': compressed_quadrant_01_rs2_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'word': word,
'size': 2,
}
def c_li(word, **kwargs):
# C.LI loads the sign-extended 6-bit immediate, imm, into register rd. C.LI
    # expands into addi rd, x0, imm[5:0]. C.LI is only valid when rd≠x0; the code
# points with rd=x0 encode HINTs.
return {
'cmd': 'ADDI',
'rs1': 0,
'rd': compressed_rs1_or_rd(word),
'imm': kwargs.get('imm'),
'word': word,
'size': 2,
}
def c_slli(word, **kwargs):
# C.SLLI is a CI-format instruction that performs a logical left shift
# of the value in register rd then writes the result to rd. The shift
# amount is encoded in the shamt field. For RV128C, a shift amount of
# zero is used to encode a shift of 64. C.SLLI expands into
# slli rd, rd, shamt[5:0], except for RV128C with shamt=0, which expands
# to slli rd, rd, 64.
return {
'cmd': 'SLLI',
'rs1': compressed_rs1_or_rd(word),
'rd': compressed_rs1_or_rd(word),
'shamt': kwargs.get('imm'),
'word': word,
'size': 2,
}
def c_srli(word, **kwargs):
# C.SRLI is a CB-format instruction that performs a logical right shift
# of the value in register rd' then writes the result to rd'. The shift
# amount is encoded in the shamt field. For RV128C, a shift amount of
# zero is used to encode a shift of 64. Furthermore, the shift amount
# is sign-extended for RV128C, and so the legal shift amounts are 1–31,
# 64, and 96–127. C.SRLI expands into srli rd', rd', shamt[5:0], except
    # for RV128C with shamt=0, which expands to srli rd', rd', 64.
return {
'cmd': 'SRLI',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'shamt': kwargs.get('imm'),
'word': word,
'size': 2,
}
def c_srai(word, **kwargs):
# C.SRAI is defined analogously to C.SRLI, but instead performs an
# arithmetic right shift. C.SRAI expands to srai rd', rd', shamt[5:0].
return {
'cmd': 'SRAI',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'shamt': kwargs.get('imm'),
'word': word,
'size': 2,
}
def c_andi(word, **kwargs):
# C.ANDI is a CB-format instruction that computes the bitwise AND
# of the value in register rd' and the sign-extended 6-bit immediate,
# then writes the result to rd'. C.ANDI expands to andi rd', rd', imm[5:0].
return {
'cmd': 'ANDI',
'rs1': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'rd': compressed_quadrant_01_rs1_prime_or_rd_prime(word),
'imm': kwargs.get('imm'),
'word': word,
'size': 2,
}
def lui(word):
# LUI (load upper immediate) is used to build 32-bit constants and
# uses the U-type format. LUI places the U-immediate value in the top
# 20 bits of the destination register rd, filling in the lowest 12
# bits with zeros. ... The 32-bit result is sign-extended to 64 bits.
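    # Illustrative example: a U-immediate field of 0x80000 becomes 0x80000000
    # after the shift below and sign-extends to -2147483648 (0xFFFFFFFF80000000).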
_imm = uncompressed_imm32(word)
_imm <<= 12
_b31 = (_imm >> 31) & 0b1
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b31 << x, range(32, 64)), _imm)
_imm = int.from_bytes(struct.Struct('<Q').pack(_imm), 'little', signed=True)
return {
'cmd': 'LUI',
'imm': _imm,
'rd': uncompressed_rd(word),
'word': word,
'size': 4,
}
def auipc(word):
# AUIPC (add upper immediate to pc) is used to build pc-relative
# addresses and uses the U-type format. AUIPC forms a 32-bit offset
# from the 20-bit U-immediate, filling in the lowest 12 bits with
# zeros, adds this offset to the address of the AUIPC instruction,
# then places the result in register rd.
_imm = uncompressed_imm32(word)
_imm <<= 12
_b31 = (_imm >> 31) & 0b1
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b31 << x, range(32, 64)), _imm)
_imm = int.from_bytes(struct.Struct('<Q').pack(_imm), 'little', signed=True)
return {
'cmd': 'AUIPC',
'imm': _imm,
'rd': uncompressed_rd(word),
'word': word,
'size': 4,
}
def jal(word):
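    # The jump and link (JAL) instruction uses the J-type format, where the
    # J-immediate encodes a signed offset in multiples of 2 bytes. JAL stores
    # the address of the instruction following the jump (pc+4) into register rd.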
return {
'cmd': 'JAL',
'imm': uncompressed_imm21(word, signed=True),
'rd': uncompressed_rd(word),
'word': word,
'size': 4,
}
def jalr(word):
# The indirect jump instruction JALR (jump and link register) uses
# the I-type encoding. The target address is obtained by adding the
# sign-extended 12-bit I-immediate to the register rs1, then setting
# the least-significant bit of the result to zero. The address of
# the instruction following the jump (pc+4) is written to register
# rd. Register x0 can be used as the destination if the result is
# not required.
# see: https://riscv.org/wp-content/uploads/2019/12/riscv-spec-20191213.pdf (p. 21)
return {
'cmd': 'JALR',
'imm': uncompressed_i_type_imm12(word, signed=True),
'rs1': uncompressed_rs1(word),
'rd': uncompressed_rd(word),
'word': word,
'size': 4,
}
def i_type(word):
# imm[11:0] rs1 000 rd 0010011 ADDI
# imm[11:0] rs1 010 rd 0010011 SLTI
# imm[11:0] rs1 011 rd 0010011 SLTIU
# imm[11:0] rs1 100 rd 0010011 XORI
# imm[11:0] rs1 110 rd 0010011 ORI
# imm[11:0] rs1 111 rd 0010011 ANDI
# 0000000 shamt rs1 001 rd 0010011 SLLI
# 0000000 shamt rs1 101 rd 0010011 SRLI
# 0100000 shamt rs1 101 rd 0010011 SRAI
# imm[11:0] rs1 000 rd 0011011 ADDIW
# 0000000 shamt rs1 001 rd 0011011 SLLIW
# 0000000 shamt rs1 101 rd 0011011 SRLIW
# 0100000 shamt rs1 101 rd 0011011 SRAIW
# see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.130)
_cmds = {
        0b001_0011: {
            0b000: {'cmd': 'ADDI', 'imm': uncompressed_i_type_imm12(word, signed=True)},
            0b010: {'cmd': 'SLTI', 'imm': uncompressed_i_type_imm12(word, signed=True)},
            0b011: {'cmd': 'SLTIU', 'imm': int.from_bytes((uncompressed_i_type_imm12(word, signed=True) & ((2**64) - 1)).to_bytes(8, 'little'), 'little')},
            0b100: {'cmd': 'XORI', 'imm': uncompressed_i_type_imm12(word, signed=True)},
            0b110: {'cmd': 'ORI', 'imm': uncompressed_i_type_imm12(word, signed=True)},
            0b001: {'cmd': 'SLLI', 'shamt': uncompressed_i_type_shamt(word)},
            # bit 30 distinguishes the arithmetic right shift; funct7 overlaps shamt[5] on RV64
            0b101: {'cmd': ('SRAI' if (word >> 30) & 0b1 else 'SRLI'), 'shamt': uncompressed_i_type_shamt(word)},
            0b111: {'cmd': 'ANDI', 'imm': uncompressed_i_type_imm12(word, signed=True)},
        },
        0b001_1011: {
            0b000: {'cmd': 'ADDIW', 'imm': uncompressed_i_type_imm12(word, signed=True)},
            0b001: {'cmd': 'SLLIW', 'shamt': uncompressed_i_type_shamt(word)},
            0b101: {'cmd': ('SRAIW' if (word >> 30) & 0b1 else 'SRLIW'), 'shamt': uncompressed_i_type_shamt(word)},
        }
}.get(uncompressed_opcode(word), {})
    if uncompressed_funct3(word) not in _cmds:
return uncompressed_unimplemented_instruction(word)
_retval = {
**_cmds.get(uncompressed_funct3(word)),
**{
'rs1': uncompressed_rs1(word),
'rd': uncompressed_rd(word),
'word': word,
'size': 4,
},
}
return _retval
def r_type(word):
# 0000000 rs2 rs1 000 rd 0110011 ADD
# 0100000 rs2 rs1 000 rd 0110011 SUB
# 0000000 rs2 rs1 001 rd 0110011 SLL
# 0000000 rs2 rs1 010 rd 0110011 SLT
# 0000000 rs2 rs1 011 rd 0110011 SLTU
# 0000000 rs2 rs1 100 rd 0110011 XOR
# 0000000 rs2 rs1 101 rd 0110011 SRL
# 0100000 rs2 rs1 101 rd 0110011 SRA
# 0000000 rs2 rs1 110 rd 0110011 OR
# 0000000 rs2 rs1 111 rd 0110011 AND
# 0000000 rs2 rs1 000 rd 0111011 ADDW
# 0100000 rs2 rs1 000 rd 0111011 SUBW
# 0000000 rs2 rs1 001 rd 0111011 SLLW
# 0000000 rs2 rs1 101 rd 0111011 SRLW
# 0100000 rs2 rs1 101 rd 0111011 SRAW
# see:
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.130, 131)
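    # Note: the dispatch table below also covers the RV64M multiply/divide
    # instructions (MUL, MULH, DIV, REM, ... and their *W variants), which are
    # not listed in the base-ISA excerpt above.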
_cmds = {
(0b000_0000, 0b000, 0b011_0011): 'ADD',
(0b010_0000, 0b000, 0b011_0011): 'SUB',
(0b000_0000, 0b001, 0b011_0011): 'SLL',
(0b000_0000, 0b100, 0b011_0011): 'XOR',
(0b000_0000, 0b101, 0b011_0011): 'SRL',
(0b010_0000, 0b101, 0b011_0011): 'SRA',
(0b000_0000, 0b110, 0b011_0011): 'OR',
(0b000_0000, 0b111, 0b011_0011): 'AND',
(0b000_0000, 0b000, 0b011_1011): 'ADDW',
(0b010_0000, 0b000, 0b011_1011): 'SUBW',
(0b000_0001, 0b000, 0b011_0011): 'MUL',
(0b000_0001, 0b001, 0b011_0011): 'MULH',
(0b000_0001, 0b010, 0b011_0011): 'MULHSU',
(0b000_0001, 0b011, 0b011_0011): 'MULHU',
(0b000_0001, 0b100, 0b011_0011): 'DIV',
(0b000_0001, 0b101, 0b011_0011): 'DIVU',
(0b000_0001, 0b110, 0b011_0011): 'REM',
(0b000_0001, 0b111, 0b011_0011): 'REMU',
(0b000_0001, 0b000, 0b011_1011): 'MULW',
(0b000_0001, 0b100, 0b011_1011): 'DIVW',
(0b000_0001, 0b101, 0b011_1011): 'DIVUW',
(0b000_0001, 0b110, 0b011_1011): 'REMW',
(0b000_0001, 0b111, 0b011_1011): 'REMUW',
}
_cmd = _cmds.get((uncompressed_funct7(word), uncompressed_funct3(word), uncompressed_opcode(word)), 'Undefined')
return {
'cmd': _cmd,
'rs1': uncompressed_rs1(word),
'rs2': uncompressed_rs2(word),
'rd': uncompressed_rd(word),
'word': word,
'size': 4,
}
def b_type(word):
_cmds = {
0b000: 'BEQ',
0b001: 'BNE',
0b100: 'BLT',
0b101: 'BGE',
0b110: 'BLTU',
0b111: 'BGEU',
}
    if uncompressed_funct3(word) not in _cmds:
        uncompressed_illegal_instruction(word)
_cmd = _cmds.get(uncompressed_funct3(word))
return {
'cmd': _cmd,
'rs1': uncompressed_rs1(word),
'rs2': uncompressed_rs2(word),
        'imm': uncompressed_b_type_imm13(word, signed=True),
'taken': None,
'word': word,
'size': 4,
}
def system(word):
_cmds = {
0b0000_0000_0000: 'ECALL',
}
    if uncompressed_i_type_imm12(word) not in _cmds:
        uncompressed_illegal_instruction(word)
_cmd = _cmds.get(uncompressed_i_type_imm12(word))
return {
'cmd': _cmd,
'word': word,
'size': 4,
}
def load(word):
# imm[11:0] rs1 000 rd 0000011 LB
# imm[11:0] rs1 001 rd 0000011 LH
# imm[11:0] rs1 010 rd 0000011 LW
# imm[11:0] rs1 011 rd 0000011 LD
# imm[11:0] rs1 100 rd 0000011 LBU
# imm[11:0] rs1 101 rd 0000011 LHU
# imm[11:0] rs1 110 rd 0000011 LWU
_variety = {
0b000: {'cmd': 'LB', 'nbytes': 1},
0b001: {'cmd': 'LH', 'nbytes': 2},
0b010: {'cmd': 'LW', 'nbytes': 4},
0b011: {'cmd': 'LD', 'nbytes': 8},
0b100: {'cmd': 'LBU', 'nbytes': 1},
0b101: {'cmd': 'LHU', 'nbytes': 2},
0b110: {'cmd': 'LWU', 'nbytes': 4},
}.get((word >> 12) & 0b111)
return {
**_variety,
**{
'imm': uncompressed_load_imm12(word, signed=True),
'rs1': uncompressed_rs1(word),
'rd': uncompressed_rd(word),
'word': word,
'size': 4,
},
}
def store(word):
# imm[11:5] rs2 rs1 000 imm[4:0] 0100011 SB
# imm[11:5] rs2 rs1 001 imm[4:0] 0100011 SH
# imm[11:5] rs2 rs1 010 imm[4:0] 0100011 SW
# imm[11:5] rs2 rs1 011 imm[4:0] 0100011 SD
# see: https://riscv.org/wp-content/uploads/2019/12/riscv-spec-20191213.pdf (p. 130, 131)
_variety = {
0b000: {'cmd': 'SB', 'nbytes': 1},
0b001: {'cmd': 'SH', 'nbytes': 2},
0b010: {'cmd': 'SW', 'nbytes': 4},
0b011: {'cmd': 'SD', 'nbytes': 8},
}.get(uncompressed_funct3(word))
return {
**_variety,
**{
'imm': uncompressed_store_imm12(word, signed=True),
'rs1': uncompressed_rs1(word),
'rs2': uncompressed_rs2(word),
'word': word,
'size': 4,
},
}
def fence(word):
return {
'cmd': 'FENCE',
'word': word,
'size': 4,
}
def compressed_illegal_instruction(word, **kwargs):
assert False, 'Illegal instruction ({:04x})!'.format(word)
def decode_compressed(word):
# print('decode_compressed({:04x})'.format(word))
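    # Illustrative example: the canonical C.NOP encoding 0x0001 has quadrant
    # bits 0b01 and funct3 0b000, so it is dispatched to compressed_quadrant_01
    # and ultimately decoded by c_nop.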
return {
0b00: compressed_quadrant_00,
0b01: compressed_quadrant_01,
0b10: compressed_quadrant_10,
}.get(compressed_quadrant(word), compressed_unimplemented_instruction)(word)
def compressed_quadrant(word):
# print('compressed_quadrant({:04x})'.format(word))
return word & 0b11
def compressed_quadrant_00(word):
return {
0b000: compressed_quadrant_00_opcode_000,
0b010: compressed_quadrant_00_opcode_010,
0b011: compressed_quadrant_00_opcode_011,
0b110: compressed_quadrant_00_opcode_110,
0b111: compressed_quadrant_00_opcode_111,
}.get(compressed_opcode(word), compressed_unimplemented_instruction)(word)
def compressed_quadrant_00_opcode_000(word):
    # 000 nzuimm[5:4|9:6|2|3] rd' 00 ; C.ADDI4SPN (RES, nzuimm=0)
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.110)
_impl = compressed_unimplemented_instruction
_b03 = (word >> 5) & 0b1
_b02 = (word >> 6) & 0b1
_b09080706 = (word >> 7) & 0b1111
_b0504 = (word >> 11) & 0b11
_imm = (_b09080706 << 6) | (_b0504 << 4) | (_b03 << 3) | (_b02 << 2)
if 0 == _imm:
_impl = compressed_illegal_instruction
else:
_impl = c_addi4spn
return _impl(word, imm=_imm)
def compressed_quadrant_00_opcode_010(word):
# 010 uimm[5:3] rs1' uimm[2|6] rd' 00 C.LW
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.110)
_impl = c_lw
_b06 = (word >> 5) & 0b1
_b02 = (word >> 6) & 0b1
    _b050403 = (word >> 10) & 0b111
_imm = (_b06 << 6) | (_b050403 << 3) | (_b02 << 2)
return _impl(word, imm=_imm)
def compressed_quadrant_00_opcode_011(word):
    # 011 uimm[5:3] rs1' uimm[7:6] rd' 00 C.LD (RV64/128)
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.110)
_impl = c_ld
_b0706 = (word >> 5) & 0b11
_b050403 = (word >> 10) & 0b111
_imm = (_b0706 << 6) | (_b050403 << 3)
return _impl(word, imm=_imm)
def compressed_quadrant_00_opcode_110(word):
# 110 uimm[5:3] rs1' uimm[2|6] rs2' 00 C.SW
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.110)
_impl = c_sw
_b06 = (word >> 5) & 0b1
_b02 = (word >> 6) & 0b1
_b050403 = (word >> 10) & 0b111
_imm = (_b06 << 6) | (_b050403 << 3) | (_b02 << 2)
return _impl(word, imm=_imm)
def compressed_quadrant_00_opcode_111(word):
# 111 uimm[5:3] rs1' uimm[7:6] rs2' 00 C.SD (RV64/128)
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.110)
_impl = c_sd
_b0706 = (word >> 5) & 0b11
_b050403 = (word >> 10) & 0b111
_imm = (_b0706 << 6) | (_b050403 << 3)
return _impl(word, imm=_imm)
def compressed_quadrant_00_rs1_prime(word):
# RVC Register Number 000 001 010 011 100 101 110 111
# Integer Register Number x8 x9 x10 x11 x12 x13 x14 x15
# Integer Register ABI Name s0 s1 a0 a1 a2 a3 a4 a5
# Floating-Point Register Number f8 f9 f10 f11 f12 f13 f14 f15
# Floating-Point Register ABI Name fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5
    # Table 16.2: Registers specified by the three-bit rs1', rs2', and rd' fields of the CIW, CL, CS, CA,
# and CB formats; see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.98)
#
# see also: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
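    # e.g. an rs1' field of 0b010 selects integer register x10 (ABI name a0).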
return 8 + ((word >> 7) & 0b111)
def compressed_quadrant_00_rs2_prime_or_rd_prime(word):
return 8 + ((word >> 2) & 0b111)
def compressed_quadrant_01(word):
return {
0b000: compressed_quadrant_01_opcode_000,
0b001: compressed_quadrant_01_opcode_001,
0b010: compressed_quadrant_01_opcode_010,
0b011: compressed_quadrant_01_opcode_011,
0b100: compressed_quadrant_01_opcode_100,
0b101: compressed_quadrant_01_opcode_101,
0b110: compressed_quadrant_01_opcode_110,
0b111: compressed_quadrant_01_opcode_111,
}.get(compressed_opcode(word), compressed_unimplemented_instruction)(word)
def compressed_quadrant_01_opcode_000(word):
    # 000 nzimm[5] rs1/rd≠0 nzimm[4:0] 01 C.ADDI (HINT, nzimm=0) (p.111)
_impl = compressed_unimplemented_instruction
_b05 = (word >> 12) & 0b1
_b0403020100 = (word >> 2) & 0b1_1111
_imm = (_b05 << 5) | (_b0403020100)
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b05 << x, range(6, 16)), _imm)
_imm = int.from_bytes(struct.Struct('<H').pack(_imm), 'little', signed=True)
if 0 != compressed_rs1_or_rd(word):
_impl = c_addi
else:
_impl = c_nop
return _impl(word, imm=_imm)
def compressed_quadrant_01_opcode_001(word):
    # 001 imm[5] rs1/rd≠0 imm[4:0] 01 C.ADDIW (RV64/128; RES, rd=0)
_impl = compressed_unimplemented_instruction
if 0 != compressed_rs1_or_rd(word):
_impl = c_addiw
_b05 = (word >> 12) & 0b1
_b0403020100 = (word >> 2) & 0b1_1111
_imm = (_b05 << 5) | (_b0403020100)
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b05 << x, range(6, 16)), _imm)
_imm = int.from_bytes(struct.Struct('<H').pack(_imm), 'little', signed=True)
else:
_imm = None
return _impl(word, imm=_imm)
def compressed_quadrant_01_opcode_010(word):
    # 010 imm[5] rd≠0 imm[4:0] 01 C.LI (HINT, rd=0)
_impl = compressed_unimplemented_instruction
_b0403020100 = (word >> 2) & 0b1_1111
_b05 = (word >> 12) & 0b1
_imm = (_b05 << 5) | _b0403020100
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b05 << x, range(6, 16)), _imm)
_imm = int.from_bytes(struct.Struct('<H').pack(_imm), 'little', signed=True)
if 0 == compressed_rs1_or_rd(word):
_impl = compressed_illegal_instruction
else:
_impl = c_li
return _impl(word, imm=_imm)
def compressed_quadrant_01_opcode_011(word):
    # 011 nzimm[9] 2 nzimm[4|6|8:7|5] 01 C.ADDI16SP (RES, nzimm=0) (p.111)
    # 011 nzimm[17] rd≠{0, 2} nzimm[16:12] 01 C.LUI (RES, nzimm=0; HINT, rd=0)
_impl = compressed_unimplemented_instruction
if 2 == compressed_rs1_or_rd(word):
_b05 = (word >> 2) & 0b1
_b0807 = (word >> 3) & 0b11
_b06 = (word >> 5) & 0b1
_b04 = (word >> 6) & 0b1
_b09 = (word >> 12) & 0b1
_imm = (_b09 << 9) | (_b0807 << 7) | (_b06 << 6) | (_b05 << 5) | (_b04 << 4)
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b09 << x, range(10, 16)), _imm)
_imm = int.from_bytes(struct.Struct('<H').pack(_imm), 'little', signed=True)
if 0 == _imm:
_impl = compressed_illegal_instruction
else:
_impl = c_addi16sp
else:
_b17 = (word >> 12) & 0b1
_b1615141312 = (word >> 2) & 0b1_1111
_imm = (_b17 << 17) | (_b1615141312 << 12)
if 0 == _imm:
_impl = compressed_illegal_instruction
else:
_imm <<= 12
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b17 << x, range(18 + 12, 32)), _imm)
_imm = int.from_bytes(struct.Struct('<I').pack(_imm), 'little', signed=True)
_impl = c_lui
return _impl(word, imm=_imm)
def compressed_quadrant_01_opcode_100(word):
    # 100 nzuimm[5] 00 rs1'/rd' nzuimm[4:0] 01 C.SRLI (RV32 NSE, nzuimm[5]=1)
    # 100 0 00 rs1'/rd' 0 01 C.SRLI64 (RV128; RV32/64 HINT)
    # 100 nzuimm[5] 01 rs1'/rd' nzuimm[4:0] 01 C.SRAI (RV32 NSE, nzuimm[5]=1)
    # 100 0 01 rs1'/rd' 0 01 C.SRAI64 (RV128; RV32/64 HINT)
    # 100 imm[5] 10 rs1'/rd' imm[4:0] 01 C.ANDI
    # 100 0 11 rs1'/rd' 00 rs2' 01 C.SUB
    # 100 0 11 rs1'/rd' 01 rs2' 01 C.XOR
    # 100 0 11 rs1'/rd' 10 rs2' 01 C.OR
    # 100 0 11 rs1'/rd' 11 rs2' 01 C.AND
    # 100 1 11 rs1'/rd' 00 rs2' 01 C.SUBW (RV64/128; RV32 RES)
    # 100 1 11 rs1'/rd' 01 rs2' 01 C.ADDW (RV64/128; RV32 RES)
    # 100 1 11 — 10 — 01 Reserved
    # 100 1 11 — 11 — 01 Reserved
_impl = compressed_unimplemented_instruction
_imm = None
_b12 = (word >> 12) & 0b1
_b1110 = (word >> 10) & 0b11
_b0605 = (word >> 5) & 0b11
if 0b00 == _b1110:
_impl = c_srli
_imm = (_b12 << 5) | (word >> 2) & 0b1_1111
elif 0b01 == _b1110:
_impl = c_srai
_imm = (_b12 << 5) | (word >> 2) & 0b1_1111
elif 0b10 == _b1110:
_impl = c_andi
_imm = int.from_bytes([(_b12 << 7) | (_b12 << 6) | (_b12 << 5) | (word >> 2) & 0b1_1111], 'little', signed=True)
elif 0b11 == _b1110:
if 0b0 == _b12:
_impl = {
0b00: c_sub,
0b01: c_xor,
0b10: c_or,
0b11: c_and,
}.get(_b0605)
else:
_impl = {
0b00: c_subw,
0b01: c_addw,
            }.get(_b0605, compressed_unimplemented_instruction)
    return (_impl(word, imm=_imm) if _imm is not None else _impl(word))
def compressed_quadrant_01_opcode_101(word):
# 101 imm[11|4|9:8|10|6|7|3:1|5] 01 C.J
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.111)
_impl = c_j
_b05 = (word >> 2) & 0b1
_b030201 = (word >> 3) & 0b111
_b07 = (word >> 6) & 0b1
_b06 = (word >> 7) & 0b1
_b10 = (word >> 8) & 0b1
_b0908 = (word >> 9) & 0b11
_b04 = (word >> 11) & 0b1
_b11 = (word >> 12) & 0b1
_imm = (_b11 << 11) | (_b10 << 10) | (_b0908 << 8) | (_b07 << 7) | (_b06 << 6) | (_b05 << 5) | (_b04 << 4) | (_b030201 << 1)
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b11 << x, range(12, 16)), _imm)
_imm = int.from_bytes(struct.Struct('<H').pack(_imm), 'little', signed=True)
return _impl(word, imm=_imm)
def compressed_quadrant_01_opcode_110(word):
# 110 imm[8|4:3] rs1 ′ imm[7:6|2:1|5] 01 C.BEQZ
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.111)
_impl = c_beqz
_b05 = (word >> 2) & 0b1
_b0201 = (word >> 3) & 0b11
_b0706 = (word >> 5) & 0b11
_b0403 = (word >> 10) & 0b11
_b08 = (word >> 12) & 0b1
_imm = (_b08 << 8) | (_b0706 << 6) | (_b05 << 5) | (_b0403 << 3) | (_b0201 << 1)
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b08 << x, range(9, 16)), _imm)
_imm = int.from_bytes(struct.Struct('<H').pack(_imm), 'little', signed=True)
return _impl(word, imm=_imm)
def compressed_quadrant_01_opcode_111(word):
# 111 imm[8|4:3] rs1 ′ imm[7:6|2:1|5] 01 C.BNEZ
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.111)
_impl = c_bnez
_b05 = (word >> 2) & 0b1
_b0201 = (word >> 3) & 0b11
_b0706 = (word >> 5) & 0b11
_b0403 = (word >> 10) & 0b11
_b08 = (word >> 12) & 0b1
_imm = (_b08 << 8) | (_b0706 << 6) | (_b05 << 5) | (_b0403 << 3) | (_b0201 << 1)
_imm = functools.reduce(lambda a, b: a | b, map(lambda x: _b08 << x, range(9, 16)), _imm)
_imm = int.from_bytes(struct.Struct('<H').pack(_imm), 'little', signed=True)
return _impl(word, imm=_imm)
def compressed_quadrant_01_rs1_prime_or_rd_prime(word):
# RVC Register Number 000 001 010 011 100 101 110 111
# Integer Register Number x8 x9 x10 x11 x12 x13 x14 x15
# Integer Register ABI Name s0 s1 a0 a1 a2 a3 a4 a5
# Floating-Point Register Number f8 f9 f10 f11 f12 f13 f14 f15
# Floating-Point Register ABI Name fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5
    # Table 16.2: Registers specified by the three-bit rs1', rs2', and rd' fields of the CIW, CL, CS, CA,
# and CB formats; see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.98)
#
# see also: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
return 8 + ((word >> 7) & 0b111)
def compressed_quadrant_01_rs2_prime(word):
return 8 + ((word >> 2) & 0b111)
def compressed_quadrant_10(word):
# print('compressed_quadrant_10({:04x})'.format(word))
return {
0b000: compressed_quadrant_10_opcode_000,
0b011: compressed_quadrant_10_opcode_011,
0b100: compressed_quadrant_10_opcode_100,
0b111: compressed_quadrant_10_opcode_111,
}.get(compressed_opcode(word), compressed_unimplemented_instruction)(word)
def compressed_quadrant_10_opcode_000(word):
    # 000 nzuimm[5] rs1/rd≠0 nzuimm[4:0] 10 C.SLLI (HINT, rd=0; RV32 NSE, nzuimm[5]=1)
_impl = compressed_unimplemented_instruction
if 0 == compressed_rs1_or_rd(word):
        _imm = None
else:
_b05 = (word >> 12) & 0b1
_b0403020100 = (word >> 2) & 0b1_1111
_imm = (_b05 << 5) | _b0403020100
_impl = c_slli
return _impl(word, imm=_imm)
def compressed_quadrant_10_opcode_011(word):
_impl = compressed_unimplemented_instruction
if 0 == compressed_rs1_or_rd(word):
pass
else:
_impl = c_ldsp
return _impl(word)
def compressed_quadrant_10_opcode_100(word):
# print('compressed_quadrant_10_opcode_100()')
_impl = compressed_unimplemented_instruction
_b12 = (word >> 12) & 0b1
if 0 == _b12:
if 0 == compressed_rs2(word):
_impl = c_jr
else:
_impl = c_mv
else:
        # 100 1 rs1/rd≠0 rs2≠0 10 C.ADD (HINT, rd=0)
if 0 != compressed_rs1_or_rd(word) and 0 != compressed_rs2(word):
_impl = c_add
else:
pass
return _impl(word)
def compressed_quadrant_10_opcode_111(word):
    # 111 uimm[5:3|8:6] rs2 10 C.SDSP (RV64/128)
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.111)
_impl = c_sdsp
_b080706 = (word >> 7) & 0b111
_b050403 = (word >> 10) & 0b111
_imm = (_b080706 << 6) | (_b050403 << 3)
return _impl(word, imm=_imm)
def compressed_opcode(word):
# print('compressed_opcode({:04x}): -> {}'.format(word, (word & 0b1110_0000_0000_0000) >> 13))
return (word & 0b1110_0000_0000_0000) >> 13
def compressed_rs1_or_rd(word):
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
return (word >> 7) & 0b1_1111
def compressed_rs2(word):
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
return (word >> 2) & 0b1_1111
#def compressed_rs1_prime_or_rd_prime(word):
# # RVC Register Number 000 001 010 011 100 101 110 111
# # Integer Register Number x8 x9 x10 x11 x12 x13 x14 x15
# # Integer Register ABI Name s0 s1 a0 a1 a2 a3 a4 a5
# # Floating-Point Register Number f8 f9 f10 f11 f12 f13 f14 f15
# # Floating-Point Register ABI Name fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5
# # Table 16.2: Registers specified by the three-bit rs1 ′, rs2 ′, and rd ′ fields of the CIW, CL, CS, CA,
# # and CB formats; see: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p.98)
# # see also: https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
# return (word >> 7) & 0b111
#def compressed_rs2_prime(word):
# # https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
# return (word >> 2) & 0b111
def compressed_imm6(word, **kwargs):
# nzimm[5] 00000 nzimm[4:0]
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
_b05 = (word & 0b1_0000_0000_0000) >> 12
_b0403020100 = (word & 0b111_1100) >> 2
_retval = _b05 << 5
_retval |= _b0403020100
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b05 << x, range(6, 8)), _retval)
return int.from_bytes(struct.Struct('<B').pack(_retval), 'little', **kwargs)
def compressed_imm10(word, **kwargs):
# nzimm[9] 00010 nzimm[4|6|8:7|5]
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 111)
_tmp = (word >> 2) & 0b111_1111_1111
_b05 = (_tmp & 0b1) >> 0
_b0807 = (_tmp & 0b110) >> 1
_b06 = (_tmp & 0b1000) >> 3
_b04 = (_tmp & 0b1_0000) >> 4
_b09 = (_tmp & 0b100_0000_0000) >> 10
_retval = _b09 << 9
_retval |= _b0807 << 7
_retval |= _b06 << 6
_retval |= _b05 << 5
_retval |= _b04 << 4
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b09 << x, range(10, 16)), _retval)
return int.from_bytes(struct.Struct('<H').pack(_retval), 'little', **kwargs)
def compressed_imm12(word, **kwargs):
# imm[11|4|9:8|10|6|7|3:1|5]
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf, (p. 111)
_tmp = (word & 0b0001_1111_1111_1100) >> 2
_b05 = (_tmp & 0b1) >> 0
_b030201 = (_tmp & 0b1110) >> 1
_b07 = (_tmp & 0b1_0000) >> 4
_b06 = (_tmp & 0b10_0000) >> 5
_b10 = (_tmp & 0b100_0000) >> 6
_b0908 = (_tmp & 0b1_1000_0000) >> 7
_b04 = (_tmp & 0b10_0000_0000) >> 9
_b11 = (_tmp & 0b100_0000_0000) >> 10
_retval = _b11 << 11
_retval |= _b10 << 10
_retval |= _b0908 << 8
_retval |= _b07 << 7
_retval |= _b06 << 6
_retval |= _b05 << 5
_retval |= _b04 << 4
_retval |= _b030201 << 3
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b11 << x, range(12, 16)), _retval)
return int.from_bytes(struct.Struct('<H').pack(_retval), 'little', **kwargs)
def uncompressed_illegal_instruction(word, **kwargs):
assert False, 'Illegal instruction ({:08x})!'.format(word)
def decode_uncompressed(word):
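    # Illustrative example: the canonical NOP encoding 0x00000013 has opcode
    # 0b001_0011, so it is dispatched to i_type and decoded as ADDI x0, x0, 0.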
return {
0b000_0011: load,
0b000_1111: fence,
0b011_0111: lui,
0b001_0111: auipc,
0b110_1111: jal,
0b110_0111: jalr,
0b010_0011: store,
0b001_0011: i_type,
0b001_1011: i_type,
0b011_1011: r_type,
0b011_0011: r_type,
0b110_0011: b_type,
0b111_0011: system,
}.get(uncompressed_opcode(word), uncompressed_unimplemented_instruction)(word)
def uncompressed_opcode(word):
return (word & 0b111_1111)
def uncompressed_rs1(word):
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 130)
return (word >> 15) & 0b1_1111
def uncompressed_rs2(word):
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 130)
return (word >> 20) & 0b1_1111
def uncompressed_rd(word):
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 130)
return (word >> 7) & 0b1_1111
def uncompressed_i_type_imm12(word, **kwargs):
# imm[11:0] rs1 funct3 rd opcode I-type
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 130)
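    # Illustrative example: for 0xfff00093 (addi x1, x0, -1) the raw 12-bit
    # field is 0xfff, and the sign-extended value returned here is -1.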
_tmp = (word >> 20) & 0b1111_1111_1111
_b11 = (_tmp & 0b1000_0000_0000) >> 11
_retval = _tmp
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b11 << x, range(12, 32)), _retval)
return int.from_bytes(struct.Struct('<I').pack(_retval), 'little', **kwargs)
def uncompressed_imm21(word, **kwargs):
# imm[20|10:1|11|19:12] rrrrr ooooooo
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 130)
_b1918171615141312 = (word >> 12) & 0b1111_1111
_b11 = (word >> 20) & 0b1
_b10090807060504030201 = (word >> 21) & 0b11_1111_1111
_b20 = (word >> 31) & 0b1
_retval = _b20 << 20
_retval |= _b1918171615141312 << 12
_retval |= _b11 << 11
_retval |= _b10090807060504030201 << 1
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b20 << x, range(21, 32)), _retval)
return int.from_bytes(struct.Struct('<I').pack(_retval), 'little', **kwargs)
def uncompressed_imm32(word, **kwargs):
# imm[31:12] rrrrr ooooooo
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 130)
# _retval = word & 0b1111_1111_1111_1111_1111_0000_0000_0000
# return int.from_bytes(struct.Struct('<I').pack(_retval), 'little', **kwargs)
# return int.from_bytes(_retval.to_bytes(4, 'little'), 'little', **kwargs)
return (word >> 12) & 0b1111_1111_1111_1111_1111
def uncompressed_funct7(word):
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 16)
return (word >> 25) & 0b111_1111
def uncompressed_funct3(word):
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 16)
return (word >> 12) & 0b111
def uncompressed_i_type_shamt(word):
return (word >> 20) & 0b11_1111
def uncompressed_load_imm12(word, **kwargs):
# imm[11:0] rs1 011 rd 0000011 LD
_b11 = (word >> 31) & 0b1
_retval = (word >> 20) & 0b1111_1111_1111
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b11 << x, range(12, 32)), _retval)
return int.from_bytes(struct.Struct('<I').pack(_retval), 'little', **kwargs)
def uncompressed_store_imm12(word, **kwargs):
# imm[11:5] rs2 rs1 011 imm[4:0] 0100011 SD
_b0403020100 = (word >> 7) & 0b1_1111
_b100908070605 = (word >> 25) & 0b111_1111
_b11 = (word >> 31) & 0b1
_retval = _b11 << 11
_retval |= _b100908070605 << 5
_retval |= _b0403020100
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b11 << x, range(12, 32)), _retval)
return int.from_bytes(struct.Struct('<I').pack(_retval), 'little', **kwargs)
def uncompressed_b_type_imm13(word, **kwargs):
# imm[12|10:5] rs2 rs1 000 imm[4:1|11] 1100011 BEQ
# imm[12|10:5] rs2 rs1 001 imm[4:1|11] 1100011 BNE
# https://riscv.org/wp-content/uploads/2019/06/riscv-spec.pdf (p. 130)
_b11 = (word >> 7) & 0b1
_b04030201 = (word >> 8) & 0b1111
_b100908070605 = (word >> 25) & 0b11_1111
_b12 = (word >> 31) & 0b1
_retval = _b12 << 12
_retval |= _b11 << 11
_retval |= _b100908070605 << 5
_retval |= _b04030201 << 1
_retval = functools.reduce(lambda a, b: a | b, map(lambda x: _b12 << x, range(13, 32)), _retval)
return int.from_bytes(struct.Struct('<I').pack(_retval), 'little', **kwargs)
def do_decode(buffer, max_insns):
_retval = []
while max_insns > len(_retval) and len(buffer):
_word = int.from_bytes(buffer[:4], 'little')
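        # An instruction whose two least-significant bits are 0b11 is a full
        # 32-bit encoding; any other value marks a 16-bit compressed encoding.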
if 0x3 == _word & 0x3:
if 4 > len(buffer): break
_retval.append(decode_uncompressed(_word))
else:
_word &= 0xffff
_retval.append(decode_compressed(_word))
return _retval
``` |
{
"source": "jhaslam/bc_vaccine_card_decoder",
"score": 3
} |
#### File: jhaslam/bc_vaccine_card_decoder/shc_decode.py
```python
import base64
import json
import zlib
import yaml
def extract_numeric(qr_code: str) -> str:
# Strip off the beginning 'shc:/' from the QR code string, if it is
# present. It is just there to indicate MIME type.
# This produces a "numeric" encoded string
numeric_encoded = qr_code
if qr_code[:5] == 'shc:/':
numeric_encoded = qr_code[5:]
return numeric_encoded
def numeric_to_jws(numeric_encoded: str) -> str:
# Convert a numeric encoded string to a JWS encoded string:
# 1) Split the numeric string into pairs of numbers, each of which
# represents a single JWS character.
    # 2) Add 45 to the decimal value of each pair.
    # 3) Convert each resulting value to its corresponding character.
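    # Illustrative example: the pair "56" decodes to chr(56 + 45) == chr(101) == 'e'.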
pairs = (numeric_encoded[i:i + 2] for i in range(0, len(numeric_encoded), 2))
decimal_vals = (int(val) for val in pairs)
shifted_vals = (val + 45 for val in decimal_vals)
characters = (chr(val) for val in shifted_vals)
return ''.join(characters)
def extract_jws_payload(jws: str) -> str:
# Extracts the payload from a JWS encoded string.
# JWS encoded strings consist of three fields delimited by a dot '.':
# "JWS Protected Header", "JWS Payload", and "JWS Signature"
# The payload portion is encoded with zip and base64 encoding
extracted_payload = jws.split('.')[1]
decoded_payload = base64_decode(extracted_payload)
return decompress(decoded_payload)
def base64_decode(encoded: str) -> bytes:
# JWS encodes binary data using base64URL rather than standard base64
# Padding needs to be recovered or decode will fail
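    # e.g. a 10-character base64url string needs '=' * (-10 % 4) == '==' appended.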
padded = encoded + ('=' * (-len(encoded) % 4))
return base64.urlsafe_b64decode(padded)
def decompress(zipped: bytes) -> str:
# JWS compresses payload data using the "RFC 1951 Deflate" data format
inflated_bytes = zlib.decompress(zipped, wbits=-zlib.MAX_WBITS)
return inflated_bytes.decode("utf-8")
def to_yaml_str(payload) -> str:
# JWS payloads are in JSON.
# Convert this to YAML for pretty-printing purposes
return yaml.dump(payload)
def extract_payload(qr_code: str):
# Main entrypoint to the payload extraction process
numeric = extract_numeric(qr_code)
jws = numeric_to_jws(numeric)
jws_payload = extract_jws_payload(jws)
return json.loads(jws_payload)
def main():
qr_code = input('Paste your QR Code here: ')
payload = extract_payload(qr_code)
pretty_payload = to_yaml_str(payload)
print('\n\nYour decoded Vaccine Card data is as follows:\n\n', pretty_payload)
if __name__ == "__main__":
main()
``` |
{
"source": "jhasmany-jhunnior/F.I.C.C.T.-Proyecto_Calculadora_AB-Python_Flask",
"score": 4
} |
#### File: jhasmany-jhunnior/F.I.C.C.T.-Proyecto_Calculadora_AB-Python_Flask/CArbol.py
```python
from CNodo import Nodo
class Arbol:
def __init__(self):
self.__root = None
    '''Getter for the root attribute.'''
@property
def root(self):
return self.__root
    '''Setter for the root attribute.'''
@root.setter
def root(self, nodo):
self.__root = nodo
    '''Checks whether the root is empty.'''
def Vacio(self):
return self.__root is None
    '''Inserts a value into the tree.'''
def Insertar(self, dat):
if self.Vacio() :
self.root = Nodo(dat)
else:
self.__Insert(dat, self.root)
    '''Method called by Insertar(); recursively traverses
    the tree until the new value is inserted.'''
def __Insert(self, dat , nodo):
if nodo is None:
nodo = Nodo(dat)
else:
if dat < nodo.dato:
if nodo.izq is None:
nodo.izq = Nodo(dat)
else:
self.__Insert(dat, nodo.izq)
elif dat > nodo.dato:
if nodo.der is None:
nodo.der = Nodo(dat)
else:
self.__Insert(dat, nodo.der)
    '''In-order traversal.'''
def InOrden(self, raiz):
if raiz is not None :
self.InOrden(raiz.izq)
print(raiz.dato, end=" ")
self.InOrden(raiz.der)
    '''Pre-order traversal.'''
def PreOrden(self, raiz):
if raiz is not None:
print(raiz.dato, end=" ")
self.PreOrden(raiz.izq)
self.PreOrden(raiz.der)
    '''Post-order traversal.'''
def PostOrden(self, raiz):
if raiz is not None:
self.PostOrden(raiz.izq)
self.PostOrden(raiz.der)
print(raiz.dato)
if __name__ == "__main__":
Raiz = Arbol()
Raiz.Insertar(21)
Raiz.Insertar(13)
Raiz.Insertar(15)
Raiz.Insertar(10)
Raiz.Insertar(18)
Raiz.Insertar(25)
Raiz.Insertar(40)
Raiz.InOrden(Raiz.root)
``` |
{
"source": "jhasse/tudi",
"score": 3
} |
#### File: jhasse/tudi/level13.py
```python
from star import Star
from levelbase import LevelBase
import jngl
class Level(LevelBase):
def __init__(self):
LevelBase.__init__(self)
self.stars = [Star(450, 420), Star(450, 360), Star(370, 360), Star(530, 360), Star(370, 280), Star(530, 280), Star(290, 280), Star(610, 280),
Star(290, 200), Star(610, 200), Star(210, 200), Star(690, 200),
Star(370, 200, "red"), Star(530, 200, "red"), Star(450, 280, "red"),
Star(610, 360, "red"), Star(290, 360, "red")]
```
#### File: jhasse/tudi/level18.py
```python
from star import Star
from levelbase import LevelBase
from box import Box
import jngl
class Level(LevelBase):
def __init__(self):
LevelBase.__init__(self)
self.stars = []
for y in range(160, 450, 80):
self.stars.append(Star(730, y, "red"))
for x in range(380, 700, 80):
self.stars.append(Star(x, 100))
self.stars.append(Star(x, 180))
self.boxes = [Box(280, 250, 380, 50)]
def drawHints(self):
jngl.print("Jump onto this box.", 350, 320)
jngl.print("Well done!", 400, -50)
```
#### File: jhasse/tudi/level21.py
```python
from star import Star
from levelbase import LevelBase
import jngl
class Level(LevelBase):
def __init__(self):
LevelBase.__init__(self)
self.stars = [Star(100, 200, "green" ), Star(200, 200, "red" ), Star(300, 200, "green" ), Star(400, 200, "red" ), Star(500, 200, "red" ), Star(600, 200, "red" ), Star(700, 200, "green"),
Star(100, 300, "green" ), Star(200, 300, "green"),
Star(100, 400, "green"), Star(200, 400, "red" ), Star(300, 400, "green"), Star(400, 400, "red" ), Star(500, 400, "red" ), Star(600, 400, "red" ), Star(700, 400, "red" )]
for star in self.stars:
star.x += 30
def drawHints(self):
jngl.print("Green Stars are worth 5 points.", 310, 290)
```
#### File: jhasse/tudi/level26.py
```python
from star import Star
from levelbase import LevelBase
from box import Box
import jngl
class Level(LevelBase):
def __init__(self):
LevelBase.__init__(self)
self.stars = [ Star(720, 190), Star(60, -180), Star(300, -1000, "blue"), Star(380, -1000, "blue"), Star(460, -1000, "blue"),
Star(750, -510, "red")]
self.boxes = [ Box(770, -400, 20, 860), Box(700, 240, 70, 20), Box(400, 5, 70, 20), Box(0, -120, 250, 20), Box(0, -320, 140, 20),
Box(100, -520, 600, 20), Box(100, -720, 600, 20), Box(770, -1500, 20, 920) ]
```
#### File: jhasse/tudi/level3.py
```python
from star import Star
from levelbase import LevelBase
import jngl
class Level(LevelBase):
def __init__(self):
LevelBase.__init__(self)
self.stars = []
for x in range(170, 770, 160):
for y in range(250, 430, 160):
self.stars.append(Star(x, y, "blue"))
def drawHints(self):
jngl.print("Blue stars are worth 10 points.", 180, 170)
jngl.print("You can press M to turn off the music.", 160, 325)
``` |
{
"source": "jhauberg-archived/cards-deep-django",
"score": 3
} |
#### File: cards-deep-django/core/rules.py
```python
import logging
logger = logging.getLogger(__name__)
from models import Session, Card, CardDetail, Stack
from models import CARD_KIND_WEAPON, CARD_KIND_POTION, CARD_KIND_MONSTER, CARD_KIND_SCRAP, CARD_KIND_TREASURE
import random
HEALTH_CAPACITY = 20
REQUIRED_TURNS_BEFORE_SKIPPING = 5
ROOM_CAPACITY = 5
TREASURE_CAPACITY = 10
FORGE_CAPACITY = 10
DISCARD_CAPACITY = 10
def roll(min, max):
"""
Returns a random number between `min` and `max`.
"""
return random.randint(min, max)
def get_random_card_id_in_value_range(min, max, offset):
"""
    Randomly picks a card id whose value lies between `min` and `max`,
    shifted by the given `offset`. The offset determines the kind of card.
"""
card_id = roll(
min + offset,
max + offset)
return card_id
def get_random_weapon_id_in_value_range(min, max):
if min < 2 or max > 10:
return None
return get_random_card_id_in_value_range(min, max, 1)
def get_random_potion_id_in_value_range(min, max):
if min < 2 or max > 10:
return None
return get_random_card_id_in_value_range(min, max, 10)
def get_random_monster_id_in_value_range(min, max):
if min < 2 or max > 14:
return None
return get_random_card_id_in_value_range(min, max, 19)
def start(player):
"""
Attempts creating a new game session for the given player.
"""
if not player:
return None
# Initialize all the stacks.
room_stack = Stack()
room_stack.save()
you_stack = Stack()
you_stack.save()
equipment_stack = Stack()
equipment_stack.save()
forge_stack = Stack()
forge_stack.save()
treasure_stack = Stack()
treasure_stack.save()
discard_stack = Stack()
discard_stack.save()
# Begin a new session.
session = Session(
health=HEALTH_CAPACITY,
# Important to note that a session has to be tied to a player. Same goes for
# cards and stacks; they must, ultimately, be tied to a session. Otherwise
# it would be possible to move cards between sessions.
belongs_to_player=player,
room_stack=room_stack,
you_stack=you_stack,
equipment_stack=equipment_stack,
forge_stack=forge_stack,
treasure_stack=treasure_stack,
discard_stack=discard_stack
)
session.save()
# Draw the first 5 cards.
initial_room_cards = draw(session, ROOM_CAPACITY)
# Put the initial cards in place.
room_stack.push_many(initial_room_cards)
# If everything went as expected, activate the session by hooking it up to the player.
player.active_session = session
player.save()
return session
def draw_single(session, properties=None):
"""
Attempts drawing a single card.
Can optionally be given specific properties, determined randomly otherwise.
"""
if not session:
return None
card_should_be_special = False
if properties is None:
        card_should_be_beneficial = roll(0, 100) >= 60 # 40% chance of not being a monster card
card_should_be_special = roll(0, 100) >= 95 # 5% chance of being special
details_id = None
if card_should_be_beneficial:
luck = roll(0, 100)
weapon_range = range(0, 45)
weapon_part_range = range(45, 75)
potion_range = range(75, 90)
treasure_range = range(90, 100)
if luck in weapon_part_range:
# Weapon Part
details_id = 1
# Mechanic not implemented yet.
card_should_be_special = False
elif luck in treasure_range:
# Treasure
details_id = 2
# Mechanic not implemented yet.
card_should_be_special = False
elif luck in weapon_range:
# Weapon (2-10)
details_id = get_random_weapon_id_in_value_range(2, 10)
elif luck in potion_range:
# Potion (2-10)
details_id = get_random_potion_id_in_value_range(2, 10)
# Mechanic not implemented yet.
card_should_be_special = False
else:
# Monster (2-14)
details_id = get_random_monster_id_in_value_range(2, 14)
# Mechanic not implemented yet.
card_should_be_special = False
if details_id is None:
return None
try:
properties = CardDetail.objects.get(pk=details_id)
except CardDetail.DoesNotExist:
return None
try:
card = Card(
belongs_to_session=session,
details=properties,
is_special=card_should_be_special
)
card.save()
except:
return None
try:
session.belongs_to_player.statistics.cards_drawn += 1
session.belongs_to_player.statistics.save()
except:
pass
return card
def draw(session, amount):
"""
Attempts drawing a specific amount of cards.
"""
if not session:
return None
if amount <= 0:
return None
cards = []
for i in range(0, amount):
card = draw_single(session)
if card is not None:
cards.append(card)
return cards
def can_activate_stack(session, stack):
"""
Determines whether a stack can be activated in its current state.
"""
if not session or session.is_lost() or not stack:
return False
# Assuming an empty stack can never be activated.
if stack.is_empty():
return False
if stack == session.room_stack:
# The current room can never be activated.
return False
if stack == session.discard_stack:
# The discarded stack can never be activated.
return False
if stack == session.equipment_stack or stack == session.you_stack:
# No special rules for weapons/monsters.
pass
if stack == session.forge_stack:
all_forgeable_cards = stack.all_cards()
if all_forgeable_cards:
amount_of_forgeable_cards = len(all_forgeable_cards)
if amount_of_forgeable_cards < 2:
# The forge stack can only be activated when at least 2 scrap cards are placed here.
return False
# todo: should forged cards always be special? (this means you can always override the current stack! potential game changer)
if not session.equipment_stack.is_empty():
return False
return True
def activate_stack(session, stack):
"""
Attempts activating/clearing a stack.
"""
if not session or session.is_lost() or not stack:
return False
if not can_activate_stack(session, stack):
return False
if stack == session.equipment_stack:
discard_many(session, session.equipment_stack.all_cards())
monster_cards = session.you_stack.all_cards()
monster_cards_discarded = discard_many(session, monster_cards)
score = (monster_cards_discarded * monster_cards_discarded)
if session.score_multiplier > 0:
score_percentage_multiplier = float(session.score_multiplier) / TREASURE_CAPACITY
score_bonus = score * (1 + score_percentage_multiplier)
score += score_bonus
session.score += score
session.score_multiplier = 0
session.save()
if stack == session.treasure_stack:
treasure_cards = session.treasure_stack.all_cards()
treasure_cards_discarded = discard_many(session, treasure_cards)
session.score_multiplier = treasure_cards_discarded
session.save()
if stack == session.forge_stack:
# Draw a new weapon card that is valued depending on how many cards were spent.
# Attempt discarding all cards that were spent creating a weapon.
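        # e.g. spending 3 scrap cards here forges a value-3 weapon (illustrative;
        # can_activate_stack() already guarantees at least 2 scraps).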
value = discard_many(session, session.forge_stack.all_cards())
if value <= 0:
return False
details_id = get_random_weapon_id_in_value_range(value, value)
if details_id is None:
return False
try:
properties = CardDetail.objects.get(pk=details_id)
except CardDetail.DoesNotExist:
return False
# Draw the actual card, given the specific properties determined previously.
weapon_card = draw_single(session, properties)
# Attempt placing the new weapon on the equipment stack. Keep in mind that it is assumed
# that the equipment stack is empty when reaching this point.
did_equip_weapon_card = session.equipment_stack.push(weapon_card)
if not did_equip_weapon_card:
logger.error('boooooo!')
return False
return True
def can_activate_card(session, card):
"""
Determines whether a card has properties that allow it to be activated.
"""
if not session or session.is_lost() or not card:
return False
if card.details.kind is CARD_KIND_POTION:
if card.stack != session.you_stack:
# Can only be activated when placed on the You stack.
return False
if card.details.kind is CARD_KIND_MONSTER:
if card.stack != session.you_stack:
# Can only be activated when placed on the You stack.
return False
return True
def activate_card(session, card):
"""
Attempts activating a card.
This usually occurs when a card has been successfully moved from the current room.
"""
if not session or session.is_lost() or not card:
return False
if not can_activate_card(session, card):
return False
if card.details.kind is CARD_KIND_POTION:
restored_health = card.details.value
current_health = session.health
current_health += restored_health
if current_health > HEALTH_CAPACITY:
current_health = HEALTH_CAPACITY
try:
session.health = current_health
session.save()
except:
return False
discard(session, card)
if card.details.kind is CARD_KIND_MONSTER:
most_recently_played_weapon_card = session.equipment_stack.top()
if most_recently_played_weapon_card and most_recently_played_weapon_card.is_special:
try:
# Disable special status as soon as a monster has been placed.
most_recently_played_weapon_card.is_special = False
most_recently_played_weapon_card.save()
except:
return False
damage = card.details.value
if damage:
if most_recently_played_weapon_card:
damage -= most_recently_played_weapon_card.details.value
if damage > 0:
try:
new_health = session.health - damage
if new_health <= 0:
new_health = 0
session.health = new_health
session.save()
except:
return False
if not most_recently_played_weapon_card:
# Monsters only stack if player has a weapon equipped
session.score += 1
session.save()
discard(session, card)
return True
def can_move(session, card, to_stack):
"""
Determines whether a card can be moved to a given stack.
"""
if (not session or session.is_lost()
or not card
or not to_stack):
return False
if to_stack == session.room_stack:
# you can't move cards to the room...
logger.error(' * card can not be moved to the room!')
return False
if to_stack == session.treasure_stack:
if card.details.kind is not CARD_KIND_TREASURE:
# Not a treasure card, bail out...
logger.error(' * only treasure cards can be moved here!')
return False
if len(session.treasure_stack.all_cards()) >= TREASURE_CAPACITY:
# Treasure stack already holds maximum amount of treasure
logger.error(' * max treasure reached!')
return False
if to_stack == session.forge_stack:
if card.details.kind is not CARD_KIND_SCRAP:
# Not a scrap card, bail out...
logger.error(' * only scrap cards can be moved here!')
return False
if len(session.forge_stack.all_cards()) >= FORGE_CAPACITY:
# Forge stack already holds maximum amount of scraps
logger.error(' * max scraps reached!')
return False
if to_stack == session.equipment_stack:
if card.details.kind is not CARD_KIND_WEAPON:
# Not a weapon card, bail out...
logger.error(' * only weapon cards can be moved here!')
return False
most_recently_played_weapon_card = session.equipment_stack.top()
if most_recently_played_weapon_card is not None:
if not card.is_special:
# Only special cards can be placed on top of the previous weapon as a score multiplier.
logger.error(' * only special cards can do this!')
return False
if to_stack == session.you_stack:
if card.details.kind is not CARD_KIND_MONSTER and card.details.kind is not CARD_KIND_POTION:
# Only monsters or potions can be placed here
logger.error(' * only monster or potion cards can be moved here!')
return False
if card.details.kind is CARD_KIND_MONSTER:
# Card is a monster
most_recently_played_monster_card = session.you_stack.top()
if most_recently_played_monster_card is not None:
if most_recently_played_monster_card.details.value <= card.details.value:
most_recently_played_weapon_card = session.equipment_stack.top()
if most_recently_played_weapon_card and most_recently_played_weapon_card.is_special:
# Basically, you can only place monsters of higher value on other monsters if
# the current weapon is special.
return True
return False
return True
def move(session, card, to_stack):
"""
Attempts moving a card into a given stack.
"""
if not session or session.is_lost():
return False
if not card or not to_stack:
return False
if not card.can_be_moved(to_stack):
logger.error(' * card can not be moved!')
return False
if not can_move(session, card, to_stack):
logger.error(' * could not allow moving card!')
return False
from_stack = card.stack
if not to_stack.push(card):
logger.error(' * could not push card on stack!')
return False
if to_stack != session.discard_stack:
if not activate_card(session, card):
logger.error(' * could not activate card!')
return False
if from_stack == session.room_stack:
if session.amount_of_cards_moved_since_last_skip != -1:
# If set to -1 that means player has not skipped yet, so we don't need to track this.
try:
session.amount_of_cards_moved_since_last_skip += 1
session.save()
except:
logger.error(' * could not increment "cards_moved_since_last_skip"!')
return False
try:
new_card = draw_single(session)
except:
logger.error(' * could not draw new card to the room!')
return False
try:
session.room_stack.push(new_card)
except:
logger.error(' * could not push new card to the room!')
return False
return True
def discard(session, card):
"""
Discards a card from a game session. The 10 most recently discarded cards are stored.
"""
if not session:
return False
logger.info(' trying to move %s to "discard_stack"' % (card))
if move(session, card, session.discard_stack):
logger.info(' success!')
if session.discard_stack.count() > DISCARD_CAPACITY:
oldest_discarded_card = session.discard_stack.bottom()
if not session.discard_stack.pop_specific(oldest_discarded_card):
return False
return True
return False
def discard_many(session, cards):
"""
Attempts discarding several cards at once.
"""
if not session or not cards:
return False
amount_discarded = 0
for card in cards:
logger.info('discarding %s' % (card))
if discard(session, card):
logger.info(' success!')
amount_discarded += 1
else:
logger.info(' fail!')
return amount_discarded
def can_skip_on_next_move(session):
"""
Determines whether a game can have its current room skipped on the following turn.
"""
if not session or session.is_lost():
return False
if (session.amount_of_cards_moved_since_last_skip == REQUIRED_TURNS_BEFORE_SKIPPING - 1):
return True
return False
def can_skip(session):
"""
Determines whether a game can have its current room skipped or not.
"""
if not session or session.is_lost():
return False
if (session.amount_of_cards_moved_since_last_skip == -1
or session.amount_of_cards_moved_since_last_skip >= REQUIRED_TURNS_BEFORE_SKIPPING):
return True
return False
def skip(session):
"""
Attempts skipping the current room.
"""
if not session or session.is_lost():
return False
if can_skip(session):
room_cards = session.room_stack.all_cards()
logger.info('skipping %d cards...' % (len(room_cards)))
# Note that a new card is drawn into the room automatically for each successful discard
amount_discarded = discard_many(session, room_cards)
logger.info('discarded %d cards!' % (amount_discarded))
try:
session.amount_of_cards_moved_since_last_skip = 0
session.save()
except:
logger.error(' * could not save session properly!')
return False
return True
return False
``` |
{
"source": "jhauberg-archived/pydefs",
"score": 2
} |
#### File: jhauberg-archived/pydefs/setup.py
```python
import sys
import re
from setuptools import setup
def determine_version_or_exit() -> str:
""" Determine version identifier or exit the program. """
if sys.version_info < (3, 5):
sys.exit('Python 3.5 or newer is required for pydefs')
with open('pydefs/version.py') as file:
version_contents = file.read()
version_pattern = r'^__version__ = [\'"]([^\'"]*)[\'"]'
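        # e.g. this pattern matches a line such as: __version__ = '1.0.0'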
version_match = re.search(version_pattern, version_contents, re.M)
if version_match:
return version_match.group(1)
else:
sys.exit('Version could not be determined')
VERSION_IDENTIFIER = determine_version_or_exit()
setup(
name='pydefs',
version=VERSION_IDENTIFIER,
description='List functions and classes from Python modules',
long_description=open('README.md').read(),
url='https://github.com/jhauberg/pydefs',
download_url='https://github.com/jhauberg/pydefs/archive/master.zip',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['pydefs'],
include_package_data=True,
platforms='any',
install_requires=[
'docopt==0.6.2'
],
entry_points={
'console_scripts': [
'pydefs = pydefs.__main__:main',
],
}
)
``` |
{
"source": "jhauberg-archived/words.py",
"score": 4
} |
#### File: jhauberg-archived/words.py/words.py
```python
import sys
import argparse
import re
import random
import json
def from_rule(rule, parameters):
"""
Generates a string that matches the specified rules.
"""
if not rule or not parameters:
return None
# Find all parameters that are wrapped in curly braces (e.g. {foo})
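    # Illustrative (hypothetical) example: the rule "the {adjective} fox" with
    # parameters {"adjective": ["quick"]} can produce "the quick fox".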
matches = re.finditer('\\{(.*?)\\}', rule)
result = rule
for match in matches:
parameter = match.group(1)
if parameter not in parameters:
continue
possibilities = parameters[parameter]
next_possibility_index = random.randint(0, len(possibilities) - 1)
pick = possibilities[next_possibility_index]
# Remove the braces surrounding the parameter
result = result.replace('{', '')
result = result.replace('}', '')
# Insert the word that was picked
result = result.replace(parameter, pick, 1)
return result
def from_rules(ruleset, max_amount):
"""
Generates up to `max_amount` of strings from the rules
defined in the file `ruleset`.
It is not guaranteed to reach `max_amount` of strings, in case the ruleset
does not contain enough unique combinations.
In such a case, all possible combinations will be created.
"""
input = open(ruleset, 'r')
try:
json_data = json.load(input)
except ValueError:
json_data = None
input.close()
if json_data is None:
raise Exception('The ruleset contains invalid JSON data.')
rules = json_data
if 'formats' not in rules:
raise Exception('The ruleset must contain a rule definition '
'named `formats`.')
if 'parameters' not in rules:
raise Exception('The ruleset must contain a parameter definitions '
'named `parameters`.')
pairings = rules['formats']
parameters = rules['parameters']
results = []
if len(pairings) == 0 or len(parameters) == 0:
# Bail out since there's no rules defined
return results
generated_amount = 0
retries = 0
while generated_amount < max_amount:
# Keep going until we've generated as close to max_amount as possible
next_rule_index = random.randint(0, len(pairings) - 1)
rule = pairings[next_rule_index]
result = from_rule(rule, parameters)
if result is not None and result in results:
# Result was a duplicate, so retry...
retries += 1
# This could definitely be improved :)
if retries == 100:
break
continue
results.append(result)
generated_amount += 1
return results
def main(argv):
parser = argparse.ArgumentParser(
description='Generate strings from simple rules')
parser.add_argument('-f', '--filename',
help='The path to a JSON file containing rules',
required=True)
parser.add_argument('-n', '--amount',
type=int,
default=5,
help='The amount of strings to generate',
required=False)
args = vars(parser.parse_args())
rules = args['filename']
amount = args['amount']
if amount <= 0:
amount = 1
results = from_rules(rules, amount)
    print(json.dumps(
        results,
        sort_keys=True,
        indent=2))
if __name__ == "__main__":
main(sys.argv)
``` |
{
"source": "jhauberg/cards.py",
"score": 2
} |
#### File: cards.py/cards/template.py
```python
import os
import re
import datetime
from typing import List
from cards.templatefield import TemplateField, fields
from cards.column import (
Row, get_column_contentd, get_definition_content, get_definition_contentd
)
from cards.resource import get_resource_path, is_image, supported_image_types, transformed_image_paths
from cards.util import first, dequote, get_line_number, get_padded_string
from cards.warning import WarningDisplay, WarningContext
from cards.constants import TemplateFields, TemplateFieldDescriptors, DateField
from cards.version import __version__
class Template: # pylint: disable=too-few-public-methods
""" Represents a template. """
def __init__(self, content: str, path: str=None):
self.content = content
self.path = path
def __iter__(self):
return fields(self.content)
def __str__(self):
truncated_content = (('\'' + self.content[:50] + '\'…')
if len(self.content) > 50
else self.content)
truncated_path = (('…\'' + self.path[-25:] + '\'')
if self.path is not None and len(self.path) > 25
else self.path)
return ('{0} ({1})'.format(truncated_content, truncated_path)
if truncated_path is not None
else truncated_content)
@staticmethod
def from_path(path: str, relative_to_path: str=None) -> ('Template', bool):
content, not_found, absolute_path = template_from_path(
path, relative_to_path)
return Template(content, absolute_path), not_found
class TemplateRenderData: # pylint: disable=too-few-public-methods
""" Provides additional data about the rendering of a template. """
def __init__(self,
image_paths: set=None,
unknown_fields: set=None,
unused_fields: set=None,
referenced_definitions: set=None,
embedded_styles: dict=None):
self.image_paths = image_paths
self.unknown_fields = unknown_fields
self.unused_fields = unused_fields
self.referenced_definitions = referenced_definitions
self.embedded_styles = embedded_styles
def strip_styles(template: Template) -> str:
""" Strip and return any embedded <style></style> content from a template. """
pattern = r'<style.*?>(.+?)</style>'
stripped_styles = ''
search = re.compile(pattern, re.DOTALL)
# find all style matches and extract embedded styles
for style_match in re.finditer(pattern, template.content, re.DOTALL):
# note that we strip the entire style- not the inner content
style = style_match.group(0).strip()
# separating each style block for good measure
stripped_styles = stripped_styles + '\n' + style if len(stripped_styles) > 0 else style
# finally remove all style matches
# note that this removes the <style></style> tags too
template.content = re.sub(search, '', template.content).strip()
# make sure we keep it clean- no unnecessary newlines or excess whitespace
stripped_styles = stripped_styles.strip()
template_field_names = list((field.name for field in fields(stripped_styles)))
if len(template_field_names) > 0:
context = template.path
# if there's any fields in the styles, display a warning about it
WarningDisplay.fields_in_styles(
WarningContext(context), template_field_names)
return stripped_styles
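# Rough illustration with hypothetical template content: given
# '<style>.card { color: red; }</style><div>{{ title }}</div>', strip_styles
# returns the whole '<style>...</style>' block and leaves template.content
# as '<div>{{ title }}</div>'.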
def template_from_path(template_path: str,
relative_to_path: str=None) -> (str, bool, str):
""" Return the template contents of the given path, if possible.
If specified, path is made relative to another path.
"""
template_content = None
template_not_found = False
if template_path is not None and len(template_path) > 0:
if not os.path.isabs(template_path):
# the path is not an absolute path; assume that it's located relative to the data
if relative_to_path is not None:
template_path = os.path.join(
os.path.dirname(relative_to_path),
template_path)
try:
with open(template_path) as template_file:
template_content = template_file.read().strip()
except IOError:
template_not_found = True
else:
template_not_found = True
return template_content, template_not_found, template_path
def image_size(image_path: str, from_context: str) -> (int, int):
""" Return the size specified by the context of an image field. """
explicit_width = None
# get each size specification separately (removing blanks)
size_components = list(filter(None, from_context.split('x')))
if len(size_components) > 0:
width_specification = size_components[0]
try:
explicit_width = int(width_specification)
except ValueError:
explicit_width = None
WarningDisplay.unknown_size_specification(
WarningContext(image_path), from_context)
else:
if explicit_width < 0:
WarningDisplay.invalid_width_specification(
WarningContext(image_path), explicit_width)
explicit_width = None
if len(size_components) > 1:
height_specification = size_components[1]
try:
explicit_height = int(height_specification)
except ValueError:
explicit_height = None
WarningDisplay.unknown_size_specification(
WarningContext(image_path), from_context)
else:
if explicit_height < 0:
WarningDisplay.invalid_height_specification(
WarningContext(image_path), explicit_height)
explicit_height = None
else:
# default to a squared size using the width specification
explicit_height = explicit_width
return explicit_width, explicit_height
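# Behaviour sketch with hypothetical size contexts: '16x16' yields (16, 16),
# a bare '16' yields (16, 16) because the height defaults to the width, and a
# non-numeric value such as 'wide' yields (None, None) after emitting a warning.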
def image(field: TemplateField) -> (str, str):
""" Transform an image field into an image tag, unless field specifies otherwise. """
image_path = field.name
no_transform = False
width = None
height = None
if field.context is not None:
if field.context == TemplateFieldDescriptors.COPY_ONLY:
no_transform = True
else:
width, height = image_size(image_path, field.context)
if not is_image(image_path):
# the file is not an image; or something has gone wrong
if no_transform or (width is not None or height is not None):
# if either of these attributes exist, then it likely was supposed to be an image
# but we could not resolve it properly- so warn about it
WarningDisplay.unresolved_image_reference_error(
image_reference=image_path,
closest_resolution_value=field.name)
return None, None # no image, no tag
image_name = os.path.basename(image_path)
# transform the path so that it is relative within the output directory,
# this way we can keep every resource contained
resource_path = get_resource_path(image_name)
if no_transform:
return image_path, resource_path # image path in resources, no tag
return image_path, get_image_tag(resource_path, width, height)
def get_image_tag(image_path: str,
width: int=None,
height: int=None) -> str:
""" Return a HTML-compliant image tag using the specified image path. """
if width is not None and height is not None:
# make a tag with the image at the specified dimensions
return '<img src="{0}" width="{1}" height="{2}">'.format(image_path, width, height)
# make a tag with the image at its intrinsic size
return '<img src="{0}">'.format(image_path)
def fill_image_fields(template: Template) -> List[str]:
""" Populate all image fields in the template.
    An image field provides a way of transforming an image path into an HTML-compliant image tag.
An image field should look like this: '{{ my-image.png 16x16 }}'.
"""
image_paths = []
supported_images_pattern = '\\' + '|\\'.join(supported_image_types())
def next_image_field() -> TemplateField:
""" Return the next probable image field. """
return first(fields(template.content, with_name_like=supported_images_pattern))
field = next_image_field()
while field is not None:
# at this point we don't know that it's actually an image field - we only know that it's
# a template field, so we just attempt to create an <img> tag from the field.
# if it turns out to not be an image, we just ignore the field entirely and proceed
image_path, image_tag = image(field)
if image_path is not None:
# we at least discovered that the field was pointing to an image,
# so in the end it needs to be copied
image_paths.append(image_path)
if image_tag is not None:
# the field was transformed to either an <img> tag, or just the path (for copying only)
fill(field, image_tag, template)
field = next_image_field()
return image_paths
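# Rough illustration with a hypothetical field: '{{ my-image.png 16x16 }}' would
# be replaced in-place by an <img> tag pointing at the transformed resource path
# (e.g. '<img src="..." width="16" height="16">', the exact prefix depends on
# get_resource_path), and 'my-image.png' would be returned so the file can later
# be copied into the output directory.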
def fill(field: TemplateField,
field_value: str,
template: Template,
indenting: bool=False) -> None:
""" Populate a single template field in the template. """
start_index = field.indices.start
end_index = field.indices.stop
if ((start_index < 0 or start_index > len(template.content)) or
(end_index < 0 or end_index > len(template.content))):
raise ValueError('Template field \'{0}\' out of range ({1}-{2}).'
.format(field.inner_content, start_index, end_index))
if indenting:
field_value = get_padded_string(
field_value, template.content, field.indices.start)
template.content = template.content[:start_index] + field_value + template.content[end_index:]
def fill_each(field_inner_content: str,
field_value: str,
template: Template,
indenting: bool=False) -> int:
""" Populate all matching template fields in the template.
Matches are determined by comparing the inner content of each field with
the provided content.
"""
# make sure that we have a sane value
field_value = field_value if field_value is not None else ''
# template fields are always represented by wrapping {{ }}'s,
# however, both {{my_field}} and {{ my_field }} should be valid;
# i.e. any leading or trailing whitespace should simply be ignored
field_search = r'{{\s*' + field_inner_content + r'\s*}}'
# find any occurences of the field (case-sensitive)
search = re.compile(field_search)
if indenting:
match = search.search(template.content)
if match is not None:
# we only need the start index
start_index = match.span()[0]
field_value = get_padded_string(field_value, template.content, start_index)
# finally replace any found occurences of the template field with its value
template.content, occurences = search.subn(field_value, template.content)
return occurences
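# Quick sketch with hypothetical values: for a template containing '{{ title }}',
# fill_each('title', 'My Deck', template) substitutes every occurrence of the
# field and returns how many substitutions were made.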
def fill_date_fields(template: Template,
date: datetime=DateField.TODAY) -> None:
""" Populate all date fields in the template.
A 'date' field provides an easy way of putting the current date into a template.
A date field uses built-in Python date formats, and should look like this:
'{{ date }}' - using default formatting
'{{ date '%d, %b %Y' }}' - using custom formatting
See all supported format identifiers here http://strftime.org
"""
def next_date_field():
""" Return the next probable date field. """
return first(fields(template.content, with_name_like='date'))
field = next_date_field()
while field is not None:
# default date format: January 04, 2018
# note that supported format specifiers may be different depending on platform (e.g. Windows or MacOS),
# and, as such, neat formatting like using %-d instead of %d to remove zero-padding is not viable
date_format = '%B %d, %Y'
if field.context is not None:
# a date field can have a custom format
custom_date_format = dequote(field.context).strip()
if len(custom_date_format) > 0:
# if found, we'll use that and let date.strftime handle it
date_format = custom_date_format
formatted_date = date.strftime(date_format)
# populate the include field with the content; or blank if unresolved
fill(field, formatted_date, template)
field = next_date_field()
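# Formatting sketch: with the default format a '{{ date }}' field becomes e.g.
# 'January 04, 2018', while a hypothetical custom format such as
# "{{ date '%Y-%m-%d' }}" would become e.g. '2018-01-04' via date.strftime.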
def fill_include_fields(template: Template) -> dict:
""" Populate all include fields in the template.
An 'include' field provides a way of putting re-usable template content into a
separate file, and including it in place of the field.
An include field should look like this:
'{{ include 'path/to/file.html' }}'
"""
original_template_content = template.content
def next_include_field():
""" Return the next probable include/inline field. """
return first(fields(template.content, with_name_like='include|inline'))
field = next_include_field()
stripped_styles = {}
while field is not None:
is_include_command = field.name == TemplateFields.INCLUDE
is_inline_command = field.name == TemplateFields.INLINE
# default to blank
include_content = ''
include_path = None
if field.context is not None:
# the field should contain a path
include_path = dequote(field.context).strip()
if include_path is not None and len(include_path) > 0:
if not os.path.isabs(include_path):
# it's not an absolute path, so we should make it a relative path
if template.path is not None:
# make the path relative to the path of the containing template
include_path = os.path.join(
os.path.dirname(template.path), include_path)
if os.path.isfile(include_path):
# we've ended up with a path that can be opened
with open(include_path) as include_file:
if is_include_command:
# open it and read in the entire contents as is
include_content = include_file.read().strip()
elif is_inline_command:
# read each line
for line in include_file.readlines():
# stripping excess whitespace and newline in the process
include_content += line.strip()
else:
WarningDisplay.included_file_not_found_error(
WarningContext(os.path.basename(template.path)), include_path)
include_content = '<strong><included file not found></strong>'
stripped_template = Template(include_content)
stripped_styles[include_path] = strip_styles(stripped_template)
include_content = stripped_template.content
else:
WarningDisplay.include_should_specify_file(
WarningContext('{0}:{1}'.format(
os.path.basename(template.path),
# todo: the line number could potentially be incorrect, as we might not be going
# through the original template anymore- the lineno can only serve as a hint
get_line_number(field.indices.start, original_template_content))),
is_inline=is_inline_command)
# populate the include field with the content; or blank if unresolved
fill(field, include_content, template, indenting=is_include_command)
field = next_include_field()
return stripped_styles
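# Usage sketch with hypothetical paths: "{{ include 'partials/footer.html' }}" is
# replaced by the full contents of that file (resolved relative to the containing
# template), while "{{ inline 'partials/footer.html' }}" joins the stripped lines
# without newlines; any embedded <style> blocks are stripped out and returned in
# a dict keyed by the include path.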
def fill_partial_definition(definition: str,
value: str,
template: Template) -> int:
""" Populate any partial definitions in a template.
A partial definition is a definition that is included in another template field;
e.g. {{ my_column my_partial_definition }}, or {{ my_partial_definition 16x16 }}.
Populating a partial definition is essentially just replacing the definition key with
its resolved value, but otherwise leaving the field as it was.
For example, {{ my_column my_partial_definition }} would become {{ my_column some_value }}.
"""
partial_definition_occurences = 0
# only match as a partial definition if it is isolated by whitespace (or {{}}'s),
# otherwise it might just be part of something else;
# for example, the definition 'monster' should not match {{ path/to/monster.svg 16x16 }}
# note that this pattern actually has a limitation that it won't match more than one hit
# in a single field, so e.g. {{ partial partial }} would only match the first
pattern = r'(?:^|\s|{{)(' + definition + r')(?:$|\s|}})'
def next_partial_definition_field():
""" Return the next field likely to contain a partial definition. """
return first(fields(
template.content,
with_name_like=pattern,
with_context_like=pattern,
strictly_matching=False)) # match either name or context, or both
partial_definition_field = next_partial_definition_field()
while partial_definition_field is not None:
name = partial_definition_field.name
context = partial_definition_field.context
if name is not None:
name = re.sub(pattern, value, name)
if context is not None:
context = re.sub(pattern, value, context)
# essentially replace the field with a new and transformed field where the
# partial definition is resolved and populated
fill(partial_definition_field, str(TemplateField(name, context)), template)
partial_definition_occurences += 1
# keep searching for more matches
partial_definition_field = next_partial_definition_field()
return partial_definition_occurences
def fill_definitions(definitions: dict,
template: Template) -> set:
""" Populate all definition fields in the template. """
referenced_definitions = []
resolved_definitions = {}
# first resolve definitions and populate any definite definition fields (e.g. not partials)
for definition in definitions:
# note that this is an un-optimized solution; it loops through each definition, even if
# that particular definition is not even used- AND it loops again after this one
# recursively resolve the content of the definition
resolved_definition_value, resolution_data = get_definition_contentd(
definition, in_definitions=definitions,
content_resolver=resolve_column_content, field_resolver=resolve_column_field)
# we can save this for the partial pass coming up, to avoid having to resolve again
resolved_definitions[definition] = resolved_definition_value
# fill any definite occurences of the definition (e.g. '{{ my_definition }}')
definite_occurences = fill_each(definition, resolved_definition_value, template)
if definite_occurences > 0:
# the definition was used somewhere, so flag it as referenced
referenced_definitions.append(definition)
# and also flag any definitions referenced during the resolution of the definition
referenced_definitions.extend(
list(resolution_data.definition_references))
# then populate any partial definitions using the previously resolved definitions
for definition in definitions:
# we need this second loop, because a later definition might resolve to contain a partial
# definition that the loop already went through; this second loop solves that problem
partial_occurences = fill_partial_definition(
definition, resolved_definitions[definition], template)
if partial_occurences > 0:
# the definition was used somewhere, so flag it as referenced
referenced_definitions.append(definition)
return set(referenced_definitions)
def resolve_column_content(content, in_data_path) -> str:
""" Return content where any include, empty and date fields have been resolved.
This resolver function is run when starting the resolution of the content of a column.
"""
template = Template(content, path=in_data_path)
# fill any include fields before doing anything else
fill_include_fields(template)
# clear out any empty fields
fill_empty_fields(template)
# then fill any date fields
fill_date_fields(template)
return template.content
def resolve_column_field(field_name, field_value, in_content) -> (str, int):
""" Return content where any occurence of the provided field has been resolved.
This resolver function is run when the resolution of the content of a column
discovers a field.
"""
template = Template(in_content)
occurences = fill_each(field_name, field_value, template)
return template.content, occurences
def fill_index(index: str,
style: str,
pages: str,
header: str,
pages_total: int,
cards_total: int,
definitions: dict) -> (str, TemplateRenderData):
""" Populate and return index template with all styles and pages. """
template = Template(index)
if len(style) == 0:
style = '<style type="text/css">\n /* no embedded styles */\n</style>'
fill_each('_styles', style, template, indenting=True)
if len(header) > 0:
header_tag = '<div class="ui-header do-not-print">\n {{ _header }}\n</div>'
fill_each('_header', header_tag, template, indenting=True)
fill_each('_header', header, template, indenting=True)
fill_each(TemplateFields.PAGES, pages, template, indenting=True)
fill_each(TemplateFields.CARDS_TOTAL, str(cards_total), template)
fill_each(TemplateFields.PAGES_TOTAL, str(pages_total), template)
fill_each(TemplateFields.PROGRAM_VERSION, __version__, template)
# note that most of these fields could potentially be filled already when first getting the
# page template; however, we instead do it as the very last thing to allow cards
# using these fields (even if that might only be on rare occasions)
title = get_definition_content(
definition=TemplateFields.TITLE, in_definitions=definitions,
content_resolver=resolve_column_content, field_resolver=resolve_column_field)
if title is None:
title = ''
index_title = title if len(title) > 0 else 'cards.py [Untitled]'
description = get_definition_content(
definition=TemplateFields.DESCRIPTION, in_definitions=definitions,
content_resolver=resolve_column_content, field_resolver=resolve_column_field)
description = description if description is not None else ''
copyright_notice = get_definition_content(
definition=TemplateFields.COPYRIGHT, in_definitions=definitions,
content_resolver=resolve_column_content, field_resolver=resolve_column_field)
copyright_notice = copyright_notice if copyright_notice is not None else ''
author = get_definition_content(
definition=TemplateFields.AUTHOR, in_definitions=definitions,
content_resolver=resolve_column_content, field_resolver=resolve_column_field)
author = author if author is not None else ''
version_identifier = get_definition_content(
definition=TemplateFields.VERSION, in_definitions=definitions,
content_resolver=resolve_column_content, field_resolver=resolve_column_field)
version_identifier = version_identifier if version_identifier is not None else ''
fill_each('__title', index_title, template)
fill_each(TemplateFields.TITLE, title, template)
fill_each(TemplateFields.DESCRIPTION, description, template)
fill_each(TemplateFields.COPYRIGHT, copyright_notice, template)
fill_each(TemplateFields.AUTHOR, author, template)
fill_each(TemplateFields.VERSION, version_identifier, template)
fill_date_fields(template)
referenced_definitions = fill_definitions(definitions, template)
# fill any image fields that might have appeared by populating the metadata fields
image_paths_from_index = fill_image_fields(template)
return template.content, TemplateRenderData(
image_paths=set(image_paths_from_index),
referenced_definitions=referenced_definitions)
def fill_template(template: Template,
row: Row,
definitions: dict) -> TemplateRenderData:
""" Populate all template fields in a template.
Populating a template is done in 4 steps:
First, an attempt is made at filling any include fields, since they might provide
additional fields that needs to be resolved.
Secondly, for each column in the row, a pass is made in an attempt to fill any matching
column fields; recursively resolving any column references or definitions.
Thirdly, for each definition, a pass is made in an attempt to fill any matching definition
fields; recursively resolving any definition references.
Finally, once all fields and references have been resolved, any remaining fields will be
attempted resolved as image fields.
"""
# first of all, find any include fields and populate those,
# as they might contribute even more template fields to populate
stripped_styles = fill_include_fields(template)
# clear out any empty fields
fill_empty_fields(template)
# populate any images found in the template
image_paths_from_template = fill_image_fields(template)
# transform any discovered image paths to be relative to the template path
image_paths_from_template = transformed_image_paths(image_paths_from_template, template.path)
# any field that is in the data, but not found in the template; for example, if there's
# a 'rank' column in the data, but no '{{ rank }}' field in the template
unused_columns = []
column_references_in_data = []
discovered_definition_refs = []
# go through each data field for this card (row)
for column in row.data:
# fetch the content for the field
field_content, resolution_data = get_column_contentd(
column, row, definitions,
content_resolver=resolve_column_content,
field_resolver=resolve_column_field)
# fill content into the provided template
occurences = fill_each(column, field_content, template)
        if occurences == 0:
# this field was not found anywhere in the specified template
unused_columns.append(column)
else:
# this field was found and populated in the template, so save any column references
# made in the column content, so we can later compare that to the list of missing fields
column_references_in_data.extend(list(resolution_data.column_references))
discovered_definition_refs.extend(list(resolution_data.definition_references))
# populate any images that may have appeared from any column
image_paths_from_datasource = fill_image_fields(template)
# transform any discovered image paths to be relative to the datasource path
image_paths_from_datasource = transformed_image_paths(image_paths_from_datasource, row.data_path)
# fill any definition fields
discovered_definition_refs.extend(
fill_definitions(definitions, template))
fill_date_fields(template)
# any template field visible in the template, but not found in the data; for example, if
# the template has a {{ rank }} field (or more), but no 'rank' column in the data
unknown_fields = []
# find any remaining template fields so we can warn that they were not filled
for field in template:
if (field.inner_content == TemplateFields.CARDS_TOTAL or
field.inner_content == TemplateFields.CARDS_TOTAL_IN_CONTEXT):
# this is a special case: these fields will not be filled until every card
# has been generated- so each field should not be treated as if missing;
# instead, simply ignore them at this point
pass
else:
# the field was not found in the card data, so make a warning about it
unknown_fields.append(field.inner_content)
# make sure we only have one of each reference
column_references = set(column_references_in_data)
# remove any "missing fields" that are actually referenced in column content-
# they may not be in the template, but they are not unused/missing, so don't warn about it
unused_columns = list(set(unused_columns) - column_references)
return TemplateRenderData(
image_paths=set(image_paths_from_template + image_paths_from_datasource),
unknown_fields=set(unknown_fields),
unused_fields=set(unused_columns),
referenced_definitions=set(discovered_definition_refs),
embedded_styles=stripped_styles)
def fill_empty_fields(template: Template) -> None:
""" Populate all empty fields in a template (with nothing). """
fill_each('', '', template)
def fill_card(template: Template,
row: Row,
card_index: int,
card_copy_index: int,
definitions: dict) -> (str, TemplateRenderData):
""" Return the contents of a card using the specified template. """
# attempt to fill all fields discovered in the template using the data for this card
render_data = fill_template(template, row, definitions)
# fill all row index fields (usually used for error templates)
fill_each(TemplateFields.CARD_ROW_INDEX, str(row.row_index), template)
# fill all template path fields (usually used for error templates)
fill_each(TemplateFields.CARD_TEMPLATE_PATH, template.path, template)
# fill all card index fields
fill_each(TemplateFields.CARD_INDEX, str(card_index), template)
fill_each(TemplateFields.CARD_COPY_INDEX, str(card_copy_index), template)
# card data might contain the following fields, but they would not have been rendered
# during fill_template(), so make sure to remove them from the missing list if necessary
except_fields = {TemplateFields.CARD_INDEX,
TemplateFields.CARD_ROW_INDEX,
TemplateFields.CARD_COPY_INDEX,
TemplateFields.CARD_TEMPLATE_PATH}
# update the set of unknown fields to not include the exceptions listed above
render_data.unknown_fields -= except_fields
return template.content, render_data
``` |
{
"source": "jhauberg/comply",
"score": 2
} |
#### File: comply/comply/__main__.py
```python
import os
import re
import sys
import datetime
from docopt import docopt
from pkg_resources import parse_version
from comply import (
VERSION_PATTERN,
EXIT_CODE_SUCCESS, EXIT_CODE_SUCCESS_WITH_SEVERE_VIOLATIONS,
exit_if_not_compatible
)
from comply.reporting import Reporter, OneLineReporter, HumanReporter, XcodeReporter
from comply.printing import printdiag, diagnostics, supports_unicode, is_windows_environment, Colors
from comply.checking import find_checkable_files, check
from comply.version import __version__
import comply.printing
from comply.rules.report import CheckResult
from comply.rules.rule import Rule, RuleViolation
from comply.rules import *
def check_for_update():
""" Determine whether a newer version is available remotely. """
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
url = 'https://raw.githubusercontent.com/jhauberg/comply/master/comply/version.py'
try:
# specify a very short timeout, as this is a non-essential feature
# and should not stall program exit indefinitely
with urlopen(url, timeout=5) as response:
# we're certain this file is UTF8, so we'll decode it right away
response_body = response.read().decode('utf8')
# search for the version string
matches = re.search(VERSION_PATTERN, response_body, re.M)
if matches:
# if found, grab it and compare to the current installation
remote_version_identifier = matches.group(1)
if parse_version(__version__) < parse_version(remote_version_identifier):
printdiag(Colors.GOOD +
'A newer version is available ({0})'.format(
remote_version_identifier) +
Colors.RESET)
except HTTPError:
# fail silently
pass
except URLError:
# fail silently
pass
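# The comparison in check_for_update relies on pkg_resources.parse_version, which
# orders version strings numerically by component rather than lexically; for
# example, parse_version('0.9.0') < parse_version('0.10.0') is True.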
def expand_params(names: list) -> list:
""" Return an expanded list of parameters from a list of comma-separated parameters.
E.g. given a list of ['a', 'b,c,d'], returns ['a', 'b', 'c', 'd']
"""
expanded_names = []
for name in names:
expanded_names.extend(
[i.strip() for i in name.split(',')])
return expanded_names
def print_invalid_names(names: list, rules: list):
""" Go through and determine whether any of the provided names do not exist as named rules. """
for name in names:
if not is_name_valid(name, rules):
# attempt fixing the name to provide a suggestion
suggested_name = name.replace('_', '-').replace(' ', '-')
if is_name_valid(suggested_name, rules):
printdiag('Rule \'{rule}\' does not exist. Did you mean \'{suggestion}\'?'.format(
rule=name, suggestion=suggested_name))
else:
printdiag('Rule \'{rule}\' does not exist.'.format(
rule=name))
def is_name_valid(name: str, rules: list) -> bool:
""" Determine whether a name corresponds to a named rule. """
for rule in rules:
if rule.name == name:
return True
return False
def filtered_rules(names: list, exceptions: list, severities: list) -> list:
""" Return a list of rules to run checks on. """
rulesets = [comply.rules.standard]
rules = Rule.rules_in(rulesets)
if len(names) > 0:
print_invalid_names(names, rules)
# filter out any rule not explicitly listed in --check
rules = [rule for rule
in rules
if rule.name in names]
# filter out any rule of unlisted severities
rules = [rule for rule
in rules
if rule.severity in severities]
if len(exceptions) > 0:
print_invalid_names(exceptions, rules)
# filter out rules explicitly listed in --except
rules = [rule for rule
in rules
if rule.name not in exceptions]
# sort rules in descending order, first by severity, then collection hint,
# making sure severe violations are listed before less severe violations
return sorted(rules,
reverse=True,
key=lambda rule: (rule.severity,
rule.collection_hint))
def make_reporter(reporting_mode: str) -> Reporter:
""" Return a reporter appropriate for the mode. """
if reporting_mode == 'human':
return HumanReporter()
elif reporting_mode == 'oneline':
return OneLineReporter()
elif reporting_mode == 'xcode':
return XcodeReporter()
printdiag('Reporting mode \'{0}\' not available.'.format(reporting_mode),
as_error=True)
return Reporter()
def make_report(inputs: list, rules: list, reporter: Reporter) -> CheckResult:
""" Run checks and print a report. """
def not_checked(path: str, type: str, reason: str):
""" Print a diagnostic stating when a file was not checked. """
if reason is not None:
printdiag('{type} \'{path}\' was not checked ({reason}).'.format(
type=type, path=path, reason=reason))
else:
printdiag('{type} \'{path}\' was not checked.'.format(
type=type, path=path))
checkable_inputs = []
# find all valid files from provided inputs
for path in inputs:
paths = find_checkable_files(path)
if len(paths) > 0:
# one or more valid files were found
checkable_inputs.extend(paths)
else:
# no valid files were found in this path
if os.path.isdir(path):
# the path was a directory, but no valid files were found inside
not_checked(path, type='Directory', reason='no files found')
else:
# the path was a single file, but not considered valid so it must not be supported
not_checked(path, type='File', reason='file not supported')
# sort paths for consistent output per identical run
checkable_inputs = sorted(checkable_inputs)
result = CheckResult()
# set the total number of files we expect to report on
reporter.files_total = len(checkable_inputs)
# finally run the actual checks on each discovered file
for path in checkable_inputs:
file_result, checked = check(path, rules, reporter)
if checked == CheckResult.FILE_CHECKED:
# file was checked and results were reported if any
result += file_result
elif checked == CheckResult.FILE_SKIPPED:
# file was fine but not checked (it should still count toward the total)
result += file_result
else:
# file was not checked, for any number of reasons
reason = None
if checked == CheckResult.FILE_NOT_FOUND:
reason = 'file not found'
elif checked == CheckResult.FILE_NOT_READ:
reason = 'file not read'
not_checked(path, type='File', reason=reason)
return result
def print_profiling_results(rules: list):
""" Print benchmarking results/time taken for each rule. """
num_rules_profiled = 0
for rule in rules:
time_taken = rule.total_time_spent_collecting
if time_taken >= 0.1:
printdiag(' [{0}] took {1:.1f} seconds'.format(
rule.name, rule.total_time_spent_collecting))
num_rules_profiled += 1
num_rules_not_profiled = len(rules) - num_rules_profiled
if num_rules_not_profiled > 0:
printdiag(' (...{0} rules took nearly no time and were not shown)'.format(
num_rules_not_profiled))
def print_rules_checked(rules: list, since_starting):
""" Print the number of rules checked and time taken. """
time_since_report = datetime.datetime.now() - since_starting
report_in_seconds = time_since_report / datetime.timedelta(seconds=1)
total_time_taken = report_in_seconds
num_rules = len(rules)
rules_grammar = 'rule' if num_rules == 1 else 'rules'
printdiag('Checked {0} {1} in {2:.1f} seconds'.format(
num_rules, rules_grammar, total_time_taken))
def print_report(report: CheckResult):
""" Print the number of violations found in a report. """
# note the whitespace; important for the full format later on
severe_format = '({0} severe) ' if report.num_severe_violations > 0 else ''
severe_format = severe_format.format(report.num_severe_violations)
total_violations = report.num_violations + report.num_severe_violations
violations_grammar = 'violation' if total_violations == 1 else 'violations'
files_format = '{1}/{0}' if report.num_files_with_violations > 0 else '{0}'
files_format = files_format.format(report.num_files, report.num_files_with_violations)
printdiag('Found {num_violations} {violations} {severe}'
'in {files} files'
.format(num_violations=total_violations,
violations=violations_grammar,
severe=severe_format,
files=files_format))
def main():
""" Entry point for invoking the comply module. """
exit_if_not_compatible()
if comply.PROFILING_IS_ENABLED:
printdiag(('Profiling is enabled by default; '
'profiling should only be enabled through --profile or for debugging purposes'),
as_error=True)
if not supports_unicode():
if not is_windows_environment():
# do not warn about this on Windows, as it probably won't work anyway
printdiag('Unsupported shell encoding \'{0}\'. '
'Set environment variable `PYTHONIOENCODING` as UTF-8:\n'
'\texport PYTHONIOENCODING=UTF-8'
.format(diagnostics.encoding),
as_error=True)
arguments = docopt(__doc__, version='comply ' + __version__)
enable_profiling = arguments['--profile']
comply.PROFILING_IS_ENABLED = enable_profiling
is_verbose = arguments['--verbose']
if enable_profiling and not is_verbose:
printdiag('Profiling is enabled; --verbose was set automatically')
is_verbose = True
is_strict = arguments['--strict']
only_severe = arguments['--only-severe']
checks = expand_params(arguments['--check'])
exceptions = expand_params(arguments['--except'])
severities = ([RuleViolation.DENY] if only_severe else
[RuleViolation.DENY, RuleViolation.WARN, RuleViolation.ALLOW])
# remove potential duplicates
checks = list(set(checks))
exceptions = list(set(exceptions))
rules = filtered_rules(checks, exceptions, severities)
reporting_mode = arguments['--reporter']
reporter = make_reporter(reporting_mode)
reporter.suppress_similar = not is_strict
reporter.is_strict = is_strict
reporter.is_verbose = is_verbose
if arguments['--limit'] is not None:
reporter.limit = int(arguments['--limit'])
if not comply.printing.results.isatty() and reporter.suppress_similar:
# when piping output elsewhere, let it be known that some results might be suppressed
printdiag('Suppressing similar violations; results may be omitted '
'(set `--strict` to show everything)')
inputs = arguments['<input>']
time_started_report = datetime.datetime.now()
report = make_report(inputs, rules, reporter)
should_emit_verbose_diagnostics = reporter.is_verbose and report.num_files > 0
if should_emit_verbose_diagnostics:
print_rules_checked(rules, since_starting=time_started_report)
if comply.PROFILING_IS_ENABLED:
print_profiling_results(rules)
if should_emit_verbose_diagnostics:
print_report(report)
if report.num_severe_violations > 0:
# everything went fine; severe violations were encountered
sys.exit(EXIT_CODE_SUCCESS_WITH_SEVERE_VIOLATIONS)
else:
# everything went fine; violations might have been encountered
sys.exit(EXIT_CODE_SUCCESS)
if __name__ == '__main__':
# note that --profile does *not* cause PROFILING_IS_ENABLED to be True at this point!
# a developer must explicitly set PROFILING_IS_ENABLED to True to enable cProfile runs
# this allows users to run the included benchmarking utilities without also
# incurring the heavy duty cProfile runner, which is only interesting for developers
if comply.PROFILING_IS_ENABLED:
import cProfile
import pstats
filename = 'comply-profiling'
cProfile.run('main()', filename)
p = pstats.Stats(filename)
with open(filename, 'w') as file:
p.stream = file
p.sort_stats('time').print_stats(20)
with open(filename) as file:
s = file.read()
print('\n' + ('=' * len(s.splitlines()[0])))
print('Profiling results - ', end='')
print(s)
if os.path.exists(filename):
os.remove(filename)
else:
# we don't want to run update checks when we're profiling
check_for_update()
main()
```
#### File: comply/reporting/human.py
```python
import os
from comply.rules.rule import RuleViolation
from comply.reporting.base import Reporter
from comply.printing import printout, Colors
class HumanReporter(Reporter):
""" Provides reporting output (including suggestions) formatted for human readers. """
def report(self, violations: list, path: str):
# determine absolute path of file
absolute_path = os.path.abspath(path)
# group violations by reason so that we can suppress similar ones
grouped = self.group_by_reason(violations)
num_reported_results = 0
for reason, violations in grouped.items():
results = []
for violation in violations:
result = self.formatted_result(violation, reason, absolute_path)
results.append(result)
num_reported_results += self.report_results(results, prefix_if_suppressed='\n')
if self.is_verbose and num_reported_results > 0:
# make sure we separate the "Checking..." message with a newline
            # note that this only occurs when --verbose is set
printout('')
def formatted_result(self, violation: RuleViolation, reason: str, path: str) -> str:
""" Return a formatted result of a rule violation. """
rule = violation.which
rule.augment(violation)
location = Colors.DARK + '{0}:'.format(path) + Colors.RESET
severity = RuleViolation.report_severity_as(rule.severity, self.is_strict)
severity_color = (Colors.DENY if severity > RuleViolation.WARN else
(Colors.WARN if severity > RuleViolation.ALLOW else
Colors.ALLOW))
if reason is None or len(reason) == 0:
reason = ('Severe violation' if severity > RuleViolation.WARN else
('Cautioned violation' if severity > RuleViolation.ALLOW else
'Allowed violation'))
why = '{tint}{0} {vague}[{1}]'.format(reason, rule.name,
tint=severity_color,
vague=Colors.VAGUE) + Colors.RESET
solution = rule.solution(violation)
output = '{reason} in\n{location}'.format(
reason=why, location=location)
if len(violation.lines) > 0:
context = '\n'
for i, (linenumber, line) in enumerate(violation.lines):
# a "line" can, in some cases, actually span several lines
# (typically rules that match functions with parameters spanning several lines,
# so the entire function signature is considered "the line")
expanded_lines = HumanReporter.expand_line(linenumber, line)
for j, (n, l) in enumerate(expanded_lines):
if n is None:
n = ''
line = l.expandtabs(4)
lineno = str(n)
pad = ' '
context += Colors.EMPHASIS + lineno + Colors.RESET
context += Colors.RESET + '{0}{1}'.format(
('\t' + pad) if len(lineno) < 4 else pad, line) # assumes lineno < 9999
if j != len(expanded_lines) - 1:
context += '\n'
if i != len(violation.lines) - 1:
context += '\n'
output += context
if solution is not None and len(solution) > 0:
output += '\n{strong}{suggestion}'.format(
suggestion=solution, strong=Colors.STRONG)
return '\n' + output + Colors.RESET
@staticmethod
def expand_line(line_number: int, line: str):
""" Like str.splitlines() except including line numbers. """
if len(line) == 0:
return [(line_number, line)]
lines = []
for i, l in enumerate(line.splitlines()):
lines.append(
(line_number + i if line_number is not None else None, l)
)
return lines
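# Behaviour sketch: expand_line(10, 'a\nb') -> [(10, 'a'), (11, 'b')], while
# expand_line(None, 'a\nb') keeps None as the line number for every entry.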
```
#### File: comply/rules/rule.py
```python
import datetime
import comply
from typing import List, Tuple
from comply.rules.report import CheckFile
from comply.printing import can_apply_colors, Colors
class RuleViolation:
""" Represents an occurence of a rule violation. """
""" A hint to indicate that a violation typically only occur once per file. """
ONCE_PER_FILE = 1
""" A hint to indicate that a violation may occur more than once per file. """
MANY_PER_FILE = 0
""" A severity indicator for violations that have a negative impact,
but can't be objectively deemed an issue.
These violations typically represent code smells or refactoring opportunities.
"""
ALLOW = 0
""" A severity indicator for violations that have an objectively negative impact.
These violations should be considered warnings.
This is the default severity.
"""
WARN = 1
""" A severity indicator for violations that have an objectively severe negative impact.
These violations should be considered errors.
"""
DENY = 2
def __init__(self, which: 'Rule', starting: (int, int), ending: (int, int), lines: List[Tuple[int, str]], meta: dict=None):
self.which = which
self.starting = starting
self.ending = ending
self.lines = lines
self.meta = meta
def __repr__(self):
return '{0} at {1}'.format(self.which, self.lines)
def index_of_starting_line(self) -> int:
""" Return the index of the line where this violation occurs.
Note that the index *is not* the same as the line number.
"""
violating_line_number = self.starting[0]
return self.index_of_line_number(violating_line_number)
def index_of_line_number(self, line_number: int) -> int:
""" Return the index of the line that corresponds to a line number.
Note that the index *is not* the same as the line number.
"""
line_numbers = [line[0] for line in self.lines]
violating_line_index = line_numbers.index(line_number)
return violating_line_index
@staticmethod
def report_severity_as(severity: int, is_strict: bool) -> int:
""" Return an elevated severity indicator for some severities when strict compliance
is enabled.
ALLOW becomes WARN
WARN remains WARN
DENY remains DENY
"""
should_increase_severity = severity < RuleViolation.WARN and is_strict
return severity + (1 if should_increase_severity else 0)
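# Illustration: with strict compliance enabled,
# RuleViolation.report_severity_as(RuleViolation.ALLOW, is_strict=True) returns
# RuleViolation.WARN, while WARN and DENY are returned unchanged.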
class Rule:
""" Represents a single rule. """
def __init__(self, name: str, description: str, suggestion: str=None):
self.name = name
self.description = description
self.suggestion = suggestion
if comply.PROFILING_IS_ENABLED:
self.time_started_collecting = None
self.total_time_spent_collecting = 0
def __repr__(self):
name = (self.name
if self.name is not None
else '<unnamed>')
return '[{0}]'.format(name)
def reason(self, violation: RuleViolation=None):
""" Return a reason for why a given violation occurred.
Base behavior is to format any associated meta information from the violation into
the reason/description string as defined by the rule.
Subclasses may override to provide customized formatting.
"""
description = self.description
if description is None or len(description) == 0:
return description
if violation.meta is None or len(violation.meta) == 0:
return description
return description.format(**violation.meta)
def solution(self, violation: RuleViolation=None):
""" Return a solution for fixing a given violation.
Base behavior is to format any associated meta information from the violation into
the solution/suggestion string as defined by the rule.
Subclasses may override to provide customized formatting.
"""
suggestion = self.suggestion
if suggestion is None or len(suggestion) == 0:
return suggestion
if violation.meta is None or len(violation.meta) == 0:
return suggestion
return suggestion.format(**violation.meta)
def augment_by_color(self, violation: RuleViolation):
""" Augment the offending lines by applying highlight coloring that span the range of
the occurrence.
Subclasses may override to provide a customized result output.
"""
starting_line_number, starting_column = violation.starting
ending_line_number, ending_column = violation.ending
starting_line_index = violation.index_of_line_number(starting_line_number)
ending_line_index = violation.index_of_line_number(ending_line_number)
starting_char_index = starting_column - 1
ending_char_index = ending_column - 1
# go through each affected line, with the ending line included
for line_index in range(starting_line_index, ending_line_index + 1):
line_number, line = violation.lines[line_index]
if line_index == starting_line_index:
# markup begins on this line
if starting_line_index == ending_line_index:
# markup also ends on this line
line = (line[:starting_char_index] + Colors.BAD +
line[starting_char_index:ending_char_index] + Colors.RESET +
line[ending_char_index:])
else:
# markup exceeds this line
line = (line[:starting_char_index] + Colors.BAD +
line[starting_char_index:] + Colors.RESET)
elif line_index == ending_line_index:
# markup ends on this line
line = (Colors.BAD + line[:ending_char_index] +
Colors.RESET + line[ending_char_index:])
else:
# markup spans entire line
line = (Colors.BAD + line +
Colors.RESET)
violation.lines[line_index] = (line_number, line)
def augment_by_marker(self, violation: RuleViolation):
""" Augment the offending lines by adding an indicator marker (^) at the location
of the occurrence.
Subclasses may override to provide a customized result output.
"""
starting_line_number, starting_column = violation.starting
ending_line_number, ending_column = violation.ending
if starting_column == 0 or ending_column == 0:
return
violation_is_in_lines = False
for (line_number, line) in violation.lines:
if line_number == starting_line_number:
violation_is_in_lines = True
break
if not violation_is_in_lines:
return
prefix = ' ' * (starting_column - 1)
suffix = '-' * ((ending_column - starting_column) - 1)
line = (None, prefix + '^' + suffix)
starting_line_index = violation.index_of_line_number(starting_line_number)
ending_line_index = len(violation.lines) - 1
if starting_line_index < ending_line_index:
violation.lines.insert(starting_line_index + 1, line)
else:
violation.lines.append(line)
def augment(self, violation: RuleViolation):
""" Augment the offending lines of a violation to improve hints of its occurrence.
Default implementation either applies coloring spanning the range of the occurrence, or
provides a ^-marker indicating the location of the occurrence depending on whether
coloring is supported.
"""
if len(violation.lines) == 0:
# nothing to augment; this violation has not captured any lines
return
if can_apply_colors():
self.augment_by_color(violation)
else:
self.augment_by_marker(violation)
def violate(self, at: (int, int), to: (int, int)=None, lines: List[Tuple[int, str]]=list(), meta: dict=None) -> RuleViolation:
""" Return a rule violation spanning over a range of consecutive line numbers and
columns.
Captured lines do not have to match with the provided ranges.
"""
if to is None:
to = at
return RuleViolation(self, at, to, lines, meta)
def violate_at_file(self, file: CheckFile) -> RuleViolation:
""" Return a violation spanning no lines or characters, starting from the beginning of a
file.
"""
return self.violate(at=file.line_number_at_top())
def violate_at_match(self, file: CheckFile, at) -> RuleViolation:
""" Return a rule violation spanning the full result of a match. """
return self.violate_at_character_range(file, starting=at.start(), ending=at.end())
def violate_at_character_range(self, file: CheckFile, starting: int, ending: int=-1) -> RuleViolation:
""" Return a rule violation spanning over a range of character indices. """
if ending < 0:
ending = starting
starting_line_number, starting_column = file.line_number_at(starting)
ending_line_number, ending_column = file.line_number_at(ending)
offending_lines = file.lines_in_character_range((starting, ending))
return self.violate(at=(starting_line_number, starting_column),
to=(ending_line_number, ending_column),
lines=offending_lines)
def collect(self, file: CheckFile) -> List[RuleViolation]:
""" Analyze a given text and return a list of any found violations.
Subclasses should override and provide rule-specific collection logic.
"""
return []
@property
def severity(self) -> int:
""" Return a number indicating the severity of violating this rule. """
return RuleViolation.WARN
@property
def collection_hint(self) -> int:
""" Return a hint indicating how often this rule may be violated per file.
For example, some rule violations can only occur once per file; others more than once.
"""
return RuleViolation.MANY_PER_FILE
@property
def triggering_filename(self) -> str:
""" Return an assumed filename for a file triggering violations.
This is only used for test purposes.
"""
return None
@property
def triggers(self) -> List[str]:
""" Return a list of texts containing triggering examples.
This is only used for test purposes.
"""
return []
@property
def nontriggers(self) -> List[str]:
""" Return a list of texts containing non-triggering examples.
This is only used for test purposes.
"""
return []
def profile_begin(self):
""" Mark the beginning of a violation collection. """
self.time_started_collecting = datetime.datetime.now()
def profile_end(self):
""" Mark the end of a violation collection and accumulate the time taken. """
time_since_started_collecting = datetime.datetime.now() - self.time_started_collecting
time_spent_collecting = time_since_started_collecting / datetime.timedelta(seconds=1)
self.total_time_spent_collecting += time_spent_collecting
self.time_started_collecting = None
@staticmethod
def rules_in(modules: list) -> list:
""" Return a list of instances of all Rule-subclasses found in the provided modules.
Does not recurse through submodules.
"""
classes = []
def is_rule_implementation(cls):
""" Determine whether a class is a Rule implementation. """
return cls != Rule and type(cls) == type and issubclass(cls, Rule)
for module in modules:
for item in dir(module):
attr = getattr(module, item)
if is_rule_implementation(attr):
classes.append(attr)
instances = [c() for c in classes]
return instances
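# Usage sketch, mirroring how filtered_rules() uses this in comply.__main__:
#   rules = Rule.rules_in([comply.rules.standard])
#   # -> one instance per Rule subclass defined directly in that module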
```
#### File: rules/standard/ambiguous_function.py
```python
import re
from comply.rules.rule import *
from comply.rules.patterns import FUNC_PROT_PATTERN
from comply.printing import Colors
class AmbiguousFunction(Rule):
""" Don't provide ambiguous function declarations.
This mainly pertains to functions with parameterless declarations.
<br/><br/>
In C, a function declaration with no parameters is ambiguous, as it implicitly declares a
function that can take an arbitrary number of parameters.
References:
* [Empty parameter list in C function, do you write func(void) or func()?](https://blog.zhaw.ch/icclab/empty-parameter-list-in-c-function-do-you-write-funcvoid-or-func/)
"""
def __init__(self):
Rule.__init__(self, name='ambiguous-func',
description='Ambiguous function declaration',
suggestion='Add \'void\' to indicate that this is a parameterless function.')
pattern = re.compile(FUNC_PROT_PATTERN)
def augment_by_color(self, violation: RuleViolation):
# assume only one offending line
line_index = violation.index_of_starting_line()
function_line_number, function_line = violation.lines[line_index]
insertion_index = violation.meta['insertion_index']
violation.lines[line_index] = (function_line_number,
function_line[:insertion_index] +
Colors.GOOD + 'void' + Colors.RESET +
function_line[insertion_index:])
def collect(self, file: CheckFile):
offenders = []
text = file.collapsed
for function_match in self.pattern.finditer(text):
function_parameters = function_match.group('params')
if len(function_parameters.strip()) > 0:
# this function has explicitly specified parameters; move on
continue
offending_index = function_match.start('name')
offending_line_number, offending_column = file.line_number_at(offending_index)
character_range = (function_match.start(),
function_match.end())
offending_lines = file.lines_in_character_range(character_range)
function_parameters_starting_index = function_match.start('params')
_, insertion_column = file.line_number_at(function_parameters_starting_index)
offender = self.violate(at=(offending_line_number, offending_column),
lines=offending_lines,
meta={'insertion_index': insertion_column - 1})
offenders.append(offender)
return offenders
@property
def triggers(self):
return [
'void ↓func();'
]
@property
def nontriggers(self):
return [
'void func(void);'
]
```
#### File: rules/standard/header_in_header.py
```python
import re
from comply.rules.rule import *
from comply.rules.patterns import INCLUDE_PATTERN
from comply.rules.standard.require_symbols import symbols_for_inclusion
class HeaderInHeader(Rule):
""" Don't include other headers if you can avoid it.
Avoiding header inclusions can help keep compile times low.
    Forcing source files to include everything they need helps provide a clear picture of
the dependencies of the particular unit and makes it easier to spot redundancies.
References:
* Our Machinery: [Physical Design](http://ourmachinery.com/post/physical-design)
* <NAME>: [Notes on Programming in C](http://www.lysator.liu.se/c/pikestyle.html)
* <NAME>: [c-style](https://github.com/mcinglis/c-style#include-the-definition-of-everything-you-use)
"""
def __init__(self):
Rule.__init__(self, name='header-in-header',
description='Header included in header',
suggestion='Replace \'{inclusion}\' with a forward-declaration for each '
'needed type.')
pattern = re.compile(INCLUDE_PATTERN)
exceptions = ['stdbool.h',
'stdint.h',
'stddef.h',
'inttypes.h']
def is_excepted(self, included_filename: str):
allow_inclusion = False
for exception in self.exceptions:
if exception in included_filename:
allow_inclusion = True
break
return allow_inclusion
def is_inclusion_for_completeness(self, symbols: list) -> bool:
if len(symbols) == 0:
return False
for symbol in symbols:
if not is_symbol_included_for_completeness(symbol):
return False
return True
def collect(self, file: CheckFile):
if '.h' not in file.extension:
return []
offenders = []
for inclusion in self.pattern.finditer(file.stripped):
include_filename = file.original[inclusion.start('filename'):
inclusion.end('filename')]
if self.is_excepted(include_filename):
continue
symbols = symbols_for_inclusion(file, inclusion)
if self.is_inclusion_for_completeness(symbols):
continue
include = file.original[inclusion.start():
inclusion.end()]
offender = self.violate_at_match(file, at=inclusion)
offender.meta = {'inclusion': include}
offenders.append(offender)
return offenders
@property
def severity(self):
return RuleViolation.ALLOW
@property
def triggering_filename(self):
return 'header.h'
@property
def triggers(self):
return [
('// some header file\n'
'↓#include <header.h>'),
('// some header file\n'
'↓#include <header.h> // type')
]
@property
def nontriggers(self):
return [
('#include <stdbool.h>\n'
'#include <stdint.h>\n'
'#include <inttypes.h>'),
('// some header file\n'
'struct symbol_t;'),
'#include <header.h> // type :completeness',
'#include <header.h> // type:completeness'
]
def is_symbol_included_for_completeness(symbol: str) -> bool:
""" Determine whether a symbol is listed for sake of type completeness. """
return symbol.endswith(':completeness')
```
#### File: rules/standard/including_source_file.py
```python
import re
from comply.rules.rule import *
from comply.rules.patterns import INCLUDE_PATTERN
class IncludingSourceFile(Rule):
""" Don't include source files (.c) in other source files.
This is advisable to avoid potentially compiling the same unit twice, resulting in multiple
symbol definitions and linker errors.
"""
def __init__(self):
Rule.__init__(self, name='including-source',
description='Including source file',
suggestion='Find a way to remove the #include directive.')
pattern = re.compile(INCLUDE_PATTERN)
def collect(self, file: CheckFile):
offenders = []
for inclusion in self.pattern.finditer(file.stripped):
# note that we can't just grab the string captured by the 'filename' group in this case
# because we're searching on stripped source (which might have stripped literals)
included_file = file.original[inclusion.start('filename'):inclusion.end('filename')]
included_file = included_file.strip()
if included_file.endswith('.c'):
offender = self.violate_at_match(file, at=inclusion)
offenders.append(offender)
return offenders
@property
def triggers(self):
return [
'↓#include "source.c"',
('// some source file\n'
'↓#include <source.c>')
]
@property
def nontriggers(self):
return [
('// some header file\n'
'#include <file.h>')
]
```
#### File: rules/standard/invisible_characters.py
```python
from comply.rules.rule import *
from comply.printing import Colors
class InvisibleCharacters(Rule):
""" Don't put invisible characters in code.
Invisible characters (in code, i.e. not in literals) serve no useful purpose and may confuse
both editing tools and readers.
"""
def __init__(self):
Rule.__init__(self, name='invisible-characters',
description='File contains invisible characters (found {count})',
suggestion='Delete each occurrence or replace with whitespace.')
INVISIBLES = ['\u200b', '\u200c', '\u200d',
'\uFEFF']
def augment_by_color(self, violation: RuleViolation):
# assume only one offending line
linenumber, line = violation.lines[0]
for invisible in InvisibleCharacters.INVISIBLES:
line = line.replace(invisible, Colors.BAD + '~' + Colors.RESET)
augmented_line = (linenumber, line)
count = violation.meta['count'] if 'count' in violation.meta else 0
count_in_line = violation.meta['count_in_line'] if 'count_in_line' in violation.meta else 0
if count > count_in_line:
violation.lines = [
(0, Colors.EMPHASIS + 'listing first occurrence:' + Colors.RESET),
augmented_line
]
else:
violation.lines[0] = augmented_line
def collect(self, file: CheckFile):
offenders = []
text = file.original
invisibles_found = 0
for invisible in InvisibleCharacters.INVISIBLES:
invisibles_found += text.count(invisible)
if invisibles_found > 0:
first_invis_index = -1
for invisible in InvisibleCharacters.INVISIBLES:
first_invis_index = text.find(invisible)
if first_invis_index != -1:
break
assert first_invis_index != -1
linenumber, column = file.line_number_at(first_invis_index)
offending_line = (linenumber, file.lines[linenumber - 1])
invisibles_in_line = 0
for invisible in InvisibleCharacters.INVISIBLES:
invisibles_in_line += offending_line[1].count(invisible)
offender = self.violate(at=(linenumber, column),
lines=[offending_line],
meta={'count': invisibles_found,
'count_in_line': invisibles_in_line})
offenders.append(offender)
return offenders
@property
def severity(self):
return RuleViolation.DENY
@property
def collection_hint(self):
return RuleViolation.ONCE_PER_FILE
@property
def triggers(self):
return [
'source with an ↓\uFEFF invisible',
'source with an ↓\u200b invisible',
'source with an ↓\u200c invisible',
'source with an ↓\u200d invisible',
]
@property
def nontriggers(self):
return [
'source without invisibles'
]
```
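As a quick sanity check of what the rule above scans for: the characters in `INVISIBLES` are zero-width code points plus the byte-order mark, so plain `str.count` and `str.find` are enough to count and locate them.
```python
# Small standalone demo of counting and locating the invisible characters
# listed by the rule above.
INVISIBLES = ['\u200b', '\u200c', '\u200d', '\uFEFF']

line = 'int a;\u200b int b;'

count = sum(line.count(invisible) for invisible in INVISIBLES)
assert count == 1
assert line.find('\u200b') == 6  # the column a reporter would point at
```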
#### File: rules/standard/missing_braces.py
```python
import re
from comply.rules.rule import *
from comply.rules.patterns import KEYWORDS
from comply.printing import Colors
class MissingBraces(Rule):
""" Always surround the bodies of control statements with scoped braces.
You might be tempted to save a line or two by not adding braces to that single-line `if`
statement.
<br/><br/>
However, such a decision may bite you later on, as an unsuspecting programmer may fail to
    notice the lack of braces and unintentionally write code in the wrong scope, leading to
potentially undesirable or unpredictable consequences.
References:
* Carnegie Mellon University, SEI: [CERT C Secure Coding Standard](https://wiki.sei.cmu.edu/confluence/display/c/EXP19-C.+Use+braces+for+the+body+of+an+if%2C+for%2C+or+while+statement)
"""
def __init__(self):
Rule.__init__(self, name='missing-braces',
description='Body of control statement not surrounded by braces',
suggestion='Add opening and ending braces for the body of the control '
'statement.')
pattern = re.compile(r'\b({keywords})\b'.format(keywords=KEYWORDS))
def augment_by_color(self, violation: RuleViolation):
line_number, line = violation.lines[0]
i = len(line) - len(line.lstrip())
leading_space = line[:i]
violation.lines = [
(line_number, line + Colors.GOOD + ' {' + Colors.RESET),
(line_number + 1, leading_space + '...'),
(line_number + 2, leading_space + Colors.GOOD + '}' + Colors.RESET)
]
def collect(self, file: CheckFile):
offenders = []
# first, strip anything inside parens; this will help us find dangling bodies
from comply.util.stripping import strip_parens
text = strip_parens(file.stripped)
for dangling_match in self.pattern.finditer(text):
# start from the ending of the control keyword
# note that this procedure assumes all paren blocks have been stripped
# e.g. `if (true) {` is expected as `if {`
ending = dangling_match.end()
is_missing_opening_brace = True
# move through the immediately following characters until finding a brace or
# any other character (skipping whitespace and newlines)
for c in text[ending:]:
if c == '{':
# this body is properly braced
is_missing_opening_brace = False
break
if c in [' ', '\r', '\n']:
# there can be any amount of whitespace or newline before the body
continue
# any other character encountered; must mean that an opening brace is missing
break
if not is_missing_opening_brace:
# move on to the next match
continue
offending_index = dangling_match.start()
offending_line_number, offending_column = file.line_number_at(offending_index)
lines = [(offending_line_number, file.lines[offending_line_number - 1])]
offender = self.violate(at=(offending_line_number, offending_column),
lines=lines)
offenders.append(offender)
return offenders
@property
def triggers(self):
return [
'↓if (true) run();',
('↓if (true)\n'
' run();')
]
@property
def nontriggers(self):
return [
'if (true) { run(); }',
('if (true) {\n'
' run();\n'
'}')
]
```
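A condensed, standalone sketch of the scan performed in `collect` above: once paren groups are stripped, a control keyword whose body does not start with `{` (ignoring whitespace and newlines) is flagged. The keyword list here is assumed for illustration; comply's actual `KEYWORDS` pattern lives in `comply.rules.patterns`.
```python
# Standalone sketch of the dangling-body scan; not comply's implementation.
import re

# assumed keyword list for illustration only
KEYWORDS = r'if|else|for|while|do|switch'


def has_dangling_body(stripped_text: str) -> bool:
    for match in re.finditer(r'\b({})\b'.format(KEYWORDS), stripped_text):
        for c in stripped_text[match.end():]:
            if c == '{':
                break  # properly braced body
            if c in (' ', '\r', '\n'):
                continue  # whitespace/newlines may precede the body
            return True  # any other character means the brace is missing
    return False


# `if (true) run();` is expected as `if  run();` once parens are stripped
assert has_dangling_body('if  run();')
assert not has_dangling_body('if  {\n    run();\n}')
```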
#### File: rules/standard/parameterless_function.py
```python
import re
from comply.rules.standard.ambiguous_function import AmbiguousFunction
from comply.rules.patterns import FUNC_IMPL_PATTERN
class ParameterlessFunction(AmbiguousFunction):
""" Always specify parameters as `void` if a function implementation takes zero parameters.
Technically, this is not required for the compiler to do its job, but being explicit helps in
keeping a clear and consistent interface.
"""
def __init__(self):
AmbiguousFunction.__init__(self)
self.name = 'paramless-func'
self.description = 'Parameterless function does not specify parameters as \'void\''
self.pattern = re.compile(FUNC_IMPL_PATTERN)
@property
def triggers(self):
return [
'void ↓func() { ... }'
]
@property
def nontriggers(self):
return [
'void func(void) { ... }'
]
```
#### File: rules/standard/prefer_sized_integers.py
```python
import re
from comply.rules.rule import *
def match_exactly(int_type: str) -> str:
return r'\bunsigned\s+{type}\b|\bsigned\s+{type}\b|(\b{type}\b)'.format(
type=int_type)
def match_signed(int_type: str) -> str:
return r'\b(signed {type})\b'.format(
type=int_type)
def match_unsigned(int_type: str) -> str:
return r'\b(unsigned {type})\b'.format(
type=int_type)
class PreferSizedIntegers(Rule):
""" Always use explicitly sized integer types (e.g. `stdint.h`).
Being explicit about the type and size that you want to use helps improve portability.
<br/><br/>
It also increases readability as it makes types read more uniformly, and does away
entirely with the `unsigned` and `signed` keywords.
It's worth noting that when sticking with basic types (e.g. `int`), the compiler may just do a
*better* job than you at deciding which size is actually the optimal choice.
<br/><br/>
However, leaving that an implicit choice could result in unexpected issues down the line.
<br/><br/>
Being explicit lets you avoid making assumptions. The trade-off is potentially losing some
    (often negligible) performance.
References:
* <NAME>: [How to C in 2016: Writing Code- Types](https://matt.sh/howto-c)
"""
def __init__(self):
Rule.__init__(self, name='prefer-stdint',
description='\'{int}\' used instead of \'{stdint}\'',
suggestion='Use \'{stdint}\' instead of \'{int}\'.')
INT_TYPES = {
# note that unsigned|signed char is often a perfectly valid choice over uint8_t|int8_t
# so we don't include that in the table
'unsigned short': ('uint16_t', match_unsigned('short')),
'signed short': ('int16_t', match_signed('short')),
'short': ('int16_t', match_exactly('short')),
'unsigned int': ('uint32_t', match_unsigned('int')),
'signed int': ('int32_t', match_signed('int')),
'int': ('int32_t', match_exactly('int')),
'unsigned long': ('uint64_t', match_unsigned('long')),
'signed long': ('int64_t', match_signed('long')),
'long': ('int64_t', match_exactly('long')),
'unsigned long long': ('uint64_t', match_unsigned('long long')),
'signed long long': ('int64_t', match_signed('long long')),
'long long': ('int64_t', match_exactly('long long')),
}
def collect(self, file: CheckFile):
offenders = []
text = file.stripped
ranges_collected = []
int_types = [int_type for int_type in PreferSizedIntegers.INT_TYPES]
# sort by length of type
sorted_int_types = sorted(int_types, key=lambda int_type: len(int_type.split(' ')))
# go through each type, but reversed so that we start with the longest types
for int_type in reversed(sorted_int_types):
prefer_int_type, pattern = PreferSizedIntegers.INT_TYPES[int_type]
for int_match in re.finditer(pattern, text):
if not int_match.group(1):
# expect actual match in first group (a match may occur without a capture)
continue
type_already_collected = False
for collected_type_start, collected_type_end in ranges_collected:
if collected_type_start <= int_match.start(1) <= collected_type_end:
type_already_collected = True
break
if type_already_collected:
continue
int_type_range = (int_match.start(1), int_match.end(1))
ranges_collected.append(int_type_range)
offender = self.violate_at_match(file, at=int_match)
offender.meta = {'stdint': prefer_int_type,
'int': int_type}
offenders.append(offender)
return offenders
@property
def triggers(self):
return [
'void func(↓int a);'
]
@property
def nontriggers(self):
return [
'void func(int32_t a);'
]
```
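The capture trick behind `match_exactly` is worth spelling out: the qualified alternatives (`unsigned <type>`, `signed <type>`) match without capturing, so `group(1)` is only set when the bare type occurs on its own, which is what `collect` checks for. A self-contained check, reusing the helper exactly as defined above:
```python
import re


def match_exactly(int_type: str) -> str:
    return r'\bunsigned\s+{type}\b|\bsigned\s+{type}\b|(\b{type}\b)'.format(
        type=int_type)


pattern = re.compile(match_exactly('int'))

qualified = pattern.search('unsigned int counter;')
# the qualified form matches, but without setting the capture group
assert qualified is not None and qualified.group(1) is None

bare = pattern.search('int counter;')
# only the bare type sets group(1), which is what triggers a violation
assert bare is not None and bare.group(1) == 'int'
```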
#### File: rules/standard/scope_too_deep.py
```python
import re
from comply.rules.rule import *
from comply.rules.patterns import FUNC_BODY_PATTERN
from comply.util.scope import depth
class ScopeTooDeep(Rule):
""" Don't write deeply nested code.
A deeply nested scope is often an indication of too high complexity and can be
difficult to read.
"""
def __init__(self):
Rule.__init__(self, name='scope-too-deep',
description='Scope is too deep ({depth} > {max} levels)',
suggestion='Avoid nesting code too deeply. Consider refactoring.')
MAX = 3
pattern = re.compile(FUNC_BODY_PATTERN)
def collect(self, file: CheckFile):
offenders = []
text = file.stripped
max_depth = ScopeTooDeep.MAX
for scope_match in self.pattern.finditer(text):
scope_index = scope_match.start()
scope_depth = depth(scope_index, text)
if scope_depth > max_depth:
line_number, column = file.line_number_at(scope_index)
offender = self.violate(at=(line_number, column),
to=(line_number, column + 1),
lines=[(line_number, file.lines[line_number - 1])],
meta={'depth': scope_depth,
'max': max_depth})
offenders.append(offender)
return offenders
@property
def triggers(self):
return [
('void func(...) {\n'
' if (true) {\n'
' if (false) {\n'
' if (true) {\n'
' if (true) ↓{\n'
' ...\n'
' }\n'
' }\n'
' }\n'
' }\n'
'}')
]
@property
def nontriggers(self):
return [
('void func(...) {\n'
' if (true) {\n'
' if (false) {\n'
' if (true) {\n'
' ...\n'
' }\n'
' }\n'
' }\n'
'}')
]
```
#### File: rules/standard/too_many_params.py
```python
import re
from comply.rules.rule import *
from comply.rules.patterns import FUNC_BOTH_PATTERN, FUNC_IMPL_PATTERN
from comply.printing import Colors
class TooManyParams(Rule):
""" Don't exceed 4 parameters per function.
When a function has many parameters, it is often a sign that it is doing too much and would
benefit from being refactored into smaller parts.
Each parameter adds to the complexity of a function, and the more it has, the harder it becomes
to understand (and use).
<br/><br/>
A common practice is to bundle parameters into a `struct` when many parameters are
absolutely necessary (a pattern commonly referred to as *Parameter Object*).
<br/><br/>
    This practice, however, does *not* reduce the complexity of the function,
but it *does* improve its readability.
References:
* Stack Exchange discussion: [Are there guidelines on how many parameters a function should accept?](https://softwareengineering.stackexchange.com/a/145066)
"""
def __init__(self, only_check_implementations: bool=False):
Rule.__init__(self, name='too-many-params',
description='Function might be too broad ({count} > {max} parameters)',
suggestion='This function might be taking on too much work. '
'Consider refactoring.')
# determine whether to only match implementations, or both prototypes and implementations
# (prefer both, as e.g. inline functions won't be caught otherwise-
# since they don't require a prototype, they may end up going unnoticed)
if only_check_implementations:
self.pattern = re.compile(FUNC_IMPL_PATTERN)
MAX = 4
pattern = re.compile(FUNC_BOTH_PATTERN)
def augment_by_color(self, violation: RuleViolation):
line_index = violation.index_of_starting_line()
function_line_number, function_line = violation.lines[line_index]
# note that if we wanted to color up starting from the first exceeding parameter
# we would have a hard time spanning the color over multiple lines, because
# a reporter (e.g. 'human') may decide to clear colors per line
# for now we just mark up the function name
from_index, to_index = violation.meta['range']
augmented_line = (function_line[:from_index] +
Colors.BAD + function_line[from_index:to_index] + Colors.RESET +
function_line[to_index:])
violation.lines[line_index] = (function_line_number, augmented_line)
def collect(self, file: CheckFile):
offenders = []
max_params = TooManyParams.MAX
text = file.collapsed
for function_match in self.pattern.finditer(text):
function_name = function_match.group('name')
function_parameters = function_match.group('params')
# naively splitting by comma (macros may cause trouble here)
number_of_params = len(function_parameters.split(','))
if number_of_params > max_params:
offending_index = function_match.start('name')
offending_line_number, offending_column = file.line_number_at(offending_index)
character_range = (function_match.start(),
function_match.end())
offending_lines = file.lines_in_character_range(character_range)
offender = self.violate(at=(offending_line_number, offending_column),
lines=offending_lines,
meta={'count': number_of_params,
'max': max_params,
'range': (offending_column - 1,
offending_column - 1 + len(function_name))})
offenders.append(offender)
return offenders
@property
def triggers(self):
return [
'void ↓func(int, int, int, unsigned short, long);',
'void ↓func(int a, int b, int c, unsigned short d, long f);',
'void ↓func(int a, int b, int c, unsigned short d, long f) { ... }'
]
@property
def nontriggers(self):
return [
'void func(int, int, int, unsigned short);',
'void func(int a, int b, int c, unsigned short d);',
'void func(int a, int b, int c, unsigned short d) { ... }'
]
```
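The parameter count above is a naive comma split, as the comment in `collect` notes; a tiny standalone illustration of both the normal case and the macro caveat (the `COMPAT_PARAMS` macro name is made up for the example):
```python
# Normal case: five parameters, five comma-separated pieces.
params = 'int a, int b, int c, unsigned short d, long f'
assert len(params.split(',')) == 5

# Caveat noted in collect(): a macro in the parameter list can contain commas of
# its own, so the naive split over- or under-counts. COMPAT_PARAMS is hypothetical.
tricky = 'COMPAT_PARAMS(int, int), long f'
assert len(tricky.split(',')) == 3  # counted as three regardless of what it expands to
```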
#### File: rules/standard/unified_header.py
```python
import re
from comply.rules.rule import *
from comply.rules.patterns import INCLUDE_PATTERN, FUNC_PROT_PATTERN, FUNC_BODY_PATTERN
class UnifiedHeader(Rule):
""" Don't use unified headers if you can avoid it.
A unified header is a header file whose only purpose is to include other header files.
    As convenient as they may be, unified headers do not promote modularity and increase
compile time in cases where the consumer does not need all of the included headers.
References:
* <NAME>: [c-style](https://github.com/mcinglis/c-style#avoid-unified-headers)
"""
def __init__(self):
Rule.__init__(self, name='unified-header',
description='Header contains no prototype declarations',
suggestion='Avoid having headers that only includes other headers.')
pattern = re.compile(INCLUDE_PATTERN)
def collect(self, file: CheckFile):
if '.h' not in file.extension:
return []
offenders = []
text = file.stripped
has_includes = self.pattern.search(text) is not None
if has_includes:
has_function_prototypes = re.search(FUNC_PROT_PATTERN, text) is not None
has_bodies = re.search(FUNC_BODY_PATTERN, text) is not None
if not has_function_prototypes and not has_bodies:
offender = self.violate_at_file(file)
offenders.append(offender)
return offenders
@property
def severity(self):
return RuleViolation.ALLOW
@property
def collection_hint(self):
return RuleViolation.ONCE_PER_FILE
@property
def triggering_filename(self):
return 'header.h'
@property
def triggers(self):
return [
('▶// some header file\n'
'#include <header.h>\n'
'#include "other_header.h"')
]
@property
def nontriggers(self):
return [
('// some header file\n'
'#include <header.h>\n'
             '#include "other_header.h"\n'
'void proto_func(int a);')
]
```
#### File: comply/test/expect.py
```python
import re
from comply.checking import check_text
from comply.rules.report import CheckFile
TRIGGER_AT = '↓'
TRIGGER_LINE = '▶'
pattern = re.compile(r'([{0}{1}])'.format(TRIGGER_LINE, TRIGGER_AT))
def match_triggers(texts: list, rule, assumed_filename: str=None):
""" Check texts for any violations to a rule and determine whether they match the
expected results.
"""
for text in texts:
check_triggers(text, rule, assumed_filename)
def check_triggers(text: str, rule, assumed_filename: str=None):
""" Check a text for any violations to a rule and assert whether they
correspond to the expected count and location.
"""
# find all expected violation triggers
triggers = []
for trigger in pattern.finditer(text):
trigger_symbol = trigger.group()
# the offset equals number of triggers added so far- assuming we find them in order
# (because each trigger before another trigger should be considered as not-there, since
# they will all be removed from the final text)
trigger_index_offset = len(triggers)
triggers.append((trigger_symbol,
trigger.start() - trigger_index_offset))
# make a clean snippet without triggers
snippet, num_triggers_removed = pattern.subn('', text)
assert len(triggers) == num_triggers_removed
# determine locations of all expected violations
trigger_locations = []
for trigger_symbol, trigger_index in triggers:
should_span_entire_line = trigger_symbol == TRIGGER_LINE
trigger_line_number, trigger_column = CheckFile.line_number_in_text(
trigger_index, snippet, span_entire_line=should_span_entire_line)
trigger_locations.append((trigger_line_number, trigger_column))
# determine number of expected violations
expected_number_of_violations = len(trigger_locations)
if assumed_filename is not None:
result = check_text(snippet, [rule], assumed_filename)
else:
result = check_text(snippet, [rule])
# make sure resulting violations are in ascending order to match the trigger indices
violations_in_order = sorted(result.violations,
key=lambda v: v.starting)
total_violations = len(violations_in_order)
if total_violations != expected_number_of_violations:
violation_locations = [violation.starting for violation in violations_in_order]
raise AssertionError(('[{5}] Found unexpected number of violations ({0} != {1}):\n'
'Found {2}\n'
'Expected {3}\n'
'In text:\n{4}').format(
total_violations, expected_number_of_violations,
violation_locations, trigger_locations, text, rule.name))
for i, violation in enumerate(violations_in_order):
trigger_location = trigger_locations[i]
if violation.starting != trigger_location:
raise AssertionError(('[{3}] Found unexpected violation ({0} != {1})\n'
'In text:\n{2}').format(
violation.starting, trigger_location, text, rule.name))
```
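To make the trigger bookkeeping above concrete, here is a self-contained walk-through using plain `re`: every marker found before another shifts later indices left by one once all markers are removed, which is exactly what the running offset compensates for.
```python
import re

TRIGGER_AT = '↓'
pattern = re.compile('[{}]'.format(TRIGGER_AT))

text = 'void ↓func(int, int, int, unsigned short, ↓long);'

triggers = []
for trigger in pattern.finditer(text):
    offset = len(triggers)  # markers found so far will be stripped from the snippet
    triggers.append(trigger.start() - offset)

snippet, removed = pattern.subn('', text)
assert removed == len(triggers) == 2
assert triggers == [5, 41]
# the adjusted indices point at the expected locations in the clean snippet
assert snippet[triggers[0]:triggers[0] + 4] == 'func'
assert snippet[triggers[1]:triggers[1] + 4] == 'long'
```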
#### File: comply/test/test_rules.py
```python
import comply.rules
from comply.rules.rule import Rule
from test.expect import match_triggers
rulesets = [comply.rules.standard]
rules = Rule.rules_in(rulesets)
def test_rule_triggers():
for rule in rules:
triggers = rule.triggers
nontriggers = rule.nontriggers
if len(triggers) == 0:
raise AssertionError('[{0}] No triggering texts to test'.format(rule.name))
texts = triggers + nontriggers
match_triggers(texts, rule, assumed_filename=rule.triggering_filename)
def test_no_duplicates():
seen_names = set()
for rule in rules:
name = rule.name
assert name not in seen_names
seen_names.add(name)
```
#### File: comply/test/test_scope.py
```python
from comply.util.scope import depth
def test_scope_depth():
text = '{ } scope [0] { { } [1] { [2] } [3] } { [4]'
assert depth(text.index('[0]'), text) == 0
assert depth(text.index('[1]'), text) == 1
assert depth(text.index('[2]'), text) == 2
assert depth(text.index('[3]'), text) == 1
assert depth(text.index('[4]'), text) == 1
```
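For readers unfamiliar with `comply.util.scope.depth`, the assertions above are consistent with counting unclosed braces before the given index; a naive approximation of that behaviour (an assumption for illustration, not the actual implementation):
```python
def naive_depth(index: int, text: str) -> int:
    """ Approximate scope depth as the number of unclosed '{' before index. """
    prefix = text[:index]
    return prefix.count('{') - prefix.count('}')


text = '{ } scope [0] { { } [1] { [2] } [3] } { [4]'
assert naive_depth(text.index('[0]'), text) == 0
assert naive_depth(text.index('[1]'), text) == 1
assert naive_depth(text.index('[2]'), text) == 2
assert naive_depth(text.index('[3]'), text) == 1
assert naive_depth(text.index('[4]'), text) == 1
```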
#### File: comply/test/test_stripping.py
```python
from comply.util.stripping import (
blanked,
strip_parens,
strip_single_line_literals,
strip_single_character_literals,
strip_line_comments,
strip_block_comments
)
def test_blanked():
text = 'abcd'
assert blanked(text) == ' '
text = ('abcd\n'
'efgh')
assert blanked(text) == (' \n'
' ')
assert blanked(text, keepends=False) == ' '
def test_strip_parens():
text = 'if (true) {'
assert strip_parens(text) == 'if {'
text = ('if (true)\n'
'{')
assert strip_parens(text) == ('if \n'
'{')
text = ('if (true &&\n'
' true)\n'
'{')
assert strip_parens(text) == ('if \n'
' \n'
'{')
def test_strip_literal_strings():
text = 'char const * str = "abc";'
assert strip_single_line_literals(text) == 'char const * str = " ";'
text = 'char a = \'"\'; char b = \'"\''
assert strip_single_line_literals(text) == text
def test_strip_literal_chars():
text = 'char c = \'a\', d = \'\'\';'
assert strip_single_character_literals(text) == 'char c = \' \', d = \' \';'
def test_strip_line_comments():
text = 'char a; // strip me'
assert strip_line_comments(text) == 'char a; '
def test_strip_block_comments():
text = '/** strip me */'
assert strip_block_comments(text) == ' '
text = '/** strip me // fully */'
assert strip_block_comments(text) == ' '
text = ('/**\n'
' * strip * me\n'
' */\n'
'char a;')
assert strip_block_comments(text) == (' \n'
' \n'
' \n'
'char a;')
``` |
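A common thread in the stripping utilities tested above is that they are (assumed to be) length-preserving, which is what lets rules map indices from stripped text back onto the original. A minimal sketch of `blanked` under that assumption:
```python
# Sketch only: every character becomes a space, and newlines are kept unless
# keepends is False. This mirrors the assertions above but is not dledger/comply code.
def blanked_sketch(text: str, keepends: bool = True) -> str:
    return ''.join(c if (keepends and c in '\r\n') else ' ' for c in text)


sample = 'abcd\nefgh'
assert len(blanked_sketch(sample)) == len(sample)
assert blanked_sketch(sample).count('\n') == 1
assert '\n' not in blanked_sketch(sample, keepends=False)
```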
{
"source": "jhauberg/dledger",
"score": 3
} |
#### File: dledger/test/test_dates.py
```python
import calendar
from datetime import datetime, date, timedelta
from dledger.dateutil import (
months_between,
in_months,
next_month,
previous_month,
last_of_month,
parse_period,
parse_datestamp,
parse_month,
months_in_quarter,
)
def test_months_between():
assert (
months_between(date(year=2019, month=1, day=1), date(year=2019, month=1, day=1))
== 0
)
assert (
months_between(date(year=2019, month=1, day=1), date(year=2019, month=2, day=1))
== 1
)
assert (
months_between(
date(year=2019, month=3, day=1), date(year=2019, month=12, day=1)
)
== 9
)
assert (
months_between(
date(year=2019, month=12, day=1), date(year=2020, month=3, day=1)
)
== 3
)
assert (
months_between(
date(year=2019, month=1, day=1), date(year=2019, month=12, day=1)
)
== 11
)
assert (
months_between(date(year=2019, month=1, day=1), date(year=2020, month=1, day=1))
== 12
)
assert (
months_between(
date(year=2019, month=12, day=1), date(year=2020, month=1, day=1)
)
== 1
)
assert (
months_between(date(year=2019, month=5, day=1), date(year=2020, month=4, day=1))
== 11
)
def test_months_between_normalized():
assert (
months_between(
date(year=2019, month=1, day=1),
date(year=2019, month=1, day=1),
ignore_years=True,
)
== 12
)
assert (
months_between(
date(year=2019, month=1, day=1),
date(year=2020, month=1, day=1),
ignore_years=True,
)
== 12
)
assert (
months_between(
date(year=2019, month=3, day=1),
date(year=2019, month=12, day=1),
ignore_years=True,
)
== 9
)
assert (
months_between(
date(year=2019, month=1, day=1),
date(year=2019, month=2, day=1),
ignore_years=True,
)
== 1
)
assert (
months_between(
date(year=2019, month=1, day=1),
date(year=2019, month=12, day=1),
ignore_years=True,
)
== 11
)
assert (
months_between(
date(year=2018, month=1, day=1),
date(year=2020, month=1, day=1),
ignore_years=True,
)
== 12
)
assert (
months_between(
date(year=2019, month=5, day=1),
date(year=2020, month=4, day=1),
ignore_years=True,
)
== 11
)
def test_months_in_quarter():
assert months_in_quarter(1) == [1, 2, 3]
assert months_in_quarter(2) == [4, 5, 6]
assert months_in_quarter(3) == [7, 8, 9]
assert months_in_quarter(4) == [10, 11, 12]
try:
_ = months_in_quarter(5)
except ValueError:
assert True
else:
assert False
try:
_ = months_in_quarter(-1)
except ValueError:
assert True
else:
assert False
def test_in_months():
d = in_months(date(year=2019, month=1, day=1), months=1)
assert d.year == 2019 and d.month == 2 and d.day == 1
d = in_months(date(year=2019, month=1, day=15), months=1)
assert d.year == 2019 and d.month == 2 and d.day == 15
d = in_months(date(year=2019, month=1, day=31), months=1)
assert d.year == 2019 and d.month == 2 and d.day == 28
d = in_months(date(year=2019, month=1, day=29), months=1)
assert d.year == 2019 and d.month == 2 and d.day == 28
d = in_months(date(year=2019, month=1, day=1), months=2)
assert d.year == 2019 and d.month == 3 and d.day == 1
d = in_months(date(year=2019, month=1, day=1), months=12)
assert d.year == 2020 and d.month == 1 and d.day == 1
d = in_months(date(year=2019, month=1, day=1), months=-1)
assert d.year == 2018 and d.month == 12 and d.day == 1
d = in_months(date(year=2019, month=1, day=31), months=-1)
assert d.year == 2018 and d.month == 12 and d.day == 31
d = in_months(date(year=2020, month=6, day=1), months=-12)
assert d.year == 2019 and d.month == 6 and d.day == 1
def test_last_of_month():
d = last_of_month(date(year=2020, month=1, day=1))
assert d.year == 2020 and d.month == 1 and d.day == 31
d = last_of_month(date(year=2019, month=6, day=8))
assert d.year == 2019 and d.month == 6 and d.day == 30
def test_next_month():
d = next_month(date(year=2019, month=1, day=1))
assert d.year == 2019 and d.month == 2 and d.day == 1
d = next_month(date(year=2019, month=1, day=16))
assert d.year == 2019 and d.month == 2 and d.day == 1
d = next_month(date(year=2019, month=12, day=1))
assert d.year == 2020 and d.month == 1 and d.day == 1
def test_previous_next_month():
d = previous_month(date(year=2019, month=2, day=1))
assert d.year == 2019 and d.month == 1 and d.day == 31
d = previous_month(date(year=2019, month=2, day=16))
assert d.year == 2019 and d.month == 1 and d.day == 31
d = previous_month(date(year=2020, month=1, day=1))
assert d.year == 2019 and d.month == 12 and d.day == 31
def test_parse_datestamp():
assert parse_datestamp("2019/1/01") == date(2019, 1, 1)
assert parse_datestamp("2019/01/1") == date(2019, 1, 1)
assert parse_datestamp("2019/1/1") == date(2019, 1, 1)
assert parse_datestamp("2019/11/01") == date(2019, 11, 1)
assert parse_datestamp("2019/11/1") == date(2019, 11, 1)
assert parse_datestamp("2019/11/11") == date(2019, 11, 11)
assert parse_datestamp("2019/11") == date(2019, 11, 1)
assert parse_datestamp("2019") == date(2019, 1, 1)
assert parse_datestamp("2019-1-01") == date(2019, 1, 1)
assert parse_datestamp("2019-01-1") == date(2019, 1, 1)
assert parse_datestamp("2019-1-1") == date(2019, 1, 1)
assert parse_datestamp("2019-11-01") == date(2019, 11, 1)
assert parse_datestamp("2019-11-1") == date(2019, 11, 1)
assert parse_datestamp("2019-11-11") == date(2019, 11, 11)
assert parse_datestamp("2019-11") == date(2019, 11, 1)
assert parse_datestamp("2019") == date(2019, 1, 1)
assert parse_datestamp("2019.1.01") == date(2019, 1, 1)
assert parse_datestamp("2019.01.1") == date(2019, 1, 1)
assert parse_datestamp("2019.1.1") == date(2019, 1, 1)
assert parse_datestamp("2019.11.01") == date(2019, 11, 1)
assert parse_datestamp("2019.11.1") == date(2019, 11, 1)
assert parse_datestamp("2019.11.11") == date(2019, 11, 11)
assert parse_datestamp("2019.11") == date(2019, 11, 1)
assert parse_datestamp("2019") == date(2019, 1, 1)
assert parse_datestamp("2019/11/11", strict=True) == date(2019, 11, 11)
try:
parse_datestamp("2019/11", strict=True)
except ValueError:
assert True
else:
assert False
try:
parse_datestamp("")
except ValueError:
assert True
else:
assert False
try:
parse_datestamp("2019/11/11/11")
except ValueError:
assert True
else:
assert False
try:
parse_datestamp("2019/11-11")
except ValueError:
assert True
else:
assert False
try:
parse_datestamp("2019 / 11/11")
except ValueError:
assert True
else:
assert False
try:
parse_datestamp("200/11/11")
except ValueError:
assert True
else:
assert False
assert parse_datestamp("0200/11/11") == date(200, 11, 11)
def test_parse_period():
assert parse_period("2019/11/11:2020/11/11") == (
date(2019, 11, 11),
date(2020, 11, 11),
)
assert parse_period("2019/11:2020/11") == (date(2019, 11, 1), date(2020, 11, 1))
assert parse_period("2019:2020") == (date(2019, 1, 1), date(2020, 1, 1))
assert parse_period("2019:") == (date(2019, 1, 1), None)
assert parse_period(":2019") == (None, date(2019, 1, 1))
assert parse_period("2019") == (date(2019, 1, 1), date(2020, 1, 1))
assert parse_period("2019/11") == (date(2019, 11, 1), date(2019, 12, 1))
assert parse_period("2019/11/11") == (date(2019, 11, 11), date(2019, 11, 12))
assert parse_period("2019/11/11:2020/11") == (date(2019, 11, 11), date(2020, 11, 1))
assert parse_period("2020/11/11:2019/11/11") == (
date(2019, 11, 11),
date(2020, 11, 11),
)
assert parse_period("2019/11/11:2019/11/11") == (
date(2019, 11, 11),
date(2019, 11, 11),
)
try:
parse_period("")
except ValueError:
assert True
else:
assert False
try:
parse_period("2019/11/11:2020/11/11:2021/11/11")
except ValueError:
assert True
else:
assert False
today = datetime.today().date()
assert parse_period("11") == (date(today.year, 11, 1), date(today.year, 12, 1))
assert parse_period("11:12") == (date(today.year, 11, 1), date(today.year, 12, 1))
assert parse_period("6:1") == (date(today.year, 1, 1), date(today.year, 6, 1))
assert parse_period("q1") == (date(today.year, 1, 1), date(today.year, 4, 1))
assert parse_period("q2") == (date(today.year, 4, 1), date(today.year, 7, 1))
assert parse_period("q3") == (date(today.year, 7, 1), date(today.year, 10, 1))
assert parse_period("q4") == (date(today.year, 10, 1), date(today.year + 1, 1, 1))
assert parse_period("q2:q3") == (date(today.year, 4, 1), date(today.year, 7, 1))
assert parse_period("q2:q4") == (date(today.year, 4, 1), date(today.year, 10, 1))
assert parse_period("q4:q1") == (date(today.year, 1, 1), date(today.year, 10, 1))
tomorrow = today + timedelta(days=1)
yesterday = today + timedelta(days=-1)
assert parse_period("today") == (today, tomorrow)
assert parse_period("tod") == (today, tomorrow)
assert parse_period("Today") == (today, tomorrow)
assert parse_period("tomorrow") == (tomorrow, tomorrow + timedelta(days=1))
assert parse_period("tom") == (tomorrow, tomorrow + timedelta(days=1))
assert parse_period("yesterday") == (yesterday, today)
assert parse_period("yest") == (yesterday, today)
assert parse_period("y") == (yesterday, today)
assert parse_period("today:tomorrow") == (today, tomorrow)
assert parse_period("tomorrow:tomorrow") == (tomorrow, tomorrow)
assert parse_period("yesterday:tomorrow") == (yesterday, tomorrow)
assert parse_period("y:tom") == (yesterday, tomorrow)
def test_parse_period_months():
year = datetime.today().date().year
assert parse_period("november") == (
date(year, 11, 1),
date(year, 12, 1),
)
assert parse_period("November") == (
date(year, 11, 1),
date(year, 12, 1),
)
assert parse_period("nov") == (date(year, 11, 1), date(year, 12, 1))
assert parse_period("no") == (date(year, 11, 1), date(year, 12, 1))
assert parse_period("n") == (date(year, 11, 1), date(year, 12, 1))
assert parse_period("nov:dec") == (date(year, 11, 1), date(year, 12, 1))
def test_parse_month():
for n, name in enumerate(calendar.month_name):
if n == 0:
assert parse_month(name) is None
else:
assert parse_month(name) == n
```
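The `in_months` assertions above rely on day-of-month clamping when the target month is shorter (e.g. Jan 31 plus one month lands on Feb 28). A small standalone sketch of that behaviour (not dledger's implementation), using `calendar.monthrange`:
```python
import calendar
from datetime import date


def add_months(d: date, months: int) -> date:
    """ Shift a date by whole months, clamping the day to the target month's length. """
    month_index = d.month - 1 + months
    year = d.year + month_index // 12
    month = month_index % 12 + 1
    day = min(d.day, calendar.monthrange(year, month)[1])
    return date(year, month, day)


assert add_months(date(2019, 1, 31), 1) == date(2019, 2, 28)
assert add_months(date(2019, 1, 31), -1) == date(2018, 12, 31)
assert add_months(date(2020, 6, 1), -12) == date(2019, 6, 1)
```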
#### File: dledger/test/test_projection.py
```python
from datetime import date
from dledger.journal import Transaction, Amount, Distribution
from dledger.projection import (
GeneratedAmount,
GeneratedDate,
estimated_monthly_schedule,
frequency,
normalize_interval,
next_scheduled_date,
next_linear_dividend,
future_transactions,
estimated_transactions,
conversion_factors,
latest_exchange_rates,
scheduled_transactions,
)
def test_normalize_interval():
assert normalize_interval(1) == 1
assert normalize_interval(2) == 3
assert normalize_interval(3) == 3
assert normalize_interval(4) == 6
assert normalize_interval(5) == 6
assert normalize_interval(6) == 6
assert normalize_interval(7) == 12
assert normalize_interval(8) == 12
assert normalize_interval(9) == 12
assert normalize_interval(10) == 12
assert normalize_interval(11) == 12
assert normalize_interval(12) == 12
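# Editor's note (not part of dledger): the assertions above are consistent with
# rounding an observed interval up to the nearest supported payout frequency
# (monthly, quarterly, biannual, annual); a naive sketch of that idea:
def _normalize_interval_sketch(months: int) -> int:
    for frequency in (1, 3, 6, 12):
        if months <= frequency:
            return frequency
    raise ValueError('interval exceeds a year')
assert all(_normalize_interval_sketch(m) == normalize_interval(m) for m in range(1, 13))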
def test_annual_frequency():
records = [Transaction(date(2019, 3, 1), "ABC", 1)]
assert frequency(records) == 12
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2020, 3, 1), "ABC", 1),
Transaction(date(2021, 3, 1), "ABC", 1),
]
assert frequency(records) == 12
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2020, 3, 1), "ABC", 1),
Transaction(date(2021, 5, 1), "ABC", 1),
Transaction(date(2022, 3, 1), "ABC", 1),
Transaction(date(2023, 5, 1), "ABC", 1),
]
assert frequency(records) == 12
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2021, 3, 1), "ABC", 1),
]
assert frequency(records) == 12
records = [
Transaction(date(2018, 5, 4), "ABC", 1),
Transaction(date(2018, 5, 4), "ABC", 1),
]
assert frequency(records) == 12
records = [
Transaction(date(2018, 5, 4), "ABC", 1),
Transaction(date(2018, 5, 4), "ABC", 1),
Transaction(date(2019, 5, 4), "ABC", 1),
Transaction(date(2019, 5, 4), "ABC", 1),
]
assert frequency(records) == 12
def test_biannual_frequency():
records = [
Transaction(date(2019, 5, 1), "ABC", 1),
Transaction(date(2019, 11, 1), "ABC", 1),
]
assert frequency(records) == 6
records = [
Transaction(date(2019, 4, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2020, 4, 1), "ABC", 1),
Transaction(date(2020, 6, 1), "ABC", 1),
]
assert frequency(records) == 6
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
]
# ambiguous; fallback as biannual
assert frequency(records) == 6
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 12, 1), "ABC", 1),
]
# ambiguous; fallback as biannual
assert frequency(records) == 6
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 12, 1), "ABC", 1),
Transaction(date(2020, 3, 1), "ABC", 1),
]
# ambiguous; fallback as biannual
assert frequency(records) == 6
records = [
Transaction(date(2019, 3, 5), "ABC", 1),
Transaction(date(2019, 12, 1), "ABC", 1),
Transaction(date(2020, 3, 1), "ABC", 1),
]
# ambiguous; fallback as biannual
assert frequency(records) == 6
records = [
Transaction(date(2019, 4, 1), "ABC", 1),
Transaction(date(2019, 5, 1), "ABC", 1),
]
# ambiguous; fallback as biannual
assert frequency(records) == 6
records = [
Transaction(date(2018, 3, 1), "ABC", 1),
Transaction(date(2018, 8, 1), "ABC", 1),
Transaction(date(2018, 8, 1), "ABC", 1),
]
assert frequency(records) == 6
records = [
Transaction(date(2019, 8, 1), "ABC", 1),
Transaction(date(2019, 8, 1), "ABC", 1),
Transaction(date(2020, 3, 1), "ABC", 1),
]
assert frequency(records) == 6
records = [
Transaction(date(2018, 3, 1), "ABC", 1),
Transaction(date(2018, 8, 1), "ABC", 1),
Transaction(date(2018, 8, 1), "ABC", 1),
Transaction(date(2019, 3, 1), "ABC", 1),
]
# note that while this result is not a biannual frequency, it is actually correct for the
    # records given; in an actual scenario where this could occur, the same-date record
    # would have been pruned beforehand, making frequency == 6
assert frequency(records) == 12
def test_quarterly_frequency():
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
Transaction(date(2019, 12, 1), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 1, 1), "ABC", 1),
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
Transaction(date(2019, 12, 1), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2020, 6, 1), "ABC", 1),
Transaction(date(2021, 12, 1), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2019, 9, 5), "ABC", 1),
Transaction(date(2019, 12, 1), "ABC", 1),
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 6, 5), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 8, 29), "ABC", 1),
Transaction(date(2019, 10, 31), "ABC", 1),
Transaction(date(2020, 2, 6), "ABC", 1),
]
# assert frequency(records) == 3
# todo: note that this is a false-positive, we expect quarterly here
# requires an additional transaction; see next
assert frequency(records) == 6
records = [
Transaction(date(2019, 5, 9), "ABC", 1), # additional
Transaction(date(2019, 8, 29), "ABC", 1),
Transaction(date(2019, 10, 31), "ABC", 1),
Transaction(date(2020, 2, 6), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 2, 7), "ABC", 1),
Transaction(date(2019, 5, 9), "ABC", 1),
Transaction(date(2019, 8, 29), "ABC", 1),
Transaction(date(2019, 10, 31), "ABC", 1),
Transaction(date(2020, 2, 6), "ABC", 1),
]
assert frequency(records) == 3
records = [
Transaction(date(2019, 9, 5), "ABC", 1),
Transaction(date(2019, 12, 5), "ABC", 1),
Transaction(date(2020, 2, 27), "ABC", 1),
]
# assert frequency(records) == 3
# todo: note that this would correctly result in quarterly frequency if
# the last record was dated in march instead of february
    # but because it isn't, there's ambiguity in timespan
assert frequency(records) == 6
records = [
Transaction(date(2019, 9, 16), "ABC", 1),
Transaction(date(2019, 11, 18), "ABC", 1),
Transaction(date(2020, 2, 24), "ABC", 1),
Transaction(date(2020, 5, 18), "ABC", 1),
# note, one month earlier than last year
Transaction(date(2020, 8, 17), "ABC", 1),
]
assert frequency(records) == 3
def test_monthly_frequency():
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 4, 1), "ABC", 1),
Transaction(date(2019, 5, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
]
assert frequency(records) == 1
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 4, 1), "ABC", 1),
Transaction(date(2019, 5, 1), "ABC", 1),
]
assert frequency(records) == 1
def test_irregular_frequency():
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 4, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2019, 8, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
]
# todo: this is a bad case; can this really be considered quarterly?
assert frequency(records) == 3
def test_estimate_monthly_schedule():
records = [
Transaction(date(2019, 1, 1), "ABC", 1),
Transaction(date(2019, 2, 1), "ABC", 1),
Transaction(date(2019, 3, 1), "ABC", 1),
]
schedule = estimated_monthly_schedule(records, interval=1)
assert schedule == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
Transaction(date(2019, 12, 1), "ABC", 1),
]
schedule = estimated_monthly_schedule(records, interval=3)
assert schedule == [3, 6, 9, 12]
records = [Transaction(date(2019, 3, 1), "ABC", 1)]
schedule = estimated_monthly_schedule(records, interval=3)
assert schedule == [3, 6, 9, 12]
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
]
schedule = estimated_monthly_schedule(records, interval=3)
assert schedule == [3, 6, 9, 12]
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
# note the different ticker
Transaction(date(2019, 9, 1), "ABCD", 1),
]
schedule = estimated_monthly_schedule(records, interval=3)
assert schedule == [3, 6, 9, 12]
records = [
Transaction(date(2019, 3, 1), "ABC", 1),
Transaction(date(2019, 4, 1), "ABC", 1),
Transaction(date(2019, 6, 1), "ABC", 1),
Transaction(date(2019, 8, 1), "ABC", 1),
Transaction(date(2019, 9, 1), "ABC", 1),
]
# note that this is an incorrect interval; it is irregular
schedule = estimated_monthly_schedule(records, interval=3)
# but it works out anyway; the schedule just isn't padded out, because
# there's essentially no gaps if this was a quarterly distribution
assert schedule == [3, 4, 6, 8, 9]
def test_next_scheduled_date():
d = next_scheduled_date(date(2019, 3, 1), months=[3, 6, 9, 12])
assert d.year == 2019 and d.month == 6 and d.day == 1
d = next_scheduled_date(date(2019, 3, 12), months=[3, 6, 9, 12])
assert d.year == 2019 and d.month == 6 and d.day == 1
d = next_scheduled_date(date(2019, 12, 1), months=[3, 6, 9, 12])
assert d.year == 2020 and d.month == 3 and d.day == 1
def test_next_linear_dividend():
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1))
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(1)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1)),
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(2)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1)),
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
Transaction(date(2019, 9, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(2)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1)),
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
Transaction(date(2019, 9, 1), "ABC", 1, amount=Amount(100), dividend=Amount(3)),
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(3)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(3)),
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
Transaction(date(2019, 9, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1)),
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(1)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1)),
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(1)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1)),
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
Transaction(
date(2019, 9, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1.5)
),
]
dividend = next_linear_dividend(records)
assert dividend is None
records = [
Transaction(date(2019, 3, 1), "ABC", 1, amount=Amount(100), dividend=Amount(1)),
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
Transaction(
date(2019, 6, 15),
"ABC",
1,
amount=Amount(100),
dividend=Amount(1.5),
kind=Distribution.SPECIAL,
),
Transaction(date(2019, 9, 1), "ABC", 1, amount=Amount(100), dividend=Amount(3)),
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(3)
records = [
Transaction(date(2019, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(2)),
Transaction(
date(2019, 6, 15),
"ABC",
1,
amount=Amount(100),
dividend=Amount(1.5),
kind=Distribution.INTERIM,
),
Transaction(date(2020, 6, 1), "ABC", 1, amount=Amount(100), dividend=Amount(3)),
]
dividend = next_linear_dividend(records)
assert dividend == GeneratedAmount(3)
def test_future_transactions():
records = [Transaction(date(2019, 3, 1), "ABC", 1)]
futures = future_transactions(records)
assert len(futures) == 0
records = [Transaction(date(2019, 3, 1), "ABC", 1, Amount(100))]
futures = future_transactions(records)
assert len(futures) == 1
assert futures[0].entry_date == GeneratedDate(2020, 3, 15)
records = [Transaction(date(2019, 3, 16), "ABC", 1, Amount(100))]
futures = future_transactions(records)
assert len(futures) == 1
assert futures[0].entry_date == GeneratedDate(2020, 3, 31)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2020, 12, 16), "ABC", 1, Amount(100)),
]
futures = future_transactions(records)
assert len(futures) == 2
assert futures[0].entry_date == GeneratedDate(2020, 3, 15)
assert futures[1].entry_date == GeneratedDate(2021, 12, 31)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100, symbol="$")),
Transaction(date(2019, 5, 1), "ABC", 1, Amount(100, symbol="$")),
Transaction(date(2019, 7, 1), "ABC", 1, Amount(100, symbol="kr")),
]
futures = future_transactions(records)
# only transactions that match in currency will be projected
# because of that we only expect 1 in this case
assert len(futures) == 1
assert futures[0].entry_date == GeneratedDate(2020, 7, 15)
def test_estimated_transactions():
records = [Transaction(date(2019, 3, 1), "ABC", 1)]
estimations = estimated_transactions(records)
assert len(estimations) == 0
records = [Transaction(date(2019, 3, 1), "ABC", 1, Amount(100))]
estimations = estimated_transactions(records)
assert len(estimations) == 1
assert estimations[0].entry_date == GeneratedDate(2020, 3, 15)
records = [Transaction(date(2019, 3, 16), "ABC", 1, Amount(100))]
estimations = estimated_transactions(records)
assert len(estimations) == 1
assert estimations[0].entry_date == GeneratedDate(2020, 3, 31)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2020, 12, 16), "ABC", 1, Amount(100)),
]
estimations = estimated_transactions(records)
assert len(estimations) == 2
assert estimations[0].entry_date == GeneratedDate(2021, 3, 31)
assert estimations[1].entry_date == GeneratedDate(2021, 12, 31)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100, symbol="$")),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100, symbol="$")),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100, symbol="kr")),
]
estimations = estimated_transactions(records)
# varying currencies should not have an effect on the resulting number of projections
# (e.g. it's not limiting like future_transactions())
# it does affect the estimated amount, however, as that will only ever be based upon
# the latest transaction (and all previous transactions of matching symbols)
assert len(estimations) == 4
assert estimations[0].entry_date == GeneratedDate(2019, 12, 15)
assert estimations[1].entry_date == GeneratedDate(2020, 3, 15)
assert estimations[2].entry_date == GeneratedDate(2020, 6, 15)
assert estimations[3].entry_date == GeneratedDate(2020, 9, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
]
estimations = estimated_transactions(records)
assert len(estimations) == 4
assert estimations[0].entry_date == GeneratedDate(2019, 12, 15)
assert estimations[0].amount.value == 100
assert estimations[1].entry_date == GeneratedDate(2020, 3, 15)
assert estimations[1].amount.value == 100
assert estimations[2].entry_date == GeneratedDate(2020, 6, 15)
assert estimations[2].amount.value == 100
assert estimations[3].entry_date == GeneratedDate(2020, 9, 15)
assert estimations[3].amount.value == 100
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(30)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(40)),
Transaction(date(2019, 9, 1), "ABC", 2, Amount(100)),
]
estimations = estimated_transactions(records)
assert len(estimations) == 4
assert estimations[0].entry_date == GeneratedDate(2019, 12, 15)
assert estimations[0].amount.value == 80 # mean of highest / lowest
assert estimations[1].entry_date == GeneratedDate(2020, 3, 15)
assert estimations[1].amount.value == 90
assert estimations[2].entry_date == GeneratedDate(2020, 6, 15)
assert estimations[2].amount.value == 100
assert estimations[3].entry_date == GeneratedDate(2020, 9, 15)
assert estimations[3].amount.value == 100
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(40, symbol="$")),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(200, symbol="kr")),
Transaction(date(2019, 9, 1), "ABC", 2, Amount(600, symbol="kr")),
]
estimations = estimated_transactions(records)
assert len(estimations) == 4
assert estimations[0].amount.value == 500 # mean of highest aps / lowest aps
assert estimations[1].entry_date == GeneratedDate(2020, 3, 15)
assert estimations[1].amount.value == 500
assert estimations[2].entry_date == GeneratedDate(2020, 6, 15)
assert estimations[2].amount.value == 600
assert estimations[3].entry_date == GeneratedDate(2020, 9, 15)
assert estimations[3].amount.value == 600
def test_scheduled_transactions():
records = [
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 0
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 8, 1))
assert len(scheduled) == 1
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 9, 1))
assert len(scheduled) == 1
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 2), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 9, 1))
assert len(scheduled) == 1
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)), # dated in future
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)), # dated in future
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)), # dated in future
]
# here, the trailing date range will be from 2018/09/01-2019/09/01
# which should result in only 1 forecast within the forward 12month
# range from the `since` date at 2019/01/01
scheduled = scheduled_transactions(records, since=date(2019, 1, 1))
assert len(scheduled) == 1
assert scheduled[0].entry_date == GeneratedDate(2019, 12, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
]
# the PEP case where payouts are [3, 6, 9, 1], but until a january transaction
# has been recorded, january will be forecasted as a december payout
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2019, 12, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
# but once a january transaction is recorded, forecasts should be on track
Transaction(date(2020, 1, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 15))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 3, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)), # dated in the future
]
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 3
assert scheduled[0].entry_date == GeneratedDate(
2020, 3, 15
) # because we have one prelim record for dec
assert scheduled[1].entry_date == GeneratedDate(2020, 6, 15)
assert scheduled[2].entry_date == GeneratedDate(2020, 9, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)), # dated in the future
Transaction(date(2020, 3, 1), "ABC", 1, Amount(100)), # dated in the future
]
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 2
assert scheduled[0].entry_date == GeneratedDate(2020, 6, 15)
assert scheduled[1].entry_date == GeneratedDate(2020, 9, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 11, 1), "ABC", 1, Amount(100)), # dated in the future
]
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 3
assert scheduled[0].entry_date == GeneratedDate(2020, 3, 15)
assert scheduled[1].entry_date == GeneratedDate(2020, 6, 15)
assert scheduled[2].entry_date == GeneratedDate(2020, 9, 15)
records = [
Transaction(date(2019, 9, 16), "ABC", 1, Amount(100)),
Transaction(date(2019, 11, 18), "ABC", 1, Amount(100)),
Transaction(date(2020, 2, 24), "ABC", 1, Amount(100)),
Transaction(date(2020, 5, 18), "ABC", 1, Amount(100)),
# note, one month earlier than last year
Transaction(date(2020, 8, 17), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 8, 18))
# todo: issue here is that 2019/9 is projected to 2020/9, but we can clearly tell,
# based on month interval not matching expected frequency (i.e. 3), that we don't
# actually want/expect this projection - it should just be weeded out
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 11, 30)
assert scheduled[1].entry_date == GeneratedDate(2021, 2, 28)
assert scheduled[2].entry_date == GeneratedDate(2021, 5, 31)
assert scheduled[3].entry_date == GeneratedDate(2021, 8, 31)
records = [
Transaction(date(2020, 3, 13), "ABC", 1, Amount(100)),
Transaction(date(2020, 6, 15), "ABC", 1, Amount(100)),
# preliminary record; e.g. in future, results in projection more than 1 year later
Transaction(date(2020, 9, 15), "ABC", 1, GeneratedAmount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 9, 2))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 12, 15)
assert scheduled[1].entry_date == GeneratedDate(2021, 3, 15)
assert scheduled[2].entry_date == GeneratedDate(2021, 6, 15)
# note that this one is included though more than 365 days later; see earliest/cutoff in scheduled_transactions
assert scheduled[3].entry_date == GeneratedDate(2021, 9, 15)
def test_scheduled_grace_period():
records = [
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 9, 16))
assert len(scheduled) == 1
records = [
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 9, 30))
assert len(scheduled) == 1
records = [
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 10, 1))
assert len(scheduled) == 0
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100))
# a quarterly distribution skipped for december
# this should not prevent forecasts for previous distributions;
# we can't know whether this means distribution stopped completely, or is just a change in frequency;
# require user input
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 20))
assert len(scheduled) == 3
for n in range(0, 14):
# going back 13 days; spanning 2021/03/18 - 2021/04/15; a 28 day period
records = [
Transaction(date(2020, 4, 7), "ABC", 1, Amount(1)),
Transaction(date(2021, 3, 31 - n), "ABC", 1, Amount(1)),
]
scheduled = scheduled_transactions(records, since=date(2021, 3, 31))
assert len(scheduled) == 1
records = [
Transaction(date(2020, 4, 7), "ABC", 1, Amount(1)),
# note that this date is the first date far enough back that
# it is not considered a fit for the april forecast
# i.e. if the date was one day later (2021/03/18), it would be
# considered a fit, and the forecast would be discarded
Transaction(date(2021, 3, 17), "ABC", 1, Amount(1)),
]
scheduled = scheduled_transactions(records, since=date(2021, 3, 31))
assert len(scheduled) == 2
def test_scheduled_transactions_closed_position():
records = [
Transaction(date(2019, 1, 20), "ABC", 1, Amount(100)),
Transaction(date(2020, 1, 19), "ABC", 0),
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 20))
assert len(scheduled) == 0
records = [
Transaction(date(2019, 1, 20), "ABC", 1, Amount(100)),
Transaction(date(2020, 1, 19), "ABC", 0),
Transaction(date(2020, 2, 1), "ABC", 1),
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 20))
assert len(scheduled) == 0
# see example/strategic.journal
records = [
Transaction(date(2019, 1, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 4, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 7, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 10, 20), "ABC", 1, Amount(100)),
Transaction(date(2020, 1, 19), "ABC", 0),
Transaction(date(2020, 2, 1), "ABC", 1),
]
scheduled = scheduled_transactions(records, since=date(2020, 2, 20))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 4, 30)
assert scheduled[0].position == 1
assert scheduled[0].amount == GeneratedAmount(100)
# ...
assert scheduled[3].entry_date == GeneratedDate(2021, 1, 31)
assert scheduled[3].position == 1
assert scheduled[3].amount == GeneratedAmount(100)
records = [
Transaction(date(2018, 8, 15), "ABC", 1, Amount(100)),
Transaction(date(2018, 11, 14), "ABC", 1, Amount(100)),
Transaction(date(2019, 2, 20), "ABC", 1, Amount(100)),
Transaction(date(2019, 5, 15), "ABC", 1, Amount(100)),
Transaction(date(2019, 8, 14), "ABC", 1, Amount(100)),
Transaction(date(2019, 11, 20), "ABC", 1, Amount(100)),
# simulate preliminary record, using --by-payout-date (entry_date=ex_date)
Transaction(
date(2020, 3, 12), "ABC", 1, GeneratedAmount(100), ex_date=date(2020, 2, 19)
),
Transaction(date(2020, 2, 28), "ABC", 0),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 8))
assert len(scheduled) == 0
# for this scenario, assume a user records by payout date, but makes sure to put in
# ex-date when necessary to maintain correct forecasting
records = [
# past dividend transaction; assume semi-annual distribution for scenario
Transaction(date(2018, 10, 5), "ABC", 100, Amount(100)),
# closing position right after passed ex-date
Transaction(date(2019, 1, 16), "ABC", 0),
# opening lower position before reaching payout date
Transaction(date(2019, 1, 26), "ABC", 50),
# payout date; note ex-date set
Transaction(
date(2019, 2, 5), "ABC", 100, Amount(100), ex_date=date(2019, 1, 15)
),
]
scheduled = scheduled_transactions(records, since=date(2019, 2, 16))
assert len(scheduled) == 2
assert scheduled[0].entry_date == date(2019, 10, 15)
assert scheduled[1].entry_date == date(2020, 2, 15)
assert scheduled[0].position == 50
assert scheduled[1].position == 50
# same exact scenario, except in this case, user forgot to set ex-date
from dataclasses import replace
records.append(replace(records[3], ex_date=None))
records.pop(3)
scheduled = scheduled_transactions(records, since=date(2019, 2, 16))
assert len(scheduled) == 2
assert scheduled[0].entry_date == date(2019, 10, 15)
assert scheduled[1].entry_date == date(2020, 2, 15)
assert scheduled[0].position == 100
assert scheduled[1].position == 100
def test_scheduled_transactions_sampling():
records = [
Transaction(date(2019, 3, 10), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
# note 5 days earlier than in the past; this leads to an additional projection
# since there's not more than 12m between; e.g. records sampled will range from:
# 2019/03/05 (exclusive) - 2020/03/05 (inclusive)
# e.g. 2019/03/10 => 2020/03/15, but this one will be discarded (as it has been realized)
Transaction(date(2020, 3, 5), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 12))
assert len(scheduled) == 4
assert scheduled[0].entry_date == date(2020, 6, 15)
records = [
Transaction(date(2019, 3, 5), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
        # if it were 5 days later, however, there would be more than 12m between and it would pose no issue
# e.g. records sampled will range from:
# 2019/03/10 (exclusive) - 2020/03/10 (inclusive)
Transaction(date(2020, 3, 10), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 15))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 6, 15)
assert scheduled[3].entry_date == GeneratedDate(2021, 3, 15)
records = [
Transaction(date(2019, 3, 10), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
Transaction(date(2020, 3, 5), "ABC", 1, Amount(100)),
]
# no issue whether earliest record was dated later,
# because the earliest record is now out of the 12m period entirely
scheduled = scheduled_transactions(records, since=date(2020, 4, 1))
assert len(scheduled) == 4
assert scheduled[0].entry_date == date(2020, 6, 15)
records = [
Transaction(date(2019, 3, 10), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 12))
assert len(scheduled) == 4
assert scheduled[0].entry_date == GeneratedDate(2020, 3, 15)
records = [
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 1), "ABC", 1, Amount(100)),
# note february instead of march; i.e. less than 12m between
Transaction(date(2020, 2, 28), "ABC", 1, Amount(100)), # dated today
]
scheduled = scheduled_transactions(records, since=date(2020, 2, 28))
assert len(scheduled) == 4
assert scheduled[0].entry_date == date(2020, 6, 15)
records = [
Transaction(date(2018, 1, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 2, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 4, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 5, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 7, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 8, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(110)),
Transaction(date(2018, 10, 1), "ABC", 1, Amount(110)),
Transaction(date(2018, 11, 1), "ABC", 1, Amount(110)),
Transaction(date(2018, 12, 1), "ABC", 1, Amount(110)),
]
scheduled = scheduled_transactions(records, since=date(2019, 1, 1))
assert len(scheduled) == 12
assert scheduled[0].entry_date == GeneratedDate(2019, 1, 15)
assert scheduled[0].amount == GeneratedAmount(
100
) # to verify that this is not projected by averaging amounts
assert scheduled[1].entry_date == GeneratedDate(2019, 2, 15)
assert scheduled[1].amount == GeneratedAmount(100)
assert scheduled[2].entry_date == GeneratedDate(2019, 3, 15)
# ...
assert scheduled[11].entry_date == GeneratedDate(2019, 12, 15)
assert scheduled[11].amount == GeneratedAmount(110)
records = [
Transaction(date(2018, 1, 31), "ABC", 1, Amount(100)),
Transaction(date(2018, 2, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 4, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 5, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 6, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 7, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 8, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 9, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 10, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 11, 1), "ABC", 1, Amount(100)),
Transaction(date(2018, 12, 1), "ABC", 1, Amount(100)),
Transaction(date(2019, 1, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2019, 1, 3))
assert len(scheduled) == 12
assert scheduled[0].entry_date == GeneratedDate(2019, 2, 15)
assert scheduled[11].entry_date == GeneratedDate(2020, 1, 15)
records = [
Transaction(date(2019, 4, 30), "ABC", 1, Amount(100)),
Transaction(date(2019, 5, 31), "ABC", 1, Amount(100)),
Transaction(date(2019, 6, 28), "ABC", 1, Amount(100)),
Transaction(date(2019, 7, 31), "ABC", 1, Amount(100)),
Transaction(date(2019, 8, 30), "ABC", 1, Amount(100)),
Transaction(date(2019, 9, 30), "ABC", 1, Amount(100)),
Transaction(date(2019, 10, 31), "ABC", 1, Amount(100)),
Transaction(date(2019, 11, 28), "ABC", 1, Amount(100)),
Transaction(date(2019, 12, 31), "ABC", 1, Amount(100)),
Transaction(date(2020, 1, 31), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 2, 11))
assert len(scheduled) == 12
assert scheduled[0].entry_date == GeneratedDate(2020, 2, 29)
records = [
# this case simulates --by-ex-date, but main tracking date is payout date
        # running a report here will (perhaps confusingly) only show 11 forecasted transactions, not 12
# this is correct, however, as the grace period of >15 days has passed, and
# the logic dictates that this transaction is then considered out of schedule
# (but ticker still inferred as being a monthly payer, thus the 11 transactions)
Transaction(date(2019, 12, 31), "ABC", 1, Amount(1)),
Transaction(date(2020, 1, 31), "ABC", 1, Amount(1)),
Transaction(date(2020, 2, 28), "ABC", 1, Amount(1)),
Transaction(date(2020, 3, 31), "ABC", 1, Amount(1)),
Transaction(date(2020, 4, 30), "ABC", 1, Amount(1)),
Transaction(date(2020, 5, 29), "ABC", 1, Amount(1)),
Transaction(date(2020, 6, 30), "ABC", 1, Amount(1)),
Transaction(date(2020, 7, 31), "ABC", 1, Amount(1)),
Transaction(date(2020, 8, 31), "ABC", 1, Amount(1)),
Transaction(date(2020, 9, 30), "ABC", 1, Amount(1)),
Transaction(date(2020, 10, 30), "ABC", 1, Amount(1)),
Transaction(date(2020, 11, 30), "ABC", 1, Amount(1)),
# the record at 2020/12/31 has not been paid out yet and thus not recorded yet
# running --by-payout-date will still show 12 forecasts, because in this schedule
# the transaction is still set in the future (e.g. 2021/01/31)
]
scheduled = scheduled_transactions(records, since=date(2021, 1, 18))
assert len(scheduled) == 11
    # the first record is actually a forecast of the 2020/01/31 record; i.e. a year back
assert scheduled[0].entry_date == GeneratedDate(2021, 1, 31)
def test_scheduled_transactions_in_leap_year():
records = [Transaction(date(2019, 2, 28), "ABC", 1, Amount(100))]
scheduled = scheduled_transactions(records, since=date(2020, 1, 1))
assert len(scheduled) == 1
assert scheduled[0].entry_date == GeneratedDate(2020, 2, 29)
records = [
Transaction(date(2019, 2, 28), "ABC", 1, Amount(100)),
Transaction(date(2019, 3, 25), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 1, 1))
assert len(scheduled) == 2
assert scheduled[0].entry_date == GeneratedDate(2020, 2, 29)
assert scheduled[1].entry_date == GeneratedDate(2020, 3, 31)
records = [
Transaction(date(2019, 2, 28), "ABC", 1, Amount(100)),
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2020, 2, 29), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 1))
# assert len(scheduled) == 2
# assert scheduled[0].date == date(2020, 3, 15)
# assert scheduled[1].date == date(2021, 2, 28)
# todo: note that this should be considered a false-positive, as we may not expect
# 2020/03/15 to be discarded, but in other cases, we do!
assert len(scheduled) == 1
assert scheduled[0].entry_date == GeneratedDate(2021, 2, 28)
records = [
Transaction(date(2019, 2, 28), "ABC", 1, Amount(100)),
Transaction(date(2019, 3, 1), "ABC", 1, Amount(100)),
Transaction(date(2020, 2, 15), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 1))
assert len(scheduled) == 2
assert scheduled[0].entry_date == GeneratedDate(2020, 3, 15)
assert scheduled[1].entry_date == GeneratedDate(2021, 2, 15)
records = [
Transaction(date(2019, 2, 28), "ABC", 1, Amount(100)),
Transaction(date(2020, 2, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 2, 15))
assert len(scheduled) == 1
assert scheduled[0].entry_date == GeneratedDate(2021, 2, 15)
records = [
Transaction(date(2019, 2, 1), "ABC", 1, Amount(100)),
Transaction(date(2020, 2, 29), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2020, 3, 1))
assert len(scheduled) == 1
assert scheduled[0].entry_date == GeneratedDate(2021, 2, 28)
records = [
Transaction(date(2020, 2, 29), "ABC", 1, Amount(100)),
Transaction(date(2021, 2, 1), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2021, 2, 15))
assert len(scheduled) == 1
assert scheduled[0].entry_date == GeneratedDate(2022, 2, 15)
records = [
Transaction(date(2020, 2, 1), "ABC", 1, Amount(100)),
Transaction(date(2021, 2, 28), "ABC", 1, Amount(100)),
]
scheduled = scheduled_transactions(records, since=date(2021, 3, 1))
assert len(scheduled) == 1
assert scheduled[0].entry_date == GeneratedDate(2022, 2, 28)
def test_conversion_factors():
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
)
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1]
assert rates[("$", "kr")] == 1
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(675, symbol="kr"),
dividend=Amount(1, symbol="$"),
)
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [6.75]
assert rates[("$", "kr")] == 6.75
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(10, symbol="kr"),
dividend=Amount(1, symbol="$"),
)
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [0.1]
assert rates[("$", "kr")] == 0.1
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(1, symbol="kr"),
dividend=Amount(10, symbol="$"),
)
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [0.001]
assert rates[("$", "kr")] == 0.001
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
Transaction(
date(2019, 6, 1),
"ABC",
100,
amount=Amount(110, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
Transaction(
date(2019, 9, 1),
"ABC",
100,
amount=Amount(105, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1.05]
assert rates[("$", "kr")] == 1.05
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
Transaction(
date(2019, 3, 1),
"XYZ",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1]
assert rates[("$", "kr")] == 1
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
Transaction(
date(2019, 3, 1),
"XYZ",
100,
amount=Amount(110, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1, 1.1]
assert rates[("$", "kr")] == 1.1
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
Transaction(
date(2019, 3, 1),
"XYZ",
100,
amount=Amount(110, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
Transaction(
date(2019, 3, 1),
"WWW",
100,
amount=Amount(110, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1, 1.1]
assert rates[("$", "kr")] == 1.1
records = [
Transaction(
date(2019, 2, 28),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
payout_date=date(2019, 3, 1),
),
Transaction(
date(2019, 3, 1),
"XYZ",
100,
amount=Amount(110, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1, 1.1]
assert rates[("$", "kr")] == 1.1
records = [
Transaction(
date(2019, 2, 26),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
payout_date=date(2019, 2, 28),
),
Transaction(
date(2019, 3, 1),
"XYZ",
100,
amount=Amount(110, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1.1]
assert rates[("$", "kr")] == 1.1
records = [
Transaction(
date(2019, 3, 1),
"ABC",
100,
amount=Amount(100, symbol="kr"),
dividend=Amount(1, symbol="$"),
ex_date=date(2019, 2, 28),
),
Transaction(
date(2019, 3, 1),
"XYZ",
100,
amount=Amount(110, symbol="kr"),
dividend=Amount(1, symbol="$"),
),
]
factors = conversion_factors(records)
rates = latest_exchange_rates(records)
assert len(factors) == 1
assert factors[("$", "kr")] == [1, 1.1]
assert rates[("$", "kr")] == 1.1
def test_secondary_date_monthly():
records = [
Transaction(date(2019, 4, 30), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 5, 31), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 6, 28), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 7, 31), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 8, 30), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 9, 30), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 10, 31), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 11, 28), "O", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(
date(2019, 12, 31),
"O",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 1, 15),
),
Transaction(
date(2020, 1, 31),
"O",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 2, 14),
),
]
# simulate --by-payout-date
from dataclasses import replace
records = [
r
if r.payout_date is None
else replace(r, entry_date=r.payout_date, payout_date=None)
for r in records
]
projections = scheduled_transactions(records, since=date(2020, 3, 2))
assert len(projections) == 12
transactions = records
transactions.extend(projections)
assert transactions[9].entry_date == date(2020, 2, 14)
assert transactions[10].entry_date == date(2020, 3, 15)
def test_seemingly_missing_projection():
# this test simulates reporting with --by-ex-date where a projected record
# is projected "in the past", beyond the grace period, and is therefore discarded
    # but the payout/entry date might still lie in the future, so it seems incorrect for it to be missing
# the logic is correct, however, so it is intentional and not an error
#
# it could be included by considering other dates; i.e. not only entry_date
# however, that requires a mechanism to determine the "primary" date
# as currently we replace entry_date and discard the field used (with a function to determine
    # the primary date, we would not alter the record at all, except for setting some flag, but this
# is a large task that goes deep almost everywhere)
# additionally, it might introduce unwanted projections in situations where
# the dividend distribution was actually eliminated (the projection would just linger longer)
records = [
Transaction(
date(2019, 3, 15), "A", 1, amount=Amount(1), ex_date=date(2019, 2, 20)
),
Transaction(
date(2019, 6, 14), "A", 1, amount=Amount(1), ex_date=date(2019, 5, 15)
),
Transaction(
date(2019, 9, 13), "A", 1, amount=Amount(1), ex_date=date(2019, 8, 14)
),
Transaction(
date(2019, 12, 13), "A", 1, amount=Amount(1), ex_date=date(2019, 11, 20)
),
Transaction(
date(2020, 3, 13), "A", 1, amount=Amount(1), ex_date=date(2020, 2, 19)
),
Transaction(
date(2020, 6, 12), "A", 1, amount=Amount(1), ex_date=date(2020, 5, 20)
),
]
projections = scheduled_transactions(records, since=date(2020, 9, 5))
assert len(projections) == 4
assert projections[0].entry_date == date(2020, 9, 15)
# simulate --by-ex-date
from dataclasses import replace
records = [
r if r.ex_date is None else replace(r, entry_date=r.ex_date, ex_date=None)
for r in records
]
projections = scheduled_transactions(records, since=date(2020, 9, 5))
assert len(projections) == 3
# note the "missing" projection at 2020/08/15, because this is 20 days ago;
# i.e. more than the grace period of 15 days
assert projections[0].entry_date == date(2020, 11, 30)
def test_secondary_date_quarterly():
records = [
Transaction(date(2019, 4, 30), "ABC", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(date(2019, 7, 31), "ABC", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(
date(2019, 10, 31),
"ABC",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 1, 13),
),
]
# simulate --by-payout-date
from dataclasses import replace
records = [
r
if r.payout_date is None
else replace(r, entry_date=r.payout_date, payout_date=None)
for r in records
]
projections = scheduled_transactions(records, since=date(2020, 1, 18))
assert len(projections) == 4
transactions = records
transactions.extend(projections)
assert transactions[2].entry_date == date(2020, 1, 13)
assert transactions[3].entry_date == date(2020, 4, 30)
def test_12month_projection():
records = [
Transaction(date(2019, 4, 4), "TOP", 1, amount=Amount(1), dividend=Amount(1)),
Transaction(
date(2020, 4, 3),
"TOP",
2,
amount=Amount(2),
dividend=Amount(1),
payout_date=date(2020, 4, 7),
),
]
# here we expect 2020/4/3 => 2021/4/15, but since that is more than 365 days away
# this test reveals whether projections count by days or months;
# e.g. we expect a forecast to include any projections within remainder of current month, all
# up until next month, a year ahead: e.g. 2020/4/8 (inclusive) to 2021/5/1 (exclusive)
projections = scheduled_transactions(records, since=date(2020, 4, 8))
assert len(projections) == 1
assert projections[0].entry_date == date(2021, 4, 15)
def test_estimated_position_by_ex_dividend():
# test whether projected positions are correctly based on ex-dates (if applicable),
# even if not tracking entry date by ex-date
records = [
Transaction(
date(2019, 9, 17),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2019, 9, 16),
ex_date=date(2019, 8, 19),
),
Transaction(
date(2019, 10, 16),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2019, 10, 15),
ex_date=date(2019, 9, 18),
),
Transaction(
date(2019, 11, 18),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2019, 11, 15),
ex_date=date(2019, 10, 17),
),
Transaction(
date(2019, 12, 12),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2019, 12, 11),
ex_date=date(2019, 11, 19),
),
Transaction(
date(2020, 1, 16),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 1, 15),
ex_date=date(2019, 12, 27),
),
Transaction(
date(2020, 2, 17),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 2, 14),
ex_date=date(2020, 1, 20),
),
Transaction(
date(2020, 3, 16),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 3, 13),
ex_date=date(2020, 2, 19),
),
Transaction(
date(2020, 4, 16),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 4, 15),
ex_date=date(2020, 3, 17),
),
Transaction(
date(2020, 5, 18),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 5, 15),
ex_date=date(2020, 4, 17),
),
Transaction(
date(2020, 6, 16),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 6, 15),
ex_date=date(2020, 5, 19),
),
Transaction(
date(2020, 7, 16),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2020, 7, 15),
ex_date=date(2020, 6, 17),
),
Transaction(date(2020, 8, 3), "ABCD", 2),
]
projections = scheduled_transactions(records, since=date(2020, 8, 4))
assert len(projections) == 12
assert projections[0].entry_date == date(2020, 8, 31)
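    # the august projection's ex-date is projected to fall before the 2020/08/03 purchase,
    # so its position stays at 1; the september projection's ex-date falls after it, hence 2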
assert projections[0].position == 1
assert projections[1].entry_date == date(2020, 9, 30)
assert projections[1].position == 2
def test_future_position_by_ex_dividend():
records = [
# this is a dividend transaction with all dates plotted in; note that only
# entry date is actually projected, which puts it "in front" of the purchase record below;
# this effectively means that, unless ex-date is properly accounted for, the future position
# would be based on the latest record before the projected date; i.e. the purchase record
# what we actually want, though, is to additionally project the ex-date, and *then*
# find the latest record before *that* date; which, in this case, would be this dividend
# transaction and result in a position=1, as expected
Transaction(
date(2019, 8, 17),
"ABCD",
1,
amount=Amount(1),
dividend=Amount(1),
payout_date=date(2019, 8, 16),
ex_date=date(2019, 7, 19),
),
# this is a purchase record; note dated prior to a projected entry date of the record above
Transaction(date(2020, 8, 3), "ABCD", 2),
]
projections = scheduled_transactions(records, since=date(2020, 8, 4))
assert len(projections) == 1
assert projections[0].entry_date == date(2020, 8, 31)
assert projections[0].position == 1
def test_ambiguous_position():
records = [
Transaction(date(2019, 2, 14), "AAPL", 100, amount=Amount(73)),
Transaction(date(2019, 2, 14), "AAPL", 50, amount=Amount(36.5)),
]
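    # two records on the same date with conflicting positions are ambiguous and should raise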
try:
scheduled_transactions(records, since=date(2019, 2, 18))
except ValueError:
assert True
else:
assert False
records = [
Transaction(date(2019, 2, 14), "AAPL", 100, amount=Amount(73)),
Transaction(
date(2019, 2, 14),
"AAPL",
100,
amount=Amount(36.5),
kind=Distribution.SPECIAL,
),
]
projections = scheduled_transactions(records, since=date(2019, 2, 18))
assert len(projections) == 1
records = [
Transaction(date(2019, 2, 14), "AAPL", 100, amount=Amount(73)),
# ambiguous position
Transaction(
date(2019, 2, 14),
"AAPL",
50,
amount=Amount(36.5),
kind=Distribution.SPECIAL,
),
]
try:
scheduled_transactions(records, since=date(2019, 2, 18))
except ValueError:
assert True
else:
assert False
``` |
{
"source": "jhauberg/gitdoctor",
"score": 3
} |
#### File: gitdoctor/doctor/repo.py
```python
import os
import re
import subprocess
def can_be_examined() -> bool:
""" Return True if git is installed. """
# assume that if a simple git invocation fails, then git is probably not installed
# this is a portable way to determine existence of a binary on PATH, versus using
# platform-specific tools like `which` on macOS or (sometimes) `where` on Windows
try:
result = subprocess.run([
'git', '--version'],
stdout=subprocess.DEVNULL, # ignore stdout
stderr=subprocess.DEVNULL) # ignore stderr
return result.returncode == 0
except OSError:
return False
def exists() -> bool:
""" Return True if current working directory is inside the work tree of a repository. """
result = subprocess.run([
'git', 'rev-parse', '--is-inside-work-tree'],
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.DEVNULL) # ignore stderr
if result.returncode != 0:
# will exit with non-zero code if not in a git repository at all
return False
status = result.stdout.decode('utf-8')
# certain checks require being inside the work tree; e.g. not inside .git/
# (for example, finding unwanted files through `git ls-files -i`)
return 'true' in status.lower()
def has_remote() -> (bool, str):
""" Return True if current repository has one or more remotes, False otherwise. """
result = subprocess.run([
'git', 'remote'],
check=True, # print stacktrace on non-zero exit status
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.DEVNULL) # ignore stderr
remotes = result.stdout.decode('utf-8').splitlines()
has_remotes = len(remotes) > 0
# bias toward first listed remote; this could be wrong
return has_remotes, remotes[0] if has_remotes else None
def default_branch(remote: str) -> str:
""" Return the name of the default branch on a remote. """
result = subprocess.run([
'git', 'remote', 'show', remote],
check=True, # print stacktrace on non-zero exit status
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.DEVNULL) # ignore stderr
output = result.stdout.decode('utf-8')
match = re.search(r'HEAD branch:(.*)', output)
assert match is not None
name = match.group(1)
return name.strip()
def absolute_path() -> str:
""" Return the absolute path to the root of current repository. """
result = subprocess.run([
'git', 'rev-parse', '--show-toplevel'],
check=True, # print stacktrace on non-zero exit status
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.DEVNULL) # ignore stderr
path = result.stdout.decode('utf-8')
return path.strip()
def size_in_bytes(exclude_work_tree: bool=False) -> int:
""" Return the size (in bytes) of current repository.
If exclude_work_tree is True, only count size of the .git directory.
"""
path = absolute_path()
if exclude_work_tree:
path = os.path.join(path, '.git')
files = (os.path.join(dirpath, filename) for dirpath, dirnames, filenames in os.walk(path) for filename in filenames)
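    # symlinks are skipped below: broken links would raise and valid ones would double count their targets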
filesizes = [os.path.getsize(filepath) for filepath in files if not os.path.islink(filepath)]
size = sum(filesizes)
return size
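
# illustrative usage sketch (not part of the original module); assumes the functions above
# behave as documented and that git is available on PATH
if __name__ == '__main__':
    if can_be_examined() and exists():
        ok, remote = has_remote()
        if ok:
            print('default branch on {}: {}'.format(remote, default_branch(remote)))
        print('.git size: {} bytes'.format(size_in_bytes(exclude_work_tree=True)))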
```
#### File: gitdoctor/doctor/scrub.py
```python
from doctor import command
import doctor.repo as repo
GIT_EXPIRE = 'git reflog expire --expire-unreachable=now --all --stale-fix'
GIT_GC = 'git gc --prune=now'
GIT_GC_AGGRESSIVE = GIT_GC + ' --aggressive'
def trim(aggressively: bool=False, verbose: bool=False) -> int:
""" Trim current repository and return the difference (in bytes) from before and after.
The difference is negative if the repository became smaller, positive if it became larger.
"""
# only check size of the .git directory
only_count_git_dir = True
size_before = repo.size_in_bytes(exclude_work_tree=only_count_git_dir)
# expire all reflog entries to unreachable objects immediately, enabling pruning through gc
command.execute(GIT_EXPIRE, show_argv=verbose, show_output=verbose)
# run garbage collection; automatically triggers prune, repack and more
command.execute(
(GIT_GC_AGGRESSIVE if aggressively else
GIT_GC),
show_argv=verbose,
show_output=verbose)
size_after = repo.size_in_bytes(exclude_work_tree=only_count_git_dir)
size_difference = size_before - size_after
return -size_difference
```
#### File: jhauberg/gitdoctor/setup.py
```python
import sys
import re
from setuptools import setup, find_packages
from doctor import VERSION_PATTERN, exit_if_not_compatible
exit_if_not_compatible()
def determine_version_or_exit() -> str:
""" Determine version identifier or exit with non-zero status. """
with open('doctor/__version__.py') as file:
version_contents = file.read()
version_match = re.search(VERSION_PATTERN, version_contents, re.M)
if version_match:
version = version_match.group(1)
return version
sys.exit('Version not found')
VERSION = determine_version_or_exit()
setup(
name='git-doctor',
version=VERSION,
description='Keep a healthy repository',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/jhauberg/gitdoctor',
download_url='https://github.com/jhauberg/gitdoctor/archive/master.zip',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
include_package_data=True,
platforms='any',
install_requires=[
'docopt==0.6.2'
],
entry_points={
'console_scripts': [
'git-doctor=doctor.__main__:main',
],
}
)
``` |
{
"source": "jhauschild/lecture_comp_methods",
"score": 3
} |
#### File: lecture_comp_methods/1_monte_carlo/metropolis.py
```python
import numpy as np
import matplotlib.pyplot as plt
import time
from numba import jit
@jit(nopython=True)
def energy(system, i, j, L):
"""Energy function of spins connected to site (i, j)."""
return -1. * system[i, j] * (system[np.mod(i - 1, L), j] + system[np.mod(i + 1, L), j] +
system[i, np.mod(j - 1, L)] + system[i, np.mod(j + 1, L)])
@jit
def prepare_system(L):
"""Initialize the system."""
system = 2 * (0.5 - np.random.randint(0, 2, size=[L, L]))
return system
@jit(nopython=True)
def measure_energy(system):
L = system.shape[0]
E = 0
for i in range(L):
for j in range(L):
E += energy(system, i, j, L) / 2.
return E
@jit(nopython=True)
def metropolis_loop(system, T, N_sweeps, N_eq, N_flips):
""" Main loop doing the Metropolis algorithm."""
E = measure_energy(system)
L = system.shape[0]
E_list = []
for step in range(N_sweeps + N_eq):
i = np.random.randint(0, L)
j = np.random.randint(0, L)
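        # flipping spin (i, j) reverses the sign of its local bond energy, so the change is dE = -2 * E_local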
dE = -2. * energy(system, i, j, L)
if dE <= 0.:
system[i, j] *= -1
E += dE
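        # Metropolis criterion: accept an energetically unfavourable flip with probability exp(-dE/T)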
elif np.exp(-1. / T * dE) > np.random.rand():
system[i, j] *= -1
E += dE
if step >= N_eq and np.mod(step, N_flips) == 0:
# measurement
E_list.append(E)
return np.array(E_list)
if __name__ == "__main__":
""" Scan through some temperatures """
# Set parameters here
L = 4 # Linear system size
N_sweeps = 5000 # Number of steps for the measurements
N_eq = 1000 # Number of equilibration steps before the measurements start
N_flips = 10 # Number of steps between measurements
N_bins = 10 # Number of bins use for the error analysis
T_range = np.arange(1.5, 3.1, 0.1)
C_list = []
system = prepare_system(L)
for T in T_range:
C_list_bin = []
for k in range(N_bins):
Es = metropolis_loop(system, T, N_sweeps, N_eq, N_flips)
mean_E = np.mean(Es)
mean_E2 = np.mean(Es**2)
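            # specific heat per site from energy fluctuations: c = (<E^2> - <E>^2) / (T^2 * L^2)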
C_list_bin.append(1. / T**2. / L**2. * (mean_E2 - mean_E**2))
C_list.append([np.mean(C_list_bin), np.std(C_list_bin) / np.sqrt(N_bins)])
print(T, mean_E, C_list[-1])
# Plot the results
C_list = np.array(C_list)
plt.errorbar(T_range, C_list[:, 0], C_list[:, 1])
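    # Onsager's exact critical temperature of the 2D Ising model, Tc = 2/ln(1+sqrt(2)) ~ 2.269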
Tc = 2. / np.log(1. + np.sqrt(2))
print(Tc)
plt.axvline(Tc, color='r', linestyle='--')
plt.xlabel('$T$')
plt.ylabel('$c$')
plt.show()
```
#### File: lecture_comp_methods/5_quantum_monte_carlo/sse.py
```python
import numpy as np
from numba import jit
def site(x, y, Lx, Ly):
"""Defines a numbering of the sites, given positions x and y."""
return y * Lx + x
def init_SSE_square(Lx, Ly):
"""Initialize a starting configuration on a 2D square lattice."""
n_sites = Lx*Ly
# initialize spins randomly with numbers +1 or -1, but the average magnetization is 0
spins = 2*np.mod(np.random.permutation(n_sites), 2) - 1
op_string = -1 * np.ones(10, np.intp) # initialize with identities
bonds = []
for x0 in range(Lx):
for y0 in range(Ly):
s0 = site(x0, y0, Lx, Ly)
s1 = site(np.mod(x0+1, Lx), y0, Lx, Ly) # bond to the right
bonds.append([s0, s1])
s2 = site(x0, np.mod(y0+1, Ly), Lx, Ly) # bond to the top
bonds.append([s0, s2])
bonds = np.array(bonds, dtype=np.intp)
return spins, op_string, bonds
@jit(nopython=True)
def diagonal_update(spins, op_string, bonds, beta):
"""Perform the diagonal update: insert or remove diagonal operators into/from the op_string."""
n_bonds = bonds.shape[0]
M = op_string.shape[0]
# count the number of non-identity operators
n = np.sum(op_string != -1)
# calculate ratio of acceptance probabilities for insert/remove n <-> n+1
    # <alpha|Hdiag|alpha> = 1/4 - <alpha|SzSz|alpha> = 0.5 for antiparallel spins
prob_ratio = 0.5*beta*n_bonds # /(M-n) , but the latter depends on n which still changes
for p in range(M): # go through the operator string
op = op_string[p]
if op == -1: # identity: propose to insert a new operator
b = np.random.randint(0, n_bonds) # select a bond
if spins[bonds[b, 0]] != spins[bonds[b, 1]]:
# can only insert if the two spins are anti-parallel!
prob = prob_ratio / (M - n)
if np.random.rand() < prob: # (metropolis-like)
# insert a diagonal operator
op_string[p] = 2*b
n += 1
elif np.mod(op, 2) == 0: # diagonal operator: propose to remove
            prob = 1/prob_ratio * (M-n+1)  # after removal there are n-1 operators; that n-1 takes the role of n in the insertion formula, hence M-(n-1) = M-n+1
if np.random.rand() < prob:
# remove diagonal operator
op_string[p] = -1
n -= 1
else: # offdiagonal operator: update spin configuration to get propagated |alpha(p)>
b = op // 2
# H^off ~= (S+S- + S-S+) = spin flip on both sites for antiparallel spins.
# (We never have configurations with operators acting on parallel spins!)
spins[bonds[b, 0]] = -spins[bonds[b, 0]]
spins[bonds[b, 1]] = -spins[bonds[b, 1]]
return n
@jit(nopython=True)
def loop_update(spins, op_string, bonds):
"""Perform the offdiagonal update: construct loops and flip each of them with prob. 0.5."""
# create the loops
vertex_list, first_vertex_at_site = create_linked_vertex_list(spins, op_string, bonds)
# and flip them
flip_loops(spins, op_string, vertex_list, first_vertex_at_site)
@jit(nopython=True)
def create_linked_vertex_list(spins, op_string, bonds):
"""Given a configuration, construct a linked list between vertices defining the loops.
Given a configuration of spins and operators, we need to construct the loops.
An efficient way to do this is to create a double-linked `vertex_list` which contains
the connections between the vertices of the operators. Each operator has 4 vertices (=legs in
the tensor network language), so if we simply enumerate all the vertices in the operator
    string, we get v0=4*p, v1=4*p+1, v2=4*p+2, v3=4*p+3 for the vertices
v0 v1
|--|
|Op| <-- op_string[p]
|--|
v2 v3
In this function, we set the entries of the `vertex_list` for any
(vertically) connected pair `v, w` (i.e. vertical parts of the loops) we have
``v = vertex_list[w]`` and ``w = vertex_list[v]``.
Later on, an entry -1 indicates that the loop along this connection was flipped;
    an entry -2 indicates that the loop was visited and proposed to flip, but the flip was rejected.
Identity operators are completely ignored for the connections, its vertices are directly
marked with a -2.
The returned array `first_vertex_at_site` contains the first vertex encountered at each site,
entries -1 indicate that there is no (non-identity) operator acting on that site.
"""
n_sites = spins.shape[0]
M = op_string.shape[0]
vertex_list = np.zeros(4*M, np.intp)
    # (initial value of vertex_list doesn't matter: gets completely overwritten)
first_vertex_at_site = -1 * np.ones(n_sites, np.intp) # -1 = no vertex found (yet)
last_vertex_at_site = -1 * np.ones(n_sites, np.intp) # -1 = no vertex found (yet)
# iterate over all operators
for p in range(M):
v0 = p*4 # left incoming vertex
v1 = v0 + 1 # right incoming vertex
op = op_string[p]
if op == -1: # identity operator
# ignore it for constructing/flipping loops: mark as visited
vertex_list[v0:v0+4] = -2
else:
b = op//2
s0 = bonds[b, 0]
s1 = bonds[b, 1]
v2 = last_vertex_at_site[s0]
v3 = last_vertex_at_site[s1]
if v2 == -1: # no operator encountered at this site before
first_vertex_at_site[s0] = v0
else: # encountered an operator at this vertex before -> create link
vertex_list[v2] = v0
vertex_list[v0] = v2
if v3 == -1: # and similar for other site
first_vertex_at_site[s1] = v1
else:
vertex_list[v3] = v1
vertex_list[v1] = v3
last_vertex_at_site[s0] = v0 + 2 # left outgoing vertex of op
last_vertex_at_site[s1] = v0 + 3 # right outgoing vertex of op
# now we need to connect vertices between top and bottom
for s0 in range(n_sites):
v0 = first_vertex_at_site[s0]
if v0 != -1: # there is an operator acting on that site -> create link
v1 = last_vertex_at_site[s0]
vertex_list[v1] = v0
vertex_list[v0] = v1
return vertex_list, first_vertex_at_site
@jit(nopython=True)
def flip_loops(spins, op_string, vertex_list, first_vertex_at_site):
"""Given the vertex_list, flip each loop with prob. 0.5.
Once we have the vertex list, we can go through all the vertices and flip each loop with
probability 0.5. When we propose to flip a loop, we go through it and mark it as flipped (-1)
    or visited (-2) in the vertex list to avoid a second proposal to flip it.
Note that for an integer number `i`, the operation ``i ^ 1`` gives i+1 or i-1 depending on
whether `i` is even or odd: it flips 0<->1, 2<->3, 4<->5, ...
This is used to switch between diagonal/offdiagonal operators in the operator string when
flipping a loop, and to propagate the open end of the loop vertically between vertices
v0<->v1, v2<->v3 of the operators.
"""
n_sites = spins.shape[0]
M = op_string.shape[0]
# iterate over all possible beginnings of loops
    # (stride of 2: v0+1 always belongs to the same loop as v0, so only even vertices can start a loop)
for v0 in range(0, 4*M, 2):
if vertex_list[v0] < 0: # marked: we've visited the loop starting here before.
continue
v1 = v0 # we move v1 as open end of the loop around until we come back to v0
if np.random.rand() < 0.5:
# go through the loop and flip it
while True:
op = v1 // 4
op_string[op] = op_string[op] ^ 1 # flip diagonal/offdiagonal
vertex_list[v1] = -1
v2 = v1 ^ 1
v1 = vertex_list[v2]
vertex_list[v2] = -1
if v1 == v0:
break
else:
# don't flip the loop, but go through it to mark it as visited
while True:
vertex_list[v1] = -2
v2 = v1 ^ 1
v1 = vertex_list[v2]
vertex_list[v2] = -2
if v1 == v0:
break
for s0 in range(0, n_sites):
if first_vertex_at_site[s0] == -1: # no operator acting on that site -> flip with p=0.5
if np.random.rand() < 0.5:
spins[s0] = -spins[s0]
else: # there is an operator acting on that site
if vertex_list[first_vertex_at_site[s0]] == -1: # did we flip the loop?
spins[s0] = -spins[s0] # then we also need to flip the spin
# done
def thermalize(spins, op_string, bonds, beta, n_updates_warmup):
"""Perform a lot of upates to thermalize, without measurements."""
if beta == 0.:
raise ValueError("Simulation doesn't work for beta = 0")
for _ in range(n_updates_warmup):
n = diagonal_update(spins, op_string, bonds, beta)
loop_update(spins, op_string, bonds)
# check if we need to increase the length of op_string
M_old = len(op_string)
M_new = n + n // 3
if M_new > M_old:
op_string = np.resize(op_string, M_new)
op_string[M_old:] = -1
return op_string
def measure(spins, op_string, bonds, beta, n_updates_measure):
"""Perform a lot of updates with measurements."""
ns = []
for _ in range(n_updates_measure):
n = diagonal_update(spins, op_string, bonds, beta)
loop_update(spins, op_string, bonds)
ns.append(n)
return np.array(ns)
def run_simulation(Lx, Ly, betas=[1.], n_updates_measure=10000, n_bins=10):
"""A full simulation: initialize, thermalize and measure for various betas."""
spins, op_string, bonds = init_SSE_square(Lx, Ly)
n_sites = len(spins)
n_bonds = len(bonds)
Es_Eerrs = []
for beta in betas:
print("beta = {beta:.3f}".format(beta=beta), flush=True)
op_string = thermalize(spins, op_string, bonds, beta, n_updates_measure//10)
Es = []
for _ in range(n_bins):
ns = measure(spins, op_string, bonds, beta, n_updates_measure)
# energy per site
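                # SSE estimator: <H> = -<n>/beta; the 0.25 per bond undoes the constant added to keep bond operators non-negative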
E = (-np.mean(ns)/beta + 0.25*n_bonds) / n_sites
Es.append(E)
E, Eerr = np.mean(Es), np.std(Es)/np.sqrt(n_bins)
Es_Eerrs.append((E, Eerr))
return np.array(Es_Eerrs)
if __name__ == "__main__":
# parameters
beta = 1.
Es_Eerrs = run_simulation(4, 4, [1.])
print("Energy per site ={E:.8f} at T={T:.3f}".format(E=Es_Eerrs[0,0], T=1./beta))
``` |
{
"source": "jhaus/pinax",
"score": 2
} |
#### File: apps/account/auth_backends.py
```python
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
class AuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
lookup_params = {}
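        # authenticate against either email or username, depending on project settings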
if settings.ACCOUNT_EMAIL_AUTHENTICATION:
field, identity = "email__iexact", credentials.get("email")
else:
field, identity = "username__iexact", credentials.get("username")
if identity is None:
return None
lookup_params[field] = identity
try:
user = User.objects.get(**lookup_params)
except User.DoesNotExist:
return None
else:
if user.check_password(credentials["password"]):
return user
EmailModelBackend = AuthenticationBackend
``` |
{
"source": "jhautefeuille/garden.supershape",
"score": 3
} |
#### File: jhautefeuille/garden.supershape/__init__.py
```python
__title__ = 'garden.supershape'
__version__ = '0.1.5'
__author__ = '<EMAIL>'
__all__ = ('Shape',)
# Main Kivy import
import kivy
kivy.require('1.8.0')
from kivy.app import App
from kivy.utils import get_color_from_hex
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.properties import ListProperty
from kivy.properties import NumericProperty
from kivy.properties import BooleanProperty
from kivy.properties import BoundedNumericProperty
from kivy.graphics import Color
from kivy.graphics import Rectangle
from kivy.graphics import Bezier
from kivy.graphics import Line, Point
from kivy.graphics import Scale, Translate, Rotate, PopMatrix, PushMatrix
# Computing import
import math
def superformula(a, b, m, n1, n2, n3, phi):
'''
Computes the position of the point on a
superformula curve.
    The superformula was first proposed by <NAME>
    and is a generalization of the superellipse.
see: http://en.wikipedia.org/wiki/Superformula
'''
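    # r(phi) = (|cos(m*phi/4)/a|^n2 + |sin(m*phi/4)/b|^n3)^(-1/n1), evaluated term by term below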
t1 = math.cos(m * phi / 4.0) / a
t1 = abs(t1)
t1 = math.pow(t1, n2)
t2 = math.sin(m * phi / 4.0) / b
t2 = abs(t2)
t2 = math.pow(t2, n3)
t3 = -1 / float(n1)
r = math.pow(t1 + t2, t3)
if abs(r) == 0:
return (0, 0)
else:
return (r * math.cos(phi), r * math.sin(phi))
def supershape(
width,
height,
m,
n1,
n2,
n3,
point_count=100,
percent=1.0,
a=1.0,
b=1.0,
travel=None):
'''
Supershape, generated using the superformula first proposed
by <NAME>.
    - `point_count` is the total number of points to compute.
- `travel` is the length of the outline drawn in radians.
3.1416 * 2 is a complete cycle.
'''
travel = travel or (math.pi * 2)
# Compute points
phis = [i*travel/point_count for i in range(int(point_count * percent))]
points = [superformula(a, b, m, n1, n2, n3, x) for x in phis]
# Scale and transpose
path = []
for x, y in points:
x *= width
y *= height
path.append((x, y))
return path
class Shape(RelativeLayout):
'''
Shape class
'''
shape_size = BoundedNumericProperty(256, min=1, max=512, errorvalue=512)
color = StringProperty('3619ffff')
bg_color = StringProperty('19526699')
a = BoundedNumericProperty(1, min=0.1, max=1, errorvalue=1)
b = BoundedNumericProperty(1, min=0.1, max=1, errorvalue=1)
m = BoundedNumericProperty(7, min=-100, max=100, errorvalue=16)
n1 = BoundedNumericProperty(2, min=1, max=50, errorvalue=4)
n2 = BoundedNumericProperty(8, min=1, max=50, errorvalue=4)
n3 = BoundedNumericProperty(4, min=1, max=50, errorvalue=10)
nbp = BoundedNumericProperty(100, min=2, max=1000, errorvalue=100)
percent = BoundedNumericProperty(1, min=1, max=10, errorvalue=1)
travel = BoundedNumericProperty(2, min=2, max=100, errorvalue=2)
line = BooleanProperty(False)
wdth = BoundedNumericProperty(1, min=1, max=10, errorvalue=1)
path = ListProperty()
def __init__(self, **kwargs):
super(Shape, self).__init__(**kwargs)
self.bind(
pos=self.update,
size=self.update,
shape_size=self.update,
color=self.update,
bg_color=self.update,
a=self.update,
b=self.update,
m=self.update,
n1=self.update,
n2=self.update,
n3=self.update,
nbp=self.update,
percent=self.update,
travel=self.update,
line=self.update,
wdth=self.update)
def update(self, *args):
with self.canvas:
# Refresh canvas
self.canvas.clear()
# Background configuration
Color(
get_color_from_hex(self.bg_color)[0],
get_color_from_hex(self.bg_color)[1],
get_color_from_hex(self.bg_color)[2], 100)
Rectangle(pos=self.pos, size=self.size)
# Path configuration
Translate(self.width / 2, self.height / 2)
Color(
get_color_from_hex(self.color)[0],
get_color_from_hex(self.color)[1],
get_color_from_hex(self.color)[2], 100)
s = supershape(
width=self.shape_size / 2.0,
height=self.shape_size / 2.0,
m=self.m,
n1=self.n1,
n2=self.n2,
n3=self.n3,
point_count=self.nbp,
percent=self.percent,
a=self.a,
b=self.b,
travel=math.pi * self.travel)
# clear path list
self.path[:] = []
for elem in s:
self.path.append(elem[0])
self.path.append(elem[1])
if self.line:
Line(
points=(self.path),
width=self.wdth,
cap="round",
joint="round",
close=True)
else:
Point(
points=(self.path),
pointsize=self.wdth,
cap="round",
joint="round",
close=True)
class ShapeTest(App):
"""
Example application
"""
def build(self):
shape = Shape(n1=7, color='3619ffff', bg_color='19526699', size_hint=(0.8, 1))
return shape
if __name__ == '__main__':
ShapeTest().run()
``` |
{
"source": "jhaux/alphapose_pipeline",
"score": 2
} |
#### File: jhaux/alphapose_pipeline/estimate_and_track.py
```python
from tqdm import tqdm, trange
import argparse
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import re
from PIL import Image
import sys
import traceback
import json
from edflow.data.util import get_support, adjust_support
from multiperson.multiperson_dataset import MultiPersonDataset
from status import status_of_video
PARTS = [
'',
'frames',
'poses',
'tracks',
'labels',
'crops',
'masks',
'done'
]
DONE = 'done'
def test_ending(string, tests=[], mode='or'):
if mode == 'or':
for test in tests:
if string[-len(test):] == test:
return True
return False
elif mode == 'and':
for test in tests:
if string[-len(test):] != test:
return False
return True
else:
raise ValueError('Unrecognized mode. Must be one of `or`, '
+ '`and` but is {}'.format(mode))
def listfiles(folder):
ret_list = []
for root, folders, files in os.walk(folder):
new_folders = []
for f in folders:
if not test_ending(f, ['_frames', '_masks', '_crops', '_track']):
new_folders += [f]
folders[:] = new_folders
for filename in folders + files:
ret_list += [os.path.join(root, filename)]
return ret_list
def e_and_t(vgplf):
videos, gpu, parts, logfile, force = vgplf
for vid in videos:
print(vid)
vid_ = vid.replace(' ', '\ ')
try:
with open(logfile, 'a+') as f:
f.write('Start: {}\n'.format(vid))
if 1 in parts:
if not os.path.exists(vid+'_frames') or 1 in force:
os.system('bash make_frames.sh {} {}'.format(vid_, gpu))
with open(logfile, 'a+') as f:
f.write('frames: {}\n'.format(vid))
else:
with open(logfile, 'a+') as f:
f.write('frames - skipped: {}\n'.format(vid))
if 2 in parts:
pose_file = vid+'_track/alphapose-results.json'
if not os.path.exists(pose_file) or 2 in force:
os.system('bash estimate_olympic {} {}'.format(vid_, gpu))
with open(logfile, 'a+') as f:
f.write('estimate: {}\n'.format(vid))
else:
with open(logfile, 'a+') as f:
f.write('estimate - skipped: {}\n'.format(vid))
if 3 in parts:
track_file = vid+'_track/alphapose-forvis-tracked.json'
if not os.path.exists(track_file) or 3 in force:
os.system('bash track_olympic.sh {} {}'.format(vid_, gpu))
with open(logfile, 'a+') as f:
f.write('track: {}\n'.format(vid))
else:
with open(logfile, 'a+') as f:
f.write('track - skipped: {}\n'.format(vid))
if 4 in parts:
lpath = os.path.join(vid + '_track', 'per_person_labels.npz')
if not os.path.exists(lpath) or 4 in force:
make_csv_and_npz(vid)
with open(logfile, 'a+') as f:
f.write('csv+npz: {}\n'.format(vid))
else:
with open(logfile, 'a+') as f:
f.write('csv+npz - skipped: {}\n'.format(vid))
if 5 in parts:
success_file = os.path.join(vid+'_crops', '.Success')
if not os.path.exists(success_file) or 5 in force:
make_crops(vid)
with open(logfile, 'a+') as f:
f.write('crops: {}\n'.format(vid))
else:
with open(logfile, 'a+') as f:
f.write('crops - skipped: {}\n'.format(vid))
if 6 in parts:
success_file = os.path.join(vid+'_masks', '.Success')
                if not os.path.exists(success_file) or 6 in force:
os.system('bash make_masks.sh {} {}'.format(vid_, gpu))
with open(logfile, 'a+') as f:
f.write('Masks: {}\n'.format(vid))
else:
with open(logfile, 'a+') as f:
f.write('Masks - skipped: {}\n'.format(vid))
if 7 in parts:
make_flows(vid)
with open(logfile, 'a+') as f:
f.write('Flow: {}\n'.format(vid))
with open(logfile, 'a+') as f:
f.write('Done: {}\n'.format(vid))
except Exception as e:
with open(logfile, 'a+') as f:
f.write('Error: {} -*- {}\n'.format(vid, str(e).replace(': ', '; ')))
if not isinstance(e, FileNotFoundError):
traceback.print_exc()
continue
def extract_lines(tracking_data):
''' Converts dict of list of persons to list of persons with frame
annotation.
Args:
tracking_data (dict): ``frame: [{idx: 1, ...}, {...}, ...]``
'''
linear = []
for i, k in enumerate(sorted(tracking_data.keys())):
for data in tracking_data[k]:
example = {'orig': k, 'fid': i}
example.update(data)
linear += [example]
sorted_linear = sorted(linear, key=lambda e: [e['idx'], e['fid']])
last_id_change = 0
last_id = None
last_fid = -1
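    # sid counts consecutive frames of one person; it resets whenever the id changes or a frame is skipped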
for example in sorted_linear:
ex_id = example['idx']
if last_id != ex_id or last_fid != example['fid'] - 1:
last_id_change = example['fid']
seq_idx = example['fid'] - last_id_change
example['sid'] = seq_idx
last_id = ex_id
last_fid = example['fid']
return sorted_linear
def prepare_keypoints(kps_raw):
'''Converts kps of form ``[x, y, c, x, y, c, ...]`` to
``[[x, y, c], [x, y, c], ...]``'''
x = kps_raw[::3]
y = kps_raw[1::3]
c = kps_raw[2::3]
return np.stack([x, y, c], axis=-1)
def square_bbox(prepared_kps, pad=0.35, kind='percent'):
if not kind in ['percent', 'abs']:
raise ValueError('`kind` must be one of [`percent`, `abs`], but is {}'
.format(kind))
x = prepared_kps[:, 0]
y = prepared_kps[:, 1]
minx, maxx = x.min(), x.max()
miny, maxy = y.min(), y.max()
wx = maxx - minx
wy = maxy - miny
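    # use the larger extent so the box is square and still contains every keypoint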
w = max(wx, wy)
centerx = minx + wx / 2.
centery = miny + wy / 2.
if pad is not None and pad != 0:
if kind == 'percent':
w = (1 + pad) * w
else:
w += pad
bbox = np.array([centerx - w/2., centery - w/2., w, w])
return bbox
def get_kps_rel(kps_abs, bbox):
kps_rel = np.copy(kps_abs)
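    # shift keypoints into box coordinates, then divide by the box size so they lie roughly in [0, 1]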
kps_rel[:, :2] = kps_rel[:, :2] - bbox[:2]
kps_rel[:, :2] = kps_rel[:, :2] / bbox[2:]
return kps_rel
def add_lines_to_csv(data_frame, track_dir, frame_dir, root, kp_in_csv=True):
json_name = os.path.join(root,
track_dir,
'alphapose-forvis-tracked.json')
with open(json_name, 'r') as json_file:
tracking_data = json.load(json_file)
all_kps_abs = []
all_kps_rel = []
all_boxes = []
raw_lines = extract_lines(tracking_data)
for j, line in enumerate(tqdm(raw_lines, 'L')):
kps_abs = prepare_keypoints(line['keypoints'])
bbox = square_bbox(kps_abs)
kps_rel = get_kps_rel(kps_abs, bbox)
frame_root = os.path.join(root, frame_dir, line['orig'])
vid = os.path.join(root, frame_dir[:-7])
pid = line['idx']
fid = line['fid']
sid = line['sid']
if kp_in_csv:
data_frame = data_frame.append(
{
'frame_path': frame_root,
'video_path': vid,
'frame_idx': fid,
'sequence_idx': sid,
'person_id': pid,
'keypoints_abs': kps_abs,
'bbox': bbox,
'keypoints_rel': kps_rel
},
ignore_index=True # append with incremental index
)
else:
all_kps_abs += [kps_abs]
all_kps_rel += [kps_rel]
all_boxes += [bbox]
data_frame = data_frame.append(
{
'frame_path': frame_root,
'video_path': vid,
'frame_idx': fid,
'sequence_idx': sid,
'person_id': pid,
},
ignore_index=True # append with incremental index
)
if not kp_in_csv:
return data_frame, np.stack(all_kps_abs), np.stack(all_kps_rel), np.stack(all_boxes)
else:
return data_frame
def make_csv_and_npz(video):
'''Writes a csv containing all frame paths, with person id etc and a .npz
containing all keypoints of each person as well as the bounding boxes around
those keypoints with the keypoints relative to that bounding box.
'''
data_frame = pd.DataFrame(columns=
[
'frame_path',
'video_path',
'frame_idx',
'sequence_idx',
'person_id'
]
)
root = os.path.dirname(video)
data_frame, kps_abs, kps_rel, boxes = add_lines_to_csv(data_frame,
video+'_track',
video+'_frames',
root,
False)
csv_name = os.path.join(video + '_track', 'per_person_content.csv')
data_frame.to_csv(csv_name, sep=';', index=False)
lpath = os.path.join(video + '_track', 'per_person_labels.npz')
labels = {'keypoints_abs': kps_abs,
'keypoints_rel': kps_rel,
'bbox': boxes
}
np.savez(lpath, **labels)
def crop(image, box):
'''Arguments:
image (np.ndarray or PIL.Image): Image to crop.
box (list): Box specifying ``[x, y, width, height]``
points (np.ndarray): Optional set of points in image coordinate, which
are translated to box coordinates. Shape: ``[(*), 2]``.
Returns:
np.ndarray: Cropped image with shape ``[W, H, C]`` and same support
as :attr:`image`.
If points is not None:
np.ndarray: The translated point coordinates.
'''
is_image = True
if not isinstance(image, Image.Image):
in_support = get_support(image)
image = adjust_support(image, '0->255')
image = Image.fromarray(image)
is_image = False
box[2:] = box[:2] + box[2:]
image = image.crop(box)
if not is_image:
image = adjust_support(np.array(image), in_support)
return image
def make_crops(video):
crop_path = video + '_crops'
os.makedirs(crop_path, exist_ok=True)
data_root = video + '_track'
print(data_root)
MP = MultiPersonDataset(data_root)
for i in trange(len(MP), desc='Crop'):
example = MP[i]
im_path = example['frame_path']
box = example['bbox']
pid = example['person_id']
sid = example['sequence_idx']
fid = example['frame_idx']
crop_im = crop(Image.open(im_path), box)
savepath = '{:0>5}-p{:0>3}-s{:0>3}-f{:0>3}.png'.format(
i,
pid,
sid,
fid
)
savepath = os.path.join(crop_path, savepath)
if i == 0:
print(savepath)
crop_im.save(savepath, 'PNG')
with open(os.path.join(crop_path, '.Success'), 'w+') as sf:
sf.write('We did it!')
def make_flows(video):
'''Estimate the flow between sets of frames.'''
pass
if __name__ == '__main__':
from datetime import datetime
from multiperson.aggregated_dataset import find_videos
A = argparse.ArgumentParser()
A.add_argument('--root',
type=str,
default='/export/scratch/jhaux/Data/olympic_test/')
# A.add_argument('--nw',
# type=int,
# default=10)
A.add_argument('--p', type=int, nargs='+', default=list(range(10)),
help='Which parts to do')
A.add_argument('--f', type=int, nargs='*', default=[],
help='Which parts to force and not skip. '
'Only considers parts specified by `--p`.')
A.add_argument('--gpus',
type=int,
nargs='+',
default=list(range(5)))
A.add_argument('--per_gpu',
type=int,
default=2)
A.add_argument('--ext',
type=str,
nargs='+',
default='mp4')
A.add_argument('--vids',
type=str,
nargs='+',
default=None)
args = A.parse_args()
root = args.root
gpus = args.gpus
per_gpu = args.per_gpu
force = args.f
nw = len(gpus) * per_gpu
if args.vids is None:
all_videos = find_videos(root, args.ext)
else:
all_videos = args.vids
videos = []
    for v in all_videos:
        status = status_of_video(v)
        for p in args.p:
            key = PARTS[p]
            if not status[key]:
                # one unfinished part is enough to schedule this video once
                videos += [v]
                break
vid_indices_per_gpu = np.array_split(videos, len(gpus))
now = datetime.now()
date_time = now.strftime("%d-%m-%Y_%H:%M:%S")
logfile = os.path.join(root, 'pipeline_log.{}.txt'.format(date_time))
old_logs = [o for o in os.listdir(root) if 'pipeline_log' in o]
for o in old_logs:
if o[0] != '.':
print(o)
src = os.path.join(root, o)
dst = os.path.join(root, '.'+o)
os.rename(src, dst)
with mp.Pool(nw) as pool:
args_ = []
for gpu, indices in zip(gpus, vid_indices_per_gpu):
sub_indices = np.array_split(indices, per_gpu)
for si in sub_indices:
args_ += [(si, gpu, args.p, logfile, force)]
print(args_)
        list(tqdm(pool.imap_unordered(e_and_t, args_), total=len(args_)))
``` |
{
"source": "jhaux/bin",
"score": 2
} |
#### File: bin/python/evaporation.py
```python
__author__ = 'jhaux'
import jimlib as jim
import image_operations as imop
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import cv2
def rescale(image, low=0, top=100):
image[image > top] = top
image[image < low] = low
image -= image.min()
image *= 100/image.max()
return image
def main():
# Load the pictures for plotting purposes
path_to_files = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/measurement_2014-12-17_15-50-16/measurement_2014-12-17_15-50-16/images/630_nm'
all_Files = jim.listdir_nohidden(path_to_files)
pic_1 = 1
pic_2 = len(all_Files) - 5
print all_Files[pic_1]
print all_Files[pic_2]
yshift = 1
xmin,xmax,ymin,ymax,cmin,cmax = 673,None,0,None,None,None
P_1 = jim.rotate_90(cv2.imread(all_Files[pic_1]),0)[:-yshift,xmin:,:]
P_2 = jim.rotate_90(cv2.imread(all_Files[pic_2]),0)[yshift:,xmin:,:]
D_21 = np.mean(imop.difference(P_1,P_2),axis=-1)
D_21 = rescale(D_21, 30, 70)
images = (P_1, P_2, D_21)
titles = (jim.get_timestamp(all_Files[pic_1]), jim.get_timestamp(all_Files[pic_2]), 'Difference')
print D_21
# plot
fig, axes = plt.subplots(nrows=1, ncols=3)
for ax, i in zip(axes.flat, np.arange(len(images))):
im = ax.imshow(images[i])
ax.set_title(titles[i])
ax.set_xticklabels([])
ax.set_yticklabels([])
cbar_ax, kw = mpl.colorbar.make_axes([ax for ax in axes.flat])
plt.colorbar(im, cax=cbar_ax, **kw)
plt.show()
return 0
if __name__ == '__main__':
main()
```
#### File: bin/python/get_results_v2.py
```python
__author__ = 'jhaux'
import cv2
import numpy as np
import os # getting the files
import jimlib as jim # my own set of functions
import sys # commandline arguments and output
import time # timer
def listdir_nohidden(path):
list = [n for n in os.listdir(path) if not n.startswith(".")]
return list
def image_Directories_and_Types(working_directory):
    '''Get the image directories and types without declaring everything by hand.
    This way several image types can be handled at once.
    Returns e.g. ['630_nm', 'dark', 'no_filter'],
    ['cwd/base/images/630_nm', 'cwd/base/images/dark', 'cwd/base/images/no_filter']'''
basename = working_directory.split('/')[-1]
Types = listdir_nohidden(str(working_directory + '/' + basename + '/images/'))
Directories = [working_directory + '/' + basename + '/images/' + type for type in Types]
# ret. e.g. ['cwd/base/images/630_nm', 'cwd/base/images/dark', 'cwd/base/images/no_filter']
return Types, Directories
# def define_reference_path(reference, Directories):
# ''' return a list of reference images (arrays!) as defined by the integer "reference"'''
# References = np.zeros(len(Directories)).astype('string')
# for image in np.arange(len(Directories)):
# References[image] = Directories[image] + '/' + listdir_nohidden(Directories[image])[reference]
#
# return References
def make_std_video(directory, type,
ref=1,
fps = 25, fourcc = cv2.cv.CV_FOURCC(*'mp4v'),
b_h = 200, b_c=(255,255,255),
font = cv2.FONT_HERSHEY_SIMPLEX, font_color=(0,0,0)):
    '''Make two standardized videos:
    one with and one without an overlaid colormap containing information about the CO2 concentration'''
# Check if directory contains files. If not: abort!
if not listdir_nohidden(directory):
print "no images of type " + type
pass
elif type == 'dark':
print "skipping dark frames"
pass
else:
        # adjust video height to fit image and bar
reference = cv2.imread(directory + '/' + listdir_nohidden(directory)[ref])
hight, width = reference.shape[0:2]
hight = hight + b_h
shape = (width, hight)
# Tell std out what we are doing
infostring = "\n### VIDEO " + type + " ###"
print infostring
# Setup output files, where the video will be written in
grayscale = cv2.VideoWriter(str(type +'_grayscale.mov'), fourcc, fps, shape, False)
ovrld_cmp = cv2.VideoWriter(str(type +'_color-map.mov'), fourcc, fps, shape, False)
# stats for the loading bar function
current_image = 0
amount_of_images = len(listdir_nohidden(directory)) - 1
timer_start = time.clock()
for image in listdir_nohidden(directory):
# iterable is a list of image names!
# get timestamp from filename
timestamp = jim.get_timestamp(image)
# Grayscale video:
orig = cv2.imread( directory + '/' + image ).astype('uint8')
frame_g = jim.addColorBar(orig, bar_h=b_h, color=b_c)
cv2.putText(frame_g,timestamp,(10, hight - 40), font, 4,(0,0,0),2,cv2.CV_AA)
grayscale.write(frame_g)
# Colormap video:
conc = jim.getConcentration(orig, reference)
over = jim.overlay_colormap(conc, background=orig,
lower_threshold=3, upper_threshold=50,
cmap='HOT', blurKernel=(1,1))
frame_c = jim.addColorBar(over, bar_h=b_h, color=b_c)
cv2.putText(frame_c,timestamp,(10, hight - 40), font, 4,(255,255,255),2,cv2.CV_AA)
ovrld_cmp.write(frame_c)
# now generate a nice loading bar:
jim.progress_bar( current_image, amount_of_images, starttime=timer_start )
current_image += 1
grayscale.release()
ovrld_cmp.release()
print "\nfinished!"
return 0
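# Hedged usage sketch (paths illustrative): render both videos for a single
# wavelength directory, using the second image in that directory as reference:
# make_std_video('measurement_X/images/630_nm', '630_nm', ref=1)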
def simple_main():
working_directory = os.getcwd()
Types, Directories = image_Directories_and_Types(working_directory)
ref = int(sys.argv[1])
# References = define_reference_path(int(sys.argv[1]), Directories)
for dir, type in zip(Directories, Types):
make_std_video(dir, type, ref)
if __name__ == '__main__':
simple_main()
```
#### File: bin/python/ltm_waves.py
```python
__author__ = 'jhaux'
# -*- coding: utf8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
import scipy.ndimage as ndimage
from scipy.interpolate import interp1d
import os
import datetime
import jimlib as jim
import image_operations as imop
# class HS-Cell:
# '''Base class for the Hele-Shaw experiment'''
# # some basic properties of the cell
# width = 23 # cm
def format_time(time):
return datetime.datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')
def dt2str(t1, t2):
timestring = datetime.datetime.fromtimestamp(t2 - t1).strftime('%H:%M:%S')
s = list(timestring)
hour = float(s[1]) - 1
s[1] = str(hour)[0]
timestring = "".join(s)
return timestring
def format_data(all_data, cell_width=0.23):
'''Standard operations to get the wanted information out of the previously stored wavelength-data files'''
if len(all_data.shape) > 1:
timesteps = all_data[:,0]
intensities = all_data[:,1:]
pixels = np.arange(len(all_data[0,1:]))
meters = np.linspace(0,cell_width,num=len(intensities[0]))
else:
timesteps = all_data[0]
intensities = all_data[1:]
pixels = np.arange(len(all_data[1:]))
meters = np.linspace(0,cell_width,num=len(intensities))
return timesteps, intensities, pixels, meters
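# Hedged sketch (illustrative only): format_data on a fake two-row data block,
# where column 0 holds the timestamp and the remaining columns the intensities.
def _example_format_data():
    fake = np.array([[0.0, 1.0, 2.0, 3.0],
                     [10.0, 1.5, 2.5, 3.5]])
    timesteps, intensities, pixels, meters = format_data(fake)
    return timesteps.shape, intensities.shape  # expected: (2,), (2, 3)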
def scalar_cmap(data, cmap_name='cool'):
    '''returns an array of rgba color values for plotting purposes'''
values = np.arange(len(data))
    cmap = plt.get_cmap(cmap_name)
cNorm = colors.Normalize(vmin=0, vmax=values[-1])
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
color_vals = [scalarMap.to_rgba(values[i]) for i in values]
return color_vals
def plot_single_timestep(all_data):
'''input must be a one-column array!'''
timesteps, intensities, pixels, meters = format_data(all_data)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(intensities)
    ax.set_title(format_time(timesteps))  # scalar timestep for a one-column array
plt.show()
def plot_all_timesteps3d(all_data, cmap_name):
timesteps, intensities, pixels, meters = format_data(all_data)
colors = scalar_cmap(timesteps, cmap_name)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
zticks = []
for i in np.arange(len(timesteps)):
ax.plot(xs=pixels, ys=intensities[i], zs=timesteps[i], zdir='z', c=colors[i], alpha=1.)
zticks.append(format_time(timesteps[i]))
ax.set_title('all data')
ax.set_zticks(timesteps[::6])
ax.set_zticklabels(zticks[::6])
plt.show()
def plot_all_timesteps2d(all_data, cmap_name):
timesteps, intensities, pixels, meters = format_data(all_data)
colors = scalar_cmap(timesteps, cmap_name)
fig = plt.figure()
ax = fig.add_subplot(111)
zticks = []
for i in np.arange(len(timesteps)):
ax.plot(intensities[i], c=colors[i], alpha=1.)
zticks.append(format_time(timesteps[i]))
ax.set_title('all data')
# ax.set_zticks(timesteps[::6])
# ax.set_zticklabels(zticks[::6])
plt.show()
def fourier_clean(all_data, handle=25000, cell_width=0.23, clean_criterium='amplitude'):
'''all_data must be a one-column array!'''
timesteps, intensities, pixels, meters = format_data(all_data)
N = intensities.shape[0]
waves = intensities
# map real spacial coordinates to the pixel data:
xmin = 0. # [m]
xmax = cell_width # [m]
xlength = xmax - xmin
x_array = meters # spatial domain of the fingers!
# get wavelengthspace and corresponding wavelngths
wavelengthspace = np.fft.rfft(waves)
wavelengths = np.fft.rfftfreq(waves.shape[0], d=xlength/waves.shape[0]) # d: Distance of datapoints in "Space-space"
    # clean the wavelength space
wavelengthspace_clean = np.empty_like(wavelengthspace)
wavelengthspace_clean[:] = wavelengthspace
if clean_criterium == 'amplitude':
wavelengthspace_clean[(abs(wavelengthspace) < handle)] = 0 # filter all unwanted wavelengths
elif clean_criterium == 'wavelength':
wavelengthspace_clean[(wavelengths > handle)] = 0
# get cleaned version of the waves
waves_clean = np.fft.irfft(wavelengthspace_clean) # inverse fft returns cleaned wave
x_array_clean = np.linspace(xmin, xmax, num=waves_clean.shape[0])
return wavelengthspace, wavelengths, waves_clean, x_array_clean, wavelengthspace_clean
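# Hedged sketch (illustrative only): the same rfft/irfft low-pass idea used in
# fourier_clean, applied to a synthetic profile. The fake data row starts with
# a dummy timestamp, as format_data expects for one-column data.
def _example_fourier_clean():
    x = np.linspace(0.0, 0.23, 512)
    profile = np.sin(2*np.pi*30*x) + 0.2*np.sin(2*np.pi*300*x)
    fake_row = np.concatenate(([0.0], profile))
    _, _, clean, x_clean, _ = fourier_clean(fake_row, handle=100,
                                            clean_criterium='wavelength')
    return x_clean, clean  # the 300 m^-1 component should be filtered out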
def plot_wavelengthspace(all_data, step=1, start=0, end=-1, handle=25000, clean_crit='amplitude', cell_width=0.23, cmap_name='Greys', alpha=0.4, f_bars=True):
'''TO DO: for i in it: ax.plot(fourierclean)'''
# iterate over timesteps
timesteps, intensities, pixels, meters = format_data(all_data[start:end][::step])
colors = scalar_cmap(timesteps, cmap_name)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
width = 5*cell_width/len(intensities[0])
t_0 = timesteps[0]
for t, i in zip(timesteps, np.arange(len(timesteps))[::-1]):
wavelengthspace, wavelengths, intensities_clean, meters_clean, wavelengthspace_clean \
= fourier_clean(intensities[i], handle=handle, cell_width=cell_width, clean_criterium=clean_crit)
fingers, n_fings = finger_count(intensities_clean)
ax1.plot( meters, intensities[i], c=colors[i], alpha=alpha, label=dt2str(t, t_0) )
ax2.plot( wavelengths, abs(wavelengthspace), c=colors[i], alpha=alpha, label=dt2str(t, t_0) )
ax3.plot( meters_clean, intensities_clean, c=colors[i], alpha=alpha, label=dt2str(t, t_0) )
if f_bars:
ax3.bar( meters_clean - width/2, fingers * 26000, width=width, color='k', edgecolor='')
ax4.plot( wavelengths, abs(wavelengthspace_clean), c=colors[i], alpha=alpha, label=dt2str(t, t_0) )
t_0 = t
# now make it beautiful
pad = 1.5
wavelengthspace, wavelengths, _, _1, _2 = fourier_clean(intensities[-1])
    ax1.set_xlabel('Distance from left cell border $[m]$')
ax1.set_ylabel('Intensity (sum over column)')
ax2.set_xlim(0,wavelengths.max() / 10)
ax2.set_ylim(0,pad * abs(wavelengthspace[1:]).max())
ax2.set_xlabel('Wavenumber k $[m^{-1}]$')
ax2.set_ylabel('$|A|$')
ax3.set_ylim(ax1.get_ylim())
    ax3.set_xlabel('Distance from left cell border $[m]$')
ax3.set_ylabel('Intensity (sum over column)')
ax4.set_xlim(0,wavelengths.max() / 10)
ax4.set_ylim(0,pad * abs(wavelengthspace[1:]).max())
ax4.set_xlabel('Wavenumber k $[m^{-1}]$')
ax4.set_ylabel('$|A|$')
# plt.legend()
plt.suptitle('Cleaning Out the N01S3')
plt.tight_layout()
plt.show()
def plot_comparison(data_1, data_2, step=1, start=0, end=-1, handle=25000, clean_crit='amplitude', cell_width=0.23, cmap_name='Greys', alpha=0.4):
# iterate over timesteps
timesteps_1, intensities_1, pixels_1, meters_1 = format_data(data_1[start:end][::step])
timesteps_2, intensities_2, pixels_2, meters_2 = format_data(data_2[start:end][::step])
colors_1 = scalar_cmap(timesteps_1, cmap_name)
colors_2 = scalar_cmap(timesteps_2, cmap_name)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
t_0 = timesteps_1[0]
for t, i in zip(timesteps_1, np.arange(len(timesteps_1))[::-1]):
wavelengthspace_1, wavelengths_1, intensities_clean_1, meters_clean_1, wavelengthspace_clean_1 \
= fourier_clean(intensities_1[i], handle=handle, cell_width=cell_width, clean_criterium=clean_crit)
wavelengthspace_2, wavelengths_2, intensities_clean_2, meters_clean_2, wavelengthspace_clean_2 \
= fourier_clean(intensities_2[i], handle=handle, cell_width=cell_width, clean_criterium=clean_crit)
ax1.plot( meters_clean_1, abs(intensities_clean_1), c=colors_1[i], alpha=alpha, label=dt2str(t, t_0) )
ax2.plot( wavelengths_1, abs(wavelengthspace_clean_1), c=colors_1[i], alpha=alpha, label=dt2str(t, t_0) )
ax3.plot( meters_clean_2, abs(intensities_clean_2), c=colors_2[i], alpha=alpha, label=dt2str(t, t_0) )
ax4.plot( wavelengths_2, abs(wavelengthspace_clean_2), c=colors_2[i], alpha=alpha, label=dt2str(t, t_0) )
t_0 = t
# now make it beautiful
pad = 1.5
wavelengthspace_1, wavelengths_1, _, _1, _2 = fourier_clean(intensities_1[-1])
wavelengthspace_2, wavelengths_2, _, _1, _2 = fourier_clean(intensities_2[-1])
ax1.set_title('waves data1')
ax2.set_title('spectrum data1')
ax3.set_title('waves data2')
ax4.set_title('spectrum data1')
ax2.set_xlim(0,wavelengths_1.max() / 10)
ax2.set_ylim(0,pad * abs(wavelengthspace_1[1:]).max())
ax4.set_xlim(0,wavelengths_2.max() / 10)
ax4.set_ylim(0,pad * abs(wavelengthspace_2[1:]).max())
# plt.legend()
plt.suptitle('Cleaning Out the N01S3')
plt.show()
def finger_count(intensities):
    '''Take an array of intensities and look at the first and second derivative.
    Assuming that every maximum corresponds to a finger, the first derivative must
    change sign there and the second derivative must be negative. Return an array of
    the same length with a 1 at each finger location and 0 elsewhere, plus the finger count.'''
first_derivative = np.diff(intensities)
second_derivative = np.diff(intensities, n=2)
result_array = np.zeros(intensities.shape)
# result_array[ 0] = 0 # compensate, that you cannot look at all pixels
# result_array[-1] = 0
# result_array[-2] = 0
n_fingers = 0
# iterate over every pixel. -2 because len(sec_deriv) = len(intens) - 2. Start at 1, because the first pixel has no difference to the one before.
    for pixel in range(1, len(intensities) - 1):
        if np.diff(np.sign(first_derivative))[pixel-1] < 0 and np.sign(second_derivative)[pixel-1] == -1:
            result_array[pixel] = 1
n_fingers += 1
# else:
# result_array[pixel] = 0
return result_array, n_fingers
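# Hedged sketch (illustrative only): finger_count on a smooth two-bump profile;
# the two maxima of sin(2*pi*x)**2 on [0, 1] should be reported as two fingers.
def _example_finger_count():
    x = np.linspace(0.0, 1.0, 200)
    profile = np.sin(2*np.pi*x)**2
    mask, n_fingers = finger_count(profile)
    return n_fingers  # expected: 2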
def time_diffs(timesteps):
t_0 = datetime.datetime.fromtimestamp(timesteps[0]).replace(microsecond=0)
time_names = []
for i in np.arange(len(timesteps)):
t = datetime.datetime.fromtimestamp(timesteps[i]).replace(microsecond=0)
time_names.append(t - t_0)
return time_names
def plot_fingercount(all_data, step=1, start=0, end=-1, handle=25000, clean_crit='amplitude', cell_width=0.23, cmap_name='Greys', alpha=0.4):
timesteps, intensities, pixels, meters = format_data(all_data[start:end][::step])
t_0 = datetime.datetime.fromtimestamp(timesteps[0]).replace(microsecond=0)
time_names = []
for i in np.arange(len(timesteps)):
t = datetime.datetime.fromtimestamp(timesteps[i]).replace(microsecond=0)
time_names.append(t - t_0)
colors = scalar_cmap(timesteps, cmap_name)
n_fing = np.zeros(timesteps.shape)
for i in np.arange(len(timesteps)):
_,__,intensities_clean,____,_____ = fourier_clean(intensities[i], handle=handle, cell_width=cell_width, clean_criterium=clean_crit)
_, N_f = finger_count(intensities_clean)
n_fing[i] = N_f
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(timesteps, n_fing, '-')
labelstep = int(5)
ax1.set_xticks(timesteps[::labelstep])
ax1.set_xticklabels(time_names[::labelstep])
plt.show()
def plot_fingers(finger_data, step=1, start=0, end=-1, cmap_name='cool', alpha=0.8, bar_width=10):
'''show a bar wherever a finger might be!'''
timesteps, intensities, pixels, meters = format_data(finger_data[start:end][::step])
colors = scalar_cmap(timesteps, cmap_name)
bar = bar_width*meters.max() / len(meters)
fig = plt.figure()
ax = fig.add_subplot(111)
for t, i in zip(timesteps, np.arange(len(timesteps))[::-1]):
ax.bar(meters, intensities[i], width=bar, color=colors[i], alpha=alpha)
ax.set_xlim(0,meters[-1])
ax.set_ylim(0,intensities.max()*1.1)
plt.show()
def plot_fingers_in_picture(path_to_data, data_fingers,
patch, norm_patch, norm_patch_2, norm_crit='linear',
step=1, start=0, end=None,
lower_border=0, upper_border=100, gauss=True,
save=True, show=False, savename='num',
reference=0, norm_reference=0):
timesteps, intensities, pixels, meters = format_data(data_fingers[start:end][::step])
# get the names of all images
all630s = jim.listdir_nohidden(path_to_data + '/' + '630_nm')
# create darkframe
mean_dark = imop.mean_image(path_to_data + '/' + 'dark', rot=True)
# create difference pictures (image - reference)
Diff_630, Quot_630 = imop.global_difference(path_to_data + '/' + '630_nm',
ref=reference,
nref=norm_reference, normpatch=norm_patch, normpatch_2=norm_patch_2,
norm_crit=norm_crit, darkframe=mean_dark, rot=True,
start=start, end=end, step=step)
print 'storing false color images with some nice bars inside!'
if not os.path.isdir(path_to_data + '/' + norm_crit + '/color_630'): # see if the directories already exist, else create them
os.makedirs(path_to_data + '/' + norm_crit + '/color_630')
if not os.path.isdir(path_to_data + '/' + norm_crit + '/color_630/Differences_with_fingers'):
os.makedirs(path_to_data + '/' + norm_crit + '/color_630/Differences_with_fingers')
if not os.path.isdir(path_to_data + '/' + norm_crit + '/color_630/Quotients_with_fingers'):
os.makedirs(path_to_data + '/' + norm_crit + '/color_630/Quotients_with_fingers')
print '=> 630nm diff...'
for image, i in zip(Diff_630, np.arange(len(all630s))):
# create a patch to overlay over the image
x1, x2, y1, y2 = patch
xdim, ydim = x2-x1, y2-y1
overlay_patch = np.zeros((ydim,xdim,3))
for j, val in zip(np.arange(len(intensities[i])), intensities[i]):
for k in np.arange(val):
overlay_patch[k,j-1,0] = 10
overlay_patch[k,j-1,1] = 10
overlay_patch[k,j-1,2] = 10
overlay_patch[k,j,0] = 10
overlay_patch[k,j,1] = 10
overlay_patch[k,j,2] = 10
overlay_patch[k,j+1,0] = 10
overlay_patch[k,j+1,1] = 10
overlay_patch[k,j+1,2] = 10
# add it to the picture
image[y1:y2,x1:x2] += overlay_patch
# plot
fig1 = plt.figure()
ax = plt.subplot(1, 1, 1)
ax.imshow(np.mean(image,axis=-1))
title = jim.get_timestamp(all630s[i])
plt.title(title)
if save:
if savename == 'num':
fig1.savefig(savename + str(i)+'.png')
if savename == 'time':
fig1.savefig(savename + title + '.png')
if show:
plt.show()
plt.close()
print '\nWe are finished with the 630nm stuff!'
def plot_finger_growth(savename, data_fingers, parts=(0,None), px2cm=None, all_Files=None, xlims=(0,None), cmap='cool'):
    '''Assuming all fingers grow at the same rate, their mean length grows at that rate as well.'''
timesteps, intensities, pixels, meters = format_data(data_fingers)#[start:end][::step])
x_min, x_max = xlims
    if all_Files is None:
timenames = time_diffs(timesteps)
dt = timesteps[1] - timesteps[0]
else:
times = [jim.get_timestamp(t, human_readable=False) for t in all_Files]
t_0=jim.get_timestamp(all_Files[0], human_readable=False)
timenames = [dt2str(t_0, t) for t in times]
# dt /= 10**6
mean_lengths = [np.array([]) for i in np.arange(len(parts)-1)]
growths = [np.array([]) for i in np.arange(len(parts)-1)]
for i in np.arange(len(parts) - 1):
start = parts[i]
end = parts[i+1]
used_ints = intensities[:,start:end]
mean_lengths[i] = [px2cm * np.mean(timestep_ints[timestep_ints > 0]) for timestep_ints in used_ints] # first iterate over all timesteps, then take the average
growths[i] = np.zeros(len(mean_lengths[i]))
if i == 0:
dt = 0
else:
dt = timesteps[i] - timesteps[i-1]
growths[i][1:] = np.diff(mean_lengths[i]) / dt
print len(timesteps), len(mean_lengths[0]), len(growths[0])
fig = plt.figure()
ax1 = fig.add_subplot(111)
# ax2 = fig.add_subplot(122)
colors = scalar_cmap(mean_lengths, cmap_name=cmap)
for data, grow, i in zip(mean_lengths, growths, np.arange(len(mean_lengths))):
        if px2cm is None:
ax1.plot(timesteps, data, label=str(i), c=colors[i])
# ax2.plot(timesteps, grow, label=str(i))
else:
ax1.plot(timesteps, data, label=str(i), c=colors[i])
# ax2.plot(timesteps, grow, label=str(i))
print data
ax1.set_title(u'Mittlere Fingerlänge')
ax1.set_xlabel(u'Zeit')
    if px2cm is None:
ax1.set_ylabel(u'Mittlere Fingerlänge $[px]$')
# ax2.set_ylabel('mean difference [pixel/s]')
else:
ax1.set_ylabel(u'Mittlere Fingerlänge $[cm]$')
# ax2.set_ylabel('mean difference [cm/s]')
ax1.set_xticklabels(timenames[::5], rotation=45)
print jim.get_timestamp(all_Files[x_min]), jim.get_timestamp(all_Files[x_max])
ax1.set_xlim(timesteps[x_min], timesteps[x_max])
ax1.legend(loc=2)
# ax2.set_title('mean differences')
# ax2.set_xlabel('time [h:m:s]')
# ax2.set_xticklabels(timenames, rotation=45)
# ax2.legend()
# plt.show()
fig.savefig(savename, dpi=300, bbox_inches='tight')
def main():
path_to_data = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/BCG_nopor_Test01/measurement_2015-02-02_14-03-19/images'
# # path_to_file = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/measurement_2015-02-02_14-03-19/measurement_2015-02-02_14-03-19/images/intensities_630.csv'
path_to_diff = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/BCG_nopor_Test01/measurement_2015-02-02_14-03-19/images/normcrit_linear/intensities_Diff_630.csv'
path_to_quot = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/BCG_nopor_Test01/measurement_2015-02-02_14-03-19/images/normcrit_linear/intensities_Quot_630.csv'
# path_to_diff = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/BCG_nopor_Test01/measurement_2015-02-02_14-03-19/images/intensities_Diff_630.csv'
# path_to_quot = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/BCG_nopor_Test01/measurement_2015-02-02_14-03-19/images/intensities_Quot_630.csv'
data_diff = np.genfromtxt(path_to_diff)
data_quot = np.genfromtxt(path_to_quot)
path_to_fingers = u'/Users/jhaux/Desktop/Bachelorarbeit/Measurements/BCG_nopor_Test01/measurement_2015-02-02_14-03-19/images/linear/fingers_diff_630.csv'
data_fingers = np.genfromtxt(path_to_fingers)
# plot_wavelengthspace(data_diff, step=10, start=1, end=32, cmap_name='cool', handle=150, clean_crit='wavelength', alpha=0.5, f_bars=True)
# plot_wavelengthspace(data_quot, step=2, start=15, end=16, cmap_name='cool', handle=1000000000000, clean_crit='wavelength', alpha=0.5, f_bars=False)
# plot_comparison(data_diff, data_quot, step=2, start=0, end=100, cmap_name='cool', handle=150, clean_crit='wavelength', alpha=0.5)
# plot_fingercount(data_diff, start=1, handle=150, clean_crit='wavelength')
# plot_fingers(data_fingers, start=8, end=9, step=1)
patch = ( 643, 1779, 1550, 2000) # this part of the image will be analysed
np1 = (1190, 1310, 60, 160) # there is a paper on the background
np2 = ( 500, 600, 300, 400) # an area that is not to bright
# plot_fingers_in_picture(path_to_data, data_fingers,patch=(643, 1779, 1550, 2000), norm_patch=np1, norm_patch_2=np2,
# start=1,end=32, step=10, save=True, show=True,
# savename=path_to_data+'/linear/Differences_with_fingers/')
timesteps, intensities, pixels, meters = format_data(data_fingers)#[start:end][::step])
sep = int(len(intensities[0])/3)
    # NOTE: plot_finger_growth expects an output filename as its first
    # argument; 'finger_growth.png' below is only a placeholder name.
    plot_finger_growth('finger_growth.png', data_fingers[5:16],
                       parts=(0, sep, sep*2, None))
return 0
if __name__ == '__main__':
main()
``` |
{
"source": "jhaux/triplet-reid",
"score": 2
} |
#### File: edflow_implementations/deepfashion/eval_tsne.py
```python
import sys
sys.path.append(".")
import yaml, os, json
from triplet_reid.edflow_implementations.deepfashion.data import (
FromCSVWithEmbedding, FromCSVWithMultiEmbedding)
from tqdm import trange, tqdm
import numpy as np
import matplotlib.pyplot as plt
from triplet_reid.excluders.diagonal import Excluder as DiagonalExcluder
from scipy.spatial.distance import cdist
from sklearn.metrics import average_precision_score
def make_tsne_plot(outpath, dataset):
indices = np.random.permutation(len(dataset))
N = 1000
indices = indices[:N]
data = list()
for i in tqdm(indices):
data.append(dataset[i]["embedding"])
data = np.stack(data)
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0, verbose = 1, perplexity = 40, n_iter=300)
data_2d = tsne.fit_transform(data)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(data_2d[:,0], data_2d[:,1])
fig.savefig(outpath, dpi = 300)
print("Wrote ", outpath)
def make_combined_tsne_plot(outpath, dataset1, dataset2, label1, label2):
indices1 = np.random.permutation(len(dataset1))
indices2 = np.random.permutation(len(dataset2))
N = 1000
indices1 = indices1[:N]
indices2 = indices2[:N]
data = list()
for i in tqdm(indices1):
data.append(dataset1[i]["embedding"])
for i in tqdm(indices2):
data.append(dataset2[i]["embedding"])
data = np.stack(data)
print(data.shape)
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0, verbose = 1)
data_2d = tsne.fit_transform(data)
print(data_2d.shape)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
colors = ["r", "g"]
markers = ["+", "x"]
alphas = [1.0, 1.0]
ax.scatter(
data_2d[:N,0], data_2d[:N,1],
c = colors[0], label = label1, marker = markers[0], alpha = alphas[0])
ax.scatter(
data_2d[N:,0], data_2d[N:,1],
c = colors[1], label = label2, marker = markers[1], alpha = alphas[1])
ax.legend()
fig.savefig(outpath, dpi = 300)
print("Wrote ", outpath)
def run(embedding_root, postfixes):
joint_config = {
"spatial_size": 256,
"data_root": "data/deepfashion/images",
"embedding_root": embedding_root,
"embedding_postfixes": postfixes,
"data_csv": "data/deepfashion/test_reconstruction.csv",
"z_size": None}
joint_dataset = FromCSVWithMultiEmbedding(joint_config)
marginal_config = {
"spatial_size": 256,
"data_root": "data/deepfashion/images",
"embedding_root": embedding_root,
"embedding_postfixes": postfixes,
"data_csv": "data/deepfashion/test_transfer.csv",
"z_size": None}
marginal_dataset = FromCSVWithMultiEmbedding(marginal_config)
print(len(joint_dataset))
print(len(marginal_dataset))
for name, dataset in zip(["joint", "marginal"], [joint_dataset, marginal_dataset]):
out_path = "tsne_" + name + ".png"
out_path = os.path.join(embedding_root, out_path)
make_tsne_plot(out_path, dataset)
out_path = "tsne_" + "combined" + ".png"
out_path = os.path.join(embedding_root, out_path)
make_combined_tsne_plot(out_path, joint_dataset, marginal_dataset, "joint", "marginal")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("embedding_root")
parser.add_argument("--postfixes", nargs = "+", required = True)
opt = parser.parse_args()
run(opt.embedding_root, opt.postfixes)
``` |
{
"source": "jha-vikas/pyTorch-implementations",
"score": 3
} |
#### File: jha-vikas/pyTorch-implementations/word2vec.py
```python
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.utils import shuffle
from datetime import datetime
import os
import sys
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def init_weights(shape):
return np.random.randn(*shape).astype(np.float32) / np.sqrt(sum(shape))
class Model:
def __init__(self, D: int, V: int, context_sz: int):
self.D = D
self.V = V
self.context_sz = context_sz
    def _get_pnw(self, X):
        # Build the noise (negative-sampling) distribution P_n(w) from the
        # corpus: unigram frequencies raised to the 3/4 power, as in word2vec.
        flat_word_list = [word for sentence in X for word in sentence]
        # presumably the first and last entries are start/end tokens, which
        # are kept out of the frequency counts
        tokens = [flat_word_list[0], flat_word_list[-1]]
        flat_word_list = flat_word_list[1:-1]
        word_count = len(flat_word_list)
        word_freq = dict(Counter(flat_word_list))
        word_freq = {k: (v / word_count)**0.75 for k, v in word_freq.items()}
        self.Pnw = list(word_freq.values())  # .values() must be called
        self.Pnw.insert(0, 0)
        self.Pnw.append(0)
        return self.Pnw
def _get_negative_samples(self, context, num_neg_samples):
``` |
{
"source": "jhavl/ropy",
"score": 2
} |
#### File: ropy/examples/ETS1G.py
```python
import ropy as rp
import spatialmath as sm
import numpy as np
import time
import random
import qpsolvers as qp
class Exp(object):
def __init__(self):
# Make Robots
self.rQ = rp.UR5()
self.rQ.name = 'Quad UR5'
self.rR = rp.UR5()
self.rR.name = 'RRMC UR5'
# Set joint limits
self.n = self.rQ.n
self.qlim = self.rQ.qlim.copy()
self.rang = np.abs(self.qlim[0, :]) + np.abs(self.qlim[1, :])
# Set base locations
self.rQ.base = sm.SE3.Ty(0.3)
self.rR.base = sm.SE3.Ty(-0.3)
# Init robot variables
self.rQ.failt = 0
self.rR.failt = 0
self.rQ.arrivedt = 0
self.rR.arrivedt = 0
self.rQ.s = False
self.rR.s = False
self.rQ.st = 0
self.rR.st = 0
self.rQ.mt = []
self.rR.mt = []
self.rQ.mft = []
self.rR.mft = []
self.rQ.missed = 0
self.rR.missed = 0
# Launch Sim
self.env = rp.backend.Sim()
self.env.launch()
# Add robots
self.env.add(self.rQ)
self.env.add(self.rR)
time.sleep(1)
# Timestep
self.dt = 50
self.itmax = 250
def step(self, action):
# Step the quadratic robot
if not self.rQ.arrived and not self.rQ.fail:
try:
self.step_q(self.rQ, self.TQ)
except np.linalg.LinAlgError:
self.rQ.fail = True
self.rQ.s = True
else:
self.rQ.qd = np.zeros(self.n)
# Step the rrmc robot
if not self.rR.arrived and not self.rR.fail:
try:
self.step_r(self.rR, self.TR)
except np.linalg.LinAlgError:
self.rR.fail = True
self.rR.s = True
else:
self.rR.qd = np.zeros(self.n)
# Step the environment
self.env.step(self.dt)
self.it += 1
if ((self.rQ.arrived or self.rQ.fail)
and (self.rR.arrived or self.rR.fail)) \
or self.it > self.itmax:
self.done = True
self.finished([self.rQ, self.rR])
self.mean([self.rQ, self.rR])
else:
self.done = False
def finished(self, robots):
rarr = 0
for robot in robots:
if robot.arrived:
robot.arrivedt += 1
rarr += 1
elif robot.fail:
robot.failt += 1
if robot.s:
robot.st += 1
if not robot.arrived and not robot.fail:
robot.missed += 1
m = robot.m / robot.it
mf = robot.manipulability()
print("{0}: {1}, mean: {2}, final: {3}, singular: {4}".format(
robot.name, robot.arrived, np.round(m, 4),
np.round(mf, 4), robot.s))
if rarr == len(robots):
for robot in robots:
robot.mt.append(robot.m / robot.it)
robot.mft.append(robot.manipulability())
def mean(self, robots):
print("Dual success: {0}".format(len(robots[0].mt)))
for robot in robots:
mm = np.sum(robot.mt) / len(robot.mt)
mmf = np.sum(robot.mft) / len(robot.mft)
print("{0}: fails: {1}, mmean: {2}, mfinal: {3},"
" singulars: {4}, missed: {5}".format(
robot.name, robot.failt,
np.round(mm, 4), np.round(mmf, 4), robot.st,
robot.missed))
def reset(self):
# Set initial joint angles
q_init = self._rand_q()
self.rQ.q = q_init.copy()
self.rR.q = q_init.copy()
# Set joint velocities to 0
self.rQ.qd = np.zeros(self.n)
self.rR.qd = np.zeros(self.n)
# Robot stats
self.rQ.it = 0
self.rQ.s = False
self.rQ.m = 0
self.rQ.arrived = False
self.rQ.fail = False
self.rR.it = 0
self.rR.m = 0
self.rR.arrived = False
self.rR.fail = False
self.rR.s = False
self.done = False
self.it = 0
# Set desired poses
self.TQ, self.TR = self._find_pose((self.rQ, self.rR))
def step_q(self, robot, Ts):
ps = 0.05
pi = 0.9
e, m, _ = self.state(robot, Ts)
v, robot.arrived = rp.p_servo(robot.fkine(), Ts, 1, threshold=0.17)
Y = 0.01
Ain = np.zeros((12, 12))
bin = np.zeros(6 + 6)
for i in range(robot.n):
if robot.q[i] - self.qlim[0, i] <= pi:
bin[i] = -1.0 * (((self.qlim[0, i] - robot.q[i]) + ps) / (pi - ps))
Ain[i, i] = -1
if self.qlim[1, i] - robot.q[i] <= pi:
bin[i] = ((self.qlim[1, i] - robot.q[i]) - ps) / (pi - ps)
Ain[i, i] = 1
Q = np.eye(6 + 6)
Q[:6, :6] *= Y
Q[6:, 6:] = (1 / e) * np.eye(6)
Aeq = np.c_[robot.jacobe(), np.eye(6)]
beq = v.reshape((6,))
c = np.r_[-robot.jacobm().reshape((6,)), np.zeros(6)]
qd = qp.solve_qp(Q, c, Ain, bin, Aeq, beq)
if np.any(np.isnan(qd)):
robot.fail = True
robot.s = True
robot.qd = robot.qz
else:
robot.qd = qd[:6]
robot.m += m
robot.it += 1
if self._check_limit(robot):
robot.fail = True
def step_r(self, robot, Ts):
e, m, _ = self.state(robot, Ts)
v, robot.arrived = rp.p_servo(robot.fkine(), Ts, 1, threshold=0.17)
if np.linalg.matrix_rank(robot.jacobe()) < 6:
robot.s = True
robot.fail = True
robot.qd = np.linalg.inv(robot.jacobe()) @ v
robot.m += m
robot.it += 1
if self._check_limit(robot):
robot.fail = True
def state(self, robot, Ts):
arrived = False
eTep = robot.fkine().inv() * Ts
e = np.sum(np.abs(np.r_[eTep.t, eTep.rpy() * np.pi/180]))
m = robot.manipulability()
if e < 0.1:
arrived = True
return e, m, arrived
def restart(self):
pass
def render(self):
pass
def _rand_q(self, k=0.15):
q = np.zeros(self.n)
for i in range(self.n):
off = k * self.rang[i]
q[i] = random.uniform(self.qlim[0, i] + off, self.qlim[1, i] - off)
return q
def _find_pose(self, robots):
q = self._rand_q()
return robots[0].fkine(q), robots[1].fkine(q)
def _check_limit(self, robot):
limit = False
off = 0.00
for i in range(self.n-2):
if robot.q[i] <= (self.qlim[0, i] + off):
if robot.name == "Quad UR5":
print(str(robot.q[i]) + " below ---------------------------------------------------------")
return True
elif robot.q[i] >= (self.qlim[1, i] - off):
if robot.name == "Quad UR5":
print(str(robot.q[i]) + " above ---------------------------------------------------------")
return True
return limit
def envfun(e):
while not e.done:
e.step(0)
if __name__ == '__main__':
e = Exp()
for i in range(100000):
e.reset()
print()
print('Interation: {0}'.format(i))
envfun(e)
```
#### File: backend/PyPlot/PyPlot.py
```python
import ropy as rp
import numpy as np
from ropy.backend.Connector import Connector
import matplotlib
import matplotlib.pyplot as plt
import signal
from ropy.backend.PyPlot.RobotPlot import RobotPlot
from ropy.backend.PyPlot.EllipsePlot import EllipsePlot
from spatialmath.base.argcheck import getvector
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.style.use('ggplot')
matplotlib.rcParams['font.size'] = 7
matplotlib.rcParams['lines.linewidth'] = 0.5
matplotlib.rcParams['xtick.major.size'] = 1.5
matplotlib.rcParams['ytick.major.size'] = 1.5
matplotlib.rcParams['axes.labelpad'] = 1
plt.rc('grid', linestyle="-", color='#dbdbdb')
class PyPlot(Connector):
def __init__(self):
super(PyPlot, self).__init__()
self.robots = []
self.ellipses = []
def launch(self, name=None, limits=None):
'''
        env = launch() launches a blank 3D matplotlib figure
'''
super().launch()
self.limits = limits
if limits is not None:
self.limits = getvector(limits, 6)
projection = 'ortho'
labels = ['X', 'Y', 'Z']
        if name is not None:
            self.fig = plt.figure(name)  # use the given name as the figure label
        else:
            self.fig = plt.figure()
self.fig.subplots_adjust(left=-0.09, bottom=0, top=1, right=0.99)
# Create a 3D axes
self.ax = self.fig.add_subplot(
111, projection='3d', proj_type=projection)
self.ax.set_facecolor('white')
self.ax.set_xbound(-0.5, 0.5)
self.ax.set_ybound(-0.5, 0.5)
self.ax.set_zbound(0.0, 0.5)
self.ax.set_xlabel(labels[0])
self.ax.set_ylabel(labels[1])
self.ax.set_zlabel(labels[2])
if limits is not None:
self.ax.set_xlim3d([limits[0], limits[1]])
self.ax.set_ylim3d([limits[2], limits[3]])
self.ax.set_zlim3d([limits[4], limits[5]])
plt.ion()
plt.show()
# Set the signal handler and a 0.1 second plot updater
signal.signal(signal.SIGALRM, self._plot_handler)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
def step(self, dt=50):
'''
state = step(args) triggers the external program to make a time step
        of the defined duration, updating the state of the environment
        according to the robots' actions.
        It will go through each robot in the list and make them act based on
their control type (position, velocity, acceleration, or torque). Upon
acting, the other three of the four control types will be updated in
the internal state of the robot object. The control type is defined
by the robot object, and not all robot objects support all control
types.
'''
super().step()
self._step_robots(dt)
plt.ioff()
self._draw_ellipses()
self._draw_robots()
self._set_axes_equal()
plt.ion()
self._update_robots()
def reset(self):
'''
state = reset() triggers the external program to reset to the
original state defined by launch
'''
super().reset()
def restart(self):
'''
state = restart() triggers the external program to close and relaunch
        to the state defined by launch
'''
super().restart()
def close(self):
'''
close() closes the plot
'''
super().close()
signal.setitimer(signal.ITIMER_REAL, 0)
plt.close(self.fig)
#
    # Methods to interface with the robots created in other environments
#
def add(
self, ob, readonly=False, display=True,
jointaxes=True, eeframe=True, shadow=True, name=True):
'''
id = add(robot) adds the robot to the external environment. robot must
be of an appropriate class. This adds a robot object to a list of
robots which will act upon the step() method being called.
'''
super().add()
if isinstance(ob, rp.SerialLink) or isinstance(ob, rp.ETS):
self.robots.append(
RobotPlot(
ob, self.ax, readonly, display,
jointaxes, eeframe, shadow, name))
self.robots[len(self.robots) - 1].draw()
elif isinstance(ob, EllipsePlot):
ob.ax = self.ax
self.ellipses.append(ob)
self.ellipses[len(self.ellipses) - 1].draw()
self._set_axes_equal()
def remove(self):
'''
        id = remove(robot) removes the robot from the external environment.
'''
super().remove()
def hold(self): # pragma: no cover
signal.setitimer(signal.ITIMER_REAL, 0)
plt.ioff()
plt.show()
#
# Private methods
#
def _step_robots(self, dt):
for rpl in self.robots:
robot = rpl.robot
if rpl.readonly or robot.control_type == 'p':
pass # pragma: no cover
elif robot.control_type == 'v':
for i in range(robot.n):
robot.q[i] += robot.qd[i] * (dt / 1000)
elif robot.control_type == 'a':
pass
else: # pragma: no cover
# Should be impossible to reach
raise ValueError(
'Invalid robot.control_type. '
'Must be one of \'p\', \'v\', or \'a\'')
def _update_robots(self):
pass
def _draw_robots(self):
for i in range(len(self.robots)):
self.robots[i].draw()
def _draw_ellipses(self):
for i in range(len(self.ellipses)):
self.ellipses[i].draw()
def _plot_handler(self, sig, frame):
plt.pause(0.001)
def _set_axes_equal(self):
'''
Make axes of 3D plot have equal scale so that spheres appear as
spheres, cubes as cubes, etc.. This is one possible solution to
Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not
working for 3D.
'''
if self.limits is not None:
return
self.ax.autoscale(enable=True, axis='both', tight=False)
x_limits = self.ax.get_xlim3d()
y_limits = self.ax.get_ylim3d()
z_limits = self.ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
self.ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
self.ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
self.ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
```
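The `step()` docstring above describes how every added robot is advanced per call. A minimal usage sketch, assuming the `PandaMDH` model and a `rp.backend.PyPlot` export analogous to the `PyPlot2` tests further below, and assuming `control_type` and `qd` are writable as in the other examples in this corpus (illustrative only, not taken from the source):
```python
import numpy as np
import ropy as rp

panda = rp.PandaMDH()             # model name assumed from the tests below
env = rp.backend.PyPlot()
env.launch()
env.add(panda)
panda.control_type = 'v'          # assumption: velocity mode is selectable this way
panda.qd = 0.1 * np.ones(panda.n)
for _ in range(20):
    env.step(50)                  # advance the simulation by 50 ms per call
env.close()
```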
#### File: backend/PyPlot/RobotPlot2.py
```python
import numpy as np
from ropy.backend.PyPlot.RobotPlot import RobotPlot
class RobotPlot2(RobotPlot):
def __init__(
self, robot, ax, readonly, display=True,
eeframe=True, name=True):
super(RobotPlot2, self).__init__(
robot, ax, readonly, display=display,
jointaxes=False, shadow=False, eeframe=eeframe, name=name
)
```
#### File: urdf/tests/test_urdf.py
```python
import unittest
import ropy as rp
from ropy.backend import URDF, Link, Joint, Transmission, xacro
import numpy as np
import numpy.testing as nt
class TestURDF(unittest.TestCase):
def test_urdfpy(self):
# Load
u = URDF.load('ropy/backend/urdf/tests/data/ur5.urdf')
self.assertIsInstance(u, URDF)
for j in u.joints:
self.assertIsInstance(j, Joint)
for ln in u.links:
self.assertIsInstance(ln, Link)
for t in u.transmissions:
self.assertIsInstance(t, Transmission)
def test_urdf_visuals(self):
urdf_string = xacro.main(
"ropy/models/xacro/panda/robots/panda_arm_hand.urdf.xacro")
urdf = URDF.loadstr(
urdf_string,
"ropy/models/xacro/panda/robots/panda_arm_hand.urdf.xacro")
urdf.links[0].visuals[0].name = "Lonk"
self.assertTrue(urdf.links[0].visuals[0].name == "Lonk")
self.assertTrue(
isinstance(
urdf.links[0].visuals[0].origin,
np.ndarray))
urdf.links[0].visuals[0].geometry.box = rp.backend.urdf.Box([1, 2, 3])
self.assertTrue(
isinstance(
urdf.links[0].visuals[0].geometry.geometry,
rp.backend.urdf.Box))
urdf.links[0].visuals[0].geometry.cylinder = \
rp.backend.urdf.Cylinder(1, 2)
urdf.links[0].visuals[0].geometry.sphere = \
rp.backend.urdf.Sphere(2)
nt.assert_array_almost_equal(
urdf.links[0].visuals[0].geometry.box.size,
[1, 2, 3])
self.assertEqual(
urdf.links[0].visuals[0].geometry.cylinder.radius,
1)
self.assertEqual(
urdf.links[0].visuals[0].geometry.sphere.radius,
2)
self.assertTrue(
isinstance(
urdf.links[0].visuals[0].geometry.mesh,
rp.backend.urdf.Mesh))
try:
xacro.main("")
except BaseException:
pass
def test_urdf_load(self):
rp.wx250s()
rp.UR5()
rp.PandaURDF()
try:
xacro.main("")
except BaseException:
pass
def test_urdf_collisions(self):
urdf_string = xacro.main(
"ropy/models/xacro/panda/robots/panda_arm_hand.urdf.xacro")
urdf = URDF.loadstr(
urdf_string,
"ropy/models/xacro/panda/robots/panda_arm_hand.urdf.xacro")
urdf.links[0].collisions[0].name = "Lonk"
self.assertTrue(urdf.links[0].collisions[0].name == "Lonk")
self.assertTrue(
isinstance(
urdf.links[0].collisions[0].origin,
np.ndarray))
try:
xacro.main("")
except BaseException:
pass
def test_urdf_dynamics(self):
urdf_string = xacro.main(
"ropy/models/xacro/panda/robots/panda_arm_hand.urdf.xacro")
urdf = URDF.loadstr(
urdf_string,
"ropy/models/xacro/panda/robots/panda_arm_hand.urdf.xacro")
self.assertEqual(urdf.joints[0].limit.effort, 87.0)
self.assertEqual(urdf.joints[0].limit.velocity, 2.175)
try:
xacro.main("")
except BaseException:
pass
```
#### File: ropy/models/Panda.py
```python
import numpy as np
from ropy.robot.ETS import ETS
from ropy.robot.ET import ET
from ropy.robot.ELink import ELink
class Panda(ETS):
"""
Create model of Franka-Emika Panda manipulator
panda = Panda() creates a robot object representing the Franka-Emika
Panda robot arm. This robot is represented using the elementary
transform sequence (ETS).
ETS taken from [1] based on
https://frankaemika.github.io/docs/control_parameters.html
:references:
- Kinematic Derivatives using the Elementary Transform
Sequence, <NAME> and <NAME>
"""
def __init__(self):
deg = np.pi/180
mm = 1e-3
tool_offset = (103)*mm
l0 = ELink(
[ET.Ttz(0.333), ET.TRz()],
name='link0',
parent=None
)
l1 = ELink(
[ET.TRx(-90*deg), ET.TRz()],
name='link1',
parent=l0
)
l2 = ELink(
[ET.TRx(90*deg), ET.Ttz(0.316), ET.TRz()],
name='link2',
parent=l1
)
l3 = ELink(
[ET.Ttx(0.0825), ET.TRx(90*deg), ET.TRz()],
name='link3',
parent=l2
)
l4 = ELink(
[ET.Ttx(-0.0825), ET.TRx(-90*deg), ET.Ttz(0.384), ET.TRz()],
name='link4',
parent=l3
)
l5 = ELink(
[ET.TRx(90*deg), ET.TRz()],
name='link5',
parent=l4
)
l6 = ELink(
[ET.Ttx(0.088), ET.TRx(90*deg), ET.Ttz(0.107), ET.TRz()],
name='link6',
parent=l5
)
ee = ELink(
[ET.Ttz(tool_offset), ET.TRz(-np.pi/4)],
name='ee',
parent=l6
)
ETlist = [l0, l1, l2, l3, l4, l5, l6, ee]
super(Panda, self).__init__(
ETlist,
name='Panda',
manufacturer='<NAME>')
self._qz = np.array([0, 0, 0, 0, 0, 0, 0])
self._qr = np.array([0, -0.3, 0, -2.2, 0, 2.0, np.pi/4])
@property
def qz(self):
return self._qz
@property
def qr(self):
return self._qr
```
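A quick, hedged illustration of the ETS model defined above, assuming the package is importable as `ropy` as in the examples and tests elsewhere in this corpus (the exact printed form depends on the `ETS` class):
```python
import ropy as rp

panda = rp.Panda()
print(panda)                  # prints the elementary transform sequence
T = panda.fkine(panda.qr)     # forward kinematics at the 'ready' configuration
print(T)
```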
#### File: ropy/robot/ELink.py
```python
import numpy as np
from spatialmath import SE3
from spatialmath.base.argcheck import getvector, verifymatrix, isscalar
import ropy as rp
class ELink(object):
"""
A link superclass for all link types. A Link object holds all information
related to a robot joint and link such as kinematics parameters,
rigid-body inertial parameters, motor and transmission parameters.
    :param ET_list: kinematic - The elementary transforms which make up the link
    :type ET_list: list of ET
:param qlim: joint variable limits [min max]
:type qlim: float ndarray(2)
:param m: dynamic - link mass
:type m: float
:param r: dynamic - position of COM with respect to link frame
:type r: SE3
:param I: dynamic - inertia of link with respect to COM
:type I: float ndarray(3,3)
:param Jm: dynamic - motor inertia
:type Jm: float
:param B: dynamic - motor viscous friction
:type B: float
:param Tc: dynamic - motor Coulomb friction (1x2 or 2x1)
:type Tc: float ndarray(2)
:param G: dynamic - gear ratio
:type G: float
:references:
- Kinematic Derivatives using the Elementary Transform Sequence,
<NAME> and <NAME>
"""
def __init__(
self,
ET_list=[],
name='',
parent=None,
qlim=np.zeros(2),
m=0.0,
r=None,
I=np.zeros((3, 3)), # noqa
Jm=0.0,
B=0.0,
Tc=np.zeros(2),
G=1.0,
geometry=[],
collision=[]):
super(ELink, self).__init__()
self.STATIC = 0
self.VARIABLE = 1
self._ets = ET_list
self._q_idx = []
self._name = name
if isinstance(parent, ELink):
parent = [parent]
elif parent is None:
parent = []
elif not isinstance(parent, list):
            raise TypeError(
                'The parent link must be of type ELink or list of ELink')
self._parent = parent
self._child = []
# Number of transforms in the ETS
self._M = len(self._ets)
# Initialise joints
for i in range(self.M):
if ET_list[i].jtype is not ET_list[i].STATIC:
ET_list[i].j = len(self._q_idx)
self._q_idx.append(i)
if len(self._q_idx) > 1:
raise ValueError(
"An elementary link can only have one joint variable")
elif len(self._q_idx) == 0:
self._jtype = self.STATIC
self._q_idx = None
else:
self._jtype = self.VARIABLE
self._q_idx = self._q_idx[0]
self.qlim = qlim
self.geometry = geometry
self.collision = collision
# Dynamic Parameters
self.m = m
self.r = r
self.I = I # noqa
self.Jm = Jm
self.B = B
self.Tc = Tc
self.G = G
@property
def collision(self):
return self._collision
@property
def geometry(self):
return self._geometry
@property
def jtype(self):
return self._jtype
@property
def ets(self):
return self._ets
@property
def name(self):
return self._name
# @property
# def parent_name(self):
# return self._parent_name
# @property
# def child_name(self):
# return self._child_name
@property
def parent(self):
return self._parent
@property
def child(self):
return self._child
@property
def M(self):
return self._M
@property
def qlim(self):
return self._qlim
@property
def m(self):
return self._m
@property
def r(self):
return self._r
@property
def I(self): # noqa
return self._I
@property
def Jm(self):
return self._Jm
@property
def B(self):
return self._B
@property
def Tc(self):
return self._Tc
@property
def G(self):
return self._G
@property
def q_idx(self):
return self._q_idx
@collision.setter
def collision(self, coll):
new_coll = []
if isinstance(coll, list):
for gi in coll:
if isinstance(gi, rp.Shape):
new_coll.append(gi)
else:
raise TypeError('Collision must be of Shape class')
elif isinstance(coll, rp.Shape):
new_coll.append(coll)
else:
raise TypeError('Geometry must be of Shape class or list of Shape')
self._collision = new_coll
@geometry.setter
def geometry(self, geom):
new_geom = []
if isinstance(geom, list):
for gi in geom:
if isinstance(gi, rp.Shape):
new_geom.append(gi)
else:
raise TypeError('Geometry must be of Shape class')
elif isinstance(geom, rp.Shape):
new_geom.append(geom)
else:
raise TypeError('Geometry must be of Shape class or list of Shape')
self._geometry = new_geom
@qlim.setter
def qlim(self, qlim_new):
self._qlim = getvector(qlim_new, 2)
@m.setter
def m(self, m_new):
self._m = m_new
@r.setter
def r(self, T):
if not isinstance(T, SE3):
T = SE3(T)
self._r = T
@I.setter
def I(self, I_new): # noqa
# Try for Inertia Matrix
try:
verifymatrix(I_new, (3, 3))
except (ValueError, TypeError):
# Try for the moments and products of inertia
# [Ixx Iyy Izz Ixy Iyz Ixz]
try:
Ia = getvector(I_new, 6)
I_new = np.array([
[Ia[0], Ia[3], Ia[5]],
[Ia[3], Ia[1], Ia[4]],
[Ia[5], Ia[4], Ia[2]]
])
except ValueError:
# Try for the moments of inertia
# [Ixx Iyy Izz]
Ia = getvector(I_new, 3)
I_new = np.diag(Ia)
self._I = I_new
@Jm.setter
def Jm(self, Jm_new):
self._Jm = Jm_new
@B.setter
def B(self, B_new):
if isscalar(B_new):
self._B = B_new
else:
raise TypeError("B must be a scalar")
@Tc.setter
def Tc(self, Tc_new):
try:
# sets Coulomb friction parameters to [F -F], for a symmetric
# Coulomb friction model.
Tc = getvector(Tc_new, 1)
Tc_new = np.array([Tc[0], -Tc[0]])
except ValueError:
# [FP FM] sets Coulomb friction to [FP FM], for an asymmetric
# Coulomb friction model. FP>0 and FM<0. FP is applied for a
# positive joint velocity and FM for a negative joint
# velocity.
Tc_new = getvector(Tc_new, 2)
self._Tc = Tc_new
@G.setter
def G(self, G_new):
self._G = G_new
def __str__(self):
"""
Pretty prints the ETS Model of the link. Will output angles in degrees
:return: Pretty print of the robot link
:rtype: str
"""
return str(self._ets)
def _copy(self):
# Copy the Link
link = ELink( # noqa
ET_list=self.ets,
qlim=self.qlim,
m=self.m,
r=self.r,
I=self.I,
Jm=self.Jm,
B=self.B,
Tc=self.Tc,
G=self.G)
return link
def dyn(self):
"""
Show inertial properties of link
s = dyn() returns a string representation the inertial properties of
the link object in a multi-line format. The properties shown are mass,
centre of mass, inertia, friction, gear ratio and motor properties.
:return s: The string representation of the link dynamics
:rtype s: string
"""
s = "m = {:.2f} \n" \
"r = {:.2f} {:.2f} {:.2f} \n" \
" | {:.2f} {:.2f} {:.2f} | \n" \
"I = | {:.2f} {:.2f} {:.2f} | \n" \
" | {:.2f} {:.2f} {:.2f} | \n" \
"Jm = {:.2f} \n" \
"B = {:.2f} \n" \
"Tc = {:.2f}(+) {:.2f}(-) \n" \
"G = {:.2f} \n" \
"qlim = {:.2f} to {:.2f}".format(
self.m,
self.r.t[0], self.r.t[1], self.r.t[2],
self.I[0, 0], self.I[0, 1], self.I[0, 2],
self.I[1, 0], self.I[1, 1], self.I[1, 2],
self.I[2, 0], self.I[2, 1], self.I[2, 2],
self.Jm,
self.B,
self.Tc[0], self.Tc[1],
self.G,
self.qlim[0], self.qlim[1]
)
return s
def A(self, q=None):
"""
Link transform matrix
T = A(q) is the link homogeneous transformation matrix (4x4)
corresponding to the link variable q
:param q: Joint coordinate (radians or metres). Not required for links
with no variable
:type q: float
:return T: link homogeneous transformation matrix
:rtype T: SE3
"""
j = 0
tr = SE3()
if self.q_idx is not None and q is None:
raise ValueError("q is required for variable joints")
for k in range(self.M):
if self.ets[k].jtype == self.ets[k].VARIABLE:
T = self.ets[k].T(q)
j += 1
else:
T = self.ets[k].T()
tr = tr * T
return tr
def islimit(self, q):
"""
Checks if the joint is exceeding a joint limit
:return: True if joint is exceeded
:rtype: bool
"""
if q < self.qlim[0] or q > self.qlim[1]:
return True
else:
return False
def nofriction(self, coulomb=True, viscous=False):
"""
        l2 = nofriction(coulomb, viscous) copies the link and returns a link
        with the same parameters except that the Coulomb and/or viscous
        friction parameters are set to zero.
        l2 = nofriction() as above except that only the Coulomb parameter is
        set to zero.
:param coulomb: if True, will set the coulomb friction to 0
:type coulomb: bool
:param viscous: if True, will set the viscous friction to 0
:type viscous: bool
"""
# Copy the Link
link = self._copy()
if viscous:
link.B = 0.0
if coulomb:
link.Tc = [0.0, 0.0]
return link
def friction(self, qd):
"""
tau = friction(qd) Calculates the joint friction force/torque (n)
for joint velocity qd (n). The friction model includes:
- Viscous friction which is a linear function of velocity.
- Coulomb friction which is proportional to sign(qd).
:param qd: The joint velocity
:type qd: float
:return tau: the friction force/torque
:rtype tau: float
:notes:
- The friction value should be added to the motor output torque,
it has a negative value when qd > 0.
- The returned friction value is referred to the output of the
gearbox.
- The friction parameters in the Link object are referred to the
motor.
- Motor viscous friction is scaled up by G^2.
- Motor Coulomb friction is scaled up by G.
- The appropriate Coulomb friction value to use in the
non-symmetric case depends on the sign of the joint velocity,
not the motor velocity.
- The absolute value of the gear ratio is used. Negative gear
ratios are tricky: the Puma560 has negative gear ratio for
joints 1 and 3.
"""
tau = self.B * np.abs(self.G) * qd
if qd > 0:
tau += self.Tc[0]
elif qd < 0:
tau += self.Tc[1]
# Scale up by gear ratio
tau = -np.abs(self.G) * tau
return tau
```
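The `friction()` docstring above spells out the friction model. A small hedged sketch of what it computes, with made-up link parameters (illustrative only):
```python
from ropy.robot.ELink import ELink
from ropy.robot.ET import ET

# A single revolute joint with viscous friction B, Coulomb friction Tc and
# gear ratio G (values are illustrative only).
link = ELink([ET.TRz()], B=1e-3, Tc=[0.4, -0.43], G=100.0)
# For qd > 0: tau = -|G| * (B*|G|*qd + Tc[0]) = -(100*(1e-3*100*0.5 + 0.4)) = -45.0
print(link.friction(0.5))
```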
#### File: ropy/tests/test_et.py
```python
import numpy.testing as nt
import numpy as np
import ropy as rp
import spatialmath.base as sm
import unittest
class TestET(unittest.TestCase):
# def test_fail(self):
# self.assertRaises(ValueError, rp.ET.TRx)
def test_TRx(self):
fl = 1.543
nt.assert_array_almost_equal(rp.ET.TRx(fl).T().A, sm.trotx(fl))
nt.assert_array_almost_equal(rp.ET.TRx(-fl).T().A, sm.trotx(-fl))
nt.assert_array_almost_equal(rp.ET.TRx(0).T().A, sm.trotx(0))
def test_TRy(self):
fl = 1.543
nt.assert_array_almost_equal(rp.ET.TRy(fl).T().A, sm.troty(fl))
nt.assert_array_almost_equal(rp.ET.TRy(-fl).T().A, sm.troty(-fl))
nt.assert_array_almost_equal(rp.ET.TRy(0).T().A, sm.troty(0))
def test_TRz(self):
fl = 1.543
nt.assert_array_almost_equal(rp.ET.TRz(fl).T().A, sm.trotz(fl))
nt.assert_array_almost_equal(rp.ET.TRz(-fl).T().A, sm.trotz(-fl))
nt.assert_array_almost_equal(rp.ET.TRz(0).T().A, sm.trotz(0))
def test_Ttx(self):
fl = 1.543
nt.assert_array_almost_equal(rp.ET.Ttx(fl).T().A, sm.transl(fl, 0, 0))
nt.assert_array_almost_equal(rp.ET.Ttx(-fl).T().A, sm.transl(-fl, 0, 0))
nt.assert_array_almost_equal(rp.ET.Ttx(0).T().A, sm.transl(0, 0, 0))
def test_Tty(self):
fl = 1.543
nt.assert_array_almost_equal(rp.ET.Tty(fl).T().A, sm.transl(0, fl, 0))
nt.assert_array_almost_equal(rp.ET.Tty(-fl).T().A, sm.transl(0, -fl, 0))
nt.assert_array_almost_equal(rp.ET.Tty(0).T().A, sm.transl(0, 0, 0))
def test_Ttz(self):
fl = 1.543
nt.assert_array_almost_equal(rp.ET.Ttz(fl).T().A, sm.transl(0, 0, fl))
nt.assert_array_almost_equal(rp.ET.Ttz(-fl).T().A, sm.transl(0, 0, -fl))
nt.assert_array_almost_equal(rp.ET.Ttz(0).T().A, sm.transl(0, 0, 0))
def test_str(self):
rx = rp.ET.TRx(1.543)
ry = rp.ET.TRy(1.543)
rz = rp.ET.TRz(1.543)
tx = rp.ET.Ttx(1.543)
ty = rp.ET.Tty(1.543)
tz = rp.ET.Ttz(1.543)
self.assertEqual(str(rx), 'Rx(88.4074)')
self.assertEqual(str(ry), 'Ry(88.4074)')
self.assertEqual(str(rz), 'Rz(88.4074)')
self.assertEqual(str(tx), 'tx(1.543)')
self.assertEqual(str(ty), 'ty(1.543)')
self.assertEqual(str(tz), 'tz(1.543)')
self.assertEqual(str(rx), repr(rx))
self.assertEqual(str(ry), repr(ry))
self.assertEqual(str(rz), repr(rz))
self.assertEqual(str(tx), repr(tx))
self.assertEqual(str(ty), repr(ty))
self.assertEqual(str(tz), repr(tz))
def test_str_q(self):
rx = rp.ET.TRx()
ry = rp.ET.TRy()
rz = rp.ET.TRz()
tx = rp.ET.Ttx()
ty = rp.ET.Tty()
tz = rp.ET.Ttz()
rx.j = 86
ry.j = 86
rz.j = 86
tx.j = 86
ty.j = 86
tz.j = 86
self.assertEqual(str(rx), 'Rx(q86)')
self.assertEqual(str(ry), 'Ry(q86)')
self.assertEqual(str(rz), 'Rz(q86)')
self.assertEqual(str(tx), 'tx(q86)')
self.assertEqual(str(ty), 'ty(q86)')
self.assertEqual(str(tz), 'tz(q86)')
self.assertEqual(str(rx), repr(rx))
self.assertEqual(str(ry), repr(ry))
self.assertEqual(str(rz), repr(rz))
self.assertEqual(str(tx), repr(tx))
self.assertEqual(str(ty), repr(ty))
self.assertEqual(str(tz), repr(tz))
def test_T_real(self):
fl = 1.543
rx = rp.ET.TRx(fl)
ry = rp.ET.TRy(fl)
rz = rp.ET.TRz(fl)
tx = rp.ET.Ttx(fl)
ty = rp.ET.Tty(fl)
tz = rp.ET.Ttz(fl)
nt.assert_array_almost_equal(rx.T().A, sm.trotx(fl))
nt.assert_array_almost_equal(ry.T().A, sm.troty(fl))
nt.assert_array_almost_equal(rz.T().A, sm.trotz(fl))
nt.assert_array_almost_equal(tx.T().A, sm.transl(fl, 0, 0))
nt.assert_array_almost_equal(ty.T().A, sm.transl(0, fl, 0))
nt.assert_array_almost_equal(tz.T().A, sm.transl(0, 0, fl))
    def test_T_real_joint(self):  # renamed: a second test_T_real would shadow the first
fl = 1.543
rx = rp.ET.TRx()
ry = rp.ET.TRy()
rz = rp.ET.TRz()
tx = rp.ET.Ttx()
ty = rp.ET.Tty()
tz = rp.ET.Ttz()
rx.j = 86
ry.j = 86
rz.j = 86
tx.j = 86
ty.j = 86
tz.j = 86
nt.assert_array_almost_equal(rx.T(fl).A, sm.trotx(fl))
nt.assert_array_almost_equal(ry.T(fl).A, sm.troty(fl))
nt.assert_array_almost_equal(rz.T(fl).A, sm.trotz(fl))
nt.assert_array_almost_equal(tx.T(fl).A, sm.transl(fl, 0, 0))
nt.assert_array_almost_equal(ty.T(fl).A, sm.transl(0, fl, 0))
nt.assert_array_almost_equal(tz.T(fl).A, sm.transl(0, 0, fl))
if __name__ == '__main__':
unittest.main()
```
#### File: ropy/tests/test_models.py
```python
import ropy as rp
import unittest
class TestModels(unittest.TestCase):
def test_puma(self):
puma = rp.Puma560()
puma.qr
puma.qz
puma.qs
puma.qn
def test_frankie(self):
frankie = rp.Frankie()
frankie.qr
frankie.qz
def test_PandaURDF(self):
panda = rp.PandaURDF()
panda.qr
panda.qz
def test_UR5(self):
ur = rp.UR5()
ur.qr
ur.qz
def test_wx250s(self):
wx = rp.wx250s()
wx.qr
wx.qz
```
#### File: ropy/tests/test_PyPlot2.py
```python
import ropy as rp
# import spatialmath as sm
import unittest
class TestPyPlot(unittest.TestCase):
def test_PyPlot(self):
panda = rp.PandaMDH()
env = rp.backend.PyPlot2()
env.launch()
env.add(panda)
env.step()
env._plot_handler(None, None)
env.close()
def test_PyPlot_invisible(self):
panda = rp.PandaMDH()
env = rp.backend.PyPlot2()
env.launch()
env.add(panda, display=False)
env.step()
env._plot_handler(None, None)
env.close()
def test_unimplemented(self):
# TODO remove these as implemented
env = rp.backend.PyPlot2()
env.reset()
env.restart()
env.remove()
``` |
{
"source": "jhavl/spatialgeometry",
"score": 2
} |
#### File: spatialgeometry/spatialgeometry/Shape.py
```python
from spatialmath import SE3
from spatialmath.base.argcheck import getvector
from spatialmath.base import r2q
import numpy as np
import copy
_mpl = False
try:
from matplotlib import colors as mpc
_mpl = True
except ImportError: # pragma nocover
pass
CONST_RX = SE3.Rx(np.pi / 2).A
class Shape:
def __init__(self, base=None, color=None, stype=None):
# These three are static attributes which can never be changed
# If these are directly accessed and re-written, segmentation faults
# will follow very soon after
        # wT and sT cannot be accessed and set by users, but base can be
# modified through its setter
self._wT = np.eye(4)
self._sT = np.eye(4)
self._sq = np.zeros(4)
self._base = np.eye(4)
self.base = base
self.stype = stype
self.v = np.zeros(6)
self.color = color
self._collision = False
def copy(self):
"""
Copy of Shape object
:return: Shallow copy of Shape object
:rtype: Shape
"""
# print("Hello")
new = copy.copy(self)
# print(self._base)
# new = Shape(self.base, self.color, self.stype)
for k, v in self.__dict__.items():
if k.startswith("_") and isinstance(v, np.ndarray):
setattr(new, k, np.copy(v))
return new
def _to_hex(self, rgb):
rgb = (np.array(rgb) * 255).astype(int)
return int("0x%02x%02x%02x" % (rgb[0], rgb[1], rgb[2]), 16)
def to_dict(self):
"""
        to_dict() returns the shape's information in dictionary form
:returns: All information about the shape
:rtype: dict
"""
self._to_hex(self.color[0:3])
if self.stype == "cylinder":
fk = self._sT @ CONST_RX
else:
fk = self._sT
q = r2q(fk[:3, :3]).tolist()
q = [q[1], q[2], q[3], q[0]]
shape = {
"stype": self.stype,
"t": fk[:3, 3].tolist(),
"q": q,
"v": self.v.tolist(),
"color": self._to_hex(self.color[0:3]),
"opacity": self.color[3],
}
return shape
def fk_dict(self):
"""
        fk_dict() outputs the shape's pose in dictionary form
        :returns: The shape pose in translation and quaternion form
:rtype: dict
"""
if self.stype == "cylinder":
fk = self._sT @ CONST_RX
else:
fk = self._sT
q = r2q(fk[:3, :3]).tolist()
q = [q[1], q[2], q[3], q[0]]
shape = {"t": fk[:3, 3].tolist(), "q": q}
return shape
def __repr__(self): # pragma nocover
return f"{self.stype},\n{self.base}"
@property
def collision(self):
return self._collision
@property
def v(self):
return self._v
@v.setter
def v(self, value):
self._v = getvector(value, 6)
@property
def color(self):
"""
        shape.color returns a four-element tuple representing (red, green, blue, alpha)
where alpha represents transparency. Values returned are in the range [0-1].
"""
return self._color
@color.setter
def color(self, value):
"""
shape.color(new_color) sets the color of a shape.
The color format is (red, green, blue, alpha).
Color can be set with a three length list, tuple or array which
will only set the (r, g, b) values and alpha will be set to maximum.
Color can be set with a four length list, tuple or array which
will set the (r, g, b, a) values.
Note: the color is auto-normalising. If any value passed is greater than
1.0 then all values will be normalised to the [0-1] range assuming the
previous range was [0-255].
"""
default_color = (0.95, 0.5, 0.25, 1.0)
if isinstance(value, str):
if _mpl:
try:
value = mpc.to_rgba(value)
except ValueError:
print(f"{value} is an invalid color name, using default color")
value = default_color
else: # pragma nocover
value = default_color
print(
"Color only supported when matplotlib is installed\n"
"Install using: pip install matplotlib"
)
elif value is None:
value = default_color
else:
value = np.array(value)
if np.any(value > 1.0):
value = value / 255.0
if value.shape[0] == 3:
value = np.r_[value, 1.0]
value = tuple(value)
self._color = value
def set_alpha(self, alpha):
"""
        Convenience method to set the opacity/alpha value of the shape's color.
"""
if alpha > 1.0:
alpha /= 255
new_color = np.r_[self._color[:3], alpha]
self._color = tuple(new_color)
@property
def wT(self):
return self._sT
@wT.setter
def wT(self, T):
self._wT[:] = T
self._sT[:] = self._wT @ self._base
self._sq[:] = r2q(self._sT[:3, :3], order="xyzs")
@property
def base(self):
return SE3(np.copy(self._base), check=False)
@base.setter
def base(self, T):
if not isinstance(T, SE3):
T = SE3(T)
self._base[:] = T.A
self._sT[:] = self._wT @ self._base
self._sq[:] = r2q(self._sT[:3, :3], order="xyzs")
class Axes(Shape):
"""An axes whose center is at the local origin.
Parameters
:param length: The length of each axis.
:type length: float
:param base: Local reference frame of the shape
:type base: SE3
"""
def __init__(self, length, **kwargs):
super(Axes, self).__init__(stype="axes", **kwargs)
self.length = length
@property
def length(self):
return self._length
@length.setter
def length(self, value):
self._length = float(value)
def to_dict(self):
"""
        to_dict() returns the shape's information in dictionary form
:returns: All information about the shape
:rtype: dict
"""
shape = super().to_dict()
shape["length"] = self.length
return shape
``` |
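A minimal usage sketch of the colour handling above. It assumes `spatialmath`, `numpy` and the module above are importable (the `spatialgeometry` import path is an assumption), and the values are purely illustrative.

```python
# Illustrative only: Axes/Shape come from the module above, SE3 from spatialmath.
from spatialmath import SE3
from spatialgeometry import Axes  # assumed import path

# Colour values greater than 1.0 are rescaled from a 0-255 range.
ax = Axes(length=0.5, base=SE3(0.1, 0.2, 0.3), color=(255, 128, 0))
print(ax.color)      # (1.0, 0.50196..., 0.0, 1.0) - alpha defaults to 1.0
ax.set_alpha(128)    # alpha > 1.0 is likewise rescaled from 0-255
print(ax.to_dict())  # dict with stype, translation, scalar-last quaternion, hex colour, opacity, length
```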
{
"source": "Jhawk1196/CS3250PythonProject",
"score": 3
} |
#### File: CS3250PythonProject/tests/testfontSelect.py
```python
import unittest
from tkinter import *
import src.fontSelect as fontSelect
from tkinter import font
from mock import patch
class TestFontStyle(unittest.TestCase):
@patch('tkinter.font.Font', autospec=True)
def test_calls_config(
self, mock_font):
"""
        Test that font_style correctly calls config to change the font family
"""
root = Tk()
custom = mock_font(root, family='Helvetica', size=12)
fontSelect.font_style(custom, 'Times')
custom.config.assert_called_with(family='Times')
def test_configs_font(
self):
"""
        Test that font_style and font_size correctly change the
        appropriate attribute (family, size)
"""
root = Tk()
custom = font.Font(root, family='Helvetica', size=12)
self.assertEqual(custom.cget('family'), 'Helvetica')
fontSelect.font_style(custom, 'Times')
self.assertEqual(custom.cget('family'), 'Times')
fontSelect.font_size(custom, 18)
self.assertEqual(custom.cget('size'), 18)
def test_fontColor(
self):
"""
        Test that font_color correctly changes the fg attribute of the
        Message widget named label
"""
root = Tk()
label = Message(root, fg='Red')
label.pack()
self.assertEqual('Black', fontSelect.font_color(label, 'Black')
.cget("fg"))
``` |
{
"source": "jhawkey/IS_mapper",
"score": 3
} |
#### File: IS_mapper/scripts/create_output.py
```python
import os, logging
import operator
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from run_commands import run_command
class RemovedHit(object):
def __init__(self, left_flank, right_flank):
# reason for removal
self.reason = ''
# coordinates of left and right flanks
self.left_flank = str(left_flank[0]) + '-' + str(left_flank[1])
self.right_flank = str(right_flank[0]) + '-' + str(right_flank[1])
# BED comparison that this removal has come from
self.comparison_type = ''
# put in percent ID and coverage if this was a BLAST check
self.per_id = ''
self.coverage = ''
class ISHit(object):
def __init__(self, left_pos, right_pos):
# hit type will either be novel or known
self.hit_type = None
# left and right features are the actual feature surrounding the hit
# so can access info from the qualifiers
self.left_feature = None
self.right_feature = None
# These will be integers of either 1 or -1 to tell you the strand of the
# genes surrounding the hit
self.left_strand = None
self.right_strand = None
# These will be integers detailing the distance from the IS hit to
# either the left or right genes
self.left_distance = None
self.right_distance = None
# these are the gene IDs (locus_tags) closest to the IS hit
self.gene_left = None
self.gene_right = None
# interrupted only becomes true if gene left and right are the same
self.interrupted = False
# confidence level is either confident, imprecise (*) or unpaired (?)
self.confidence_level = None
# x and y coordinates of the hit
# x is always the smaller and y the larger, REGARDLESS of orientation!
self.x = left_pos
self.y = right_pos
# set this to true when the hit has come from the intersect file
self.overlap = False
# will be either forward or reverse
self.orientation = None
# the distance between the coordinates
self.gap = None
# store the blast values if its a known hit
self.per_id = ''
self.coverage = ''
def get_gap_distance(self):
self.gap = self.y - self.x
if self.overlap:
self.gap = -self.gap
return self
def determine_interrupted(self):
if self.gene_left == self.gene_right:
self.interrupted = True
return self
def get_flanking_genes(self, genbank_obj, feature_list):
'''
:param genbank_obj: The parsed genbank object
        :param feature_list: The parsed features list from the genbank file for easy searching
        :return: The modified IS hit object containing the features of the left and right genes, their strand, and
the distance from the IS hit to each gene
'''
# Find the correct indexes
left_feature_index = self.binary_search(feature_list, 'L')
right_feature_index = self.binary_search(feature_list, 'R')
# Print out information if returning one of the error codes
if type(left_feature_index) != int or type(right_feature_index) != int:
print('left index')
print(left_feature_index)
print('right index')
print(right_feature_index)
            print('left position: ' + str(self.x))   # the hit stores its coordinates as x and y
            print('right position: ' + str(self.y))
# Extract the SeqFeature object that corresponds to that index
left_feature = genbank_obj.features[left_feature_index]
right_feature = genbank_obj.features[right_feature_index]
# Add the SeqFeatures to the IS hit object, and strand information
self.left_feature = left_feature
self.right_feature = right_feature
self.left_strand = left_feature.strand
self.right_strand = right_feature.strand
# Get the distances from the IS hit to the left and right genes
# The distance to the left gene is the endmost position of the feature - the left IS coord
left_dist = abs(max(left_feature.location.start, left_feature.location.end) - self.x)
# The distance to the right gene is the startmost position of the feature - the right IS coord
right_dist = abs(min(right_feature.location.start, right_feature.location.end) - self.y)
# Here is probably a good place to check if we've got a position that wraps around from start
# to end of the reference
# Get the size of the reference genome
genome_size = len(genbank_obj.seq)
# If we've got a distance that is close to the size of the reference, then we know we need to
# alter it
if left_dist in range(int(round(genome_size * 0.9)), int(round(genome_size * 1.1))):
# The the left hand feature is at the end of the genome
# Distance from IS position to start of the genome is the
# position itself
dist_to_start = self.x
# Distance from the end of the final gene to the end of the
# genome
dist_to_end = abs(left_feature.location.end - genome_size)
# So the total distance is those two added together
left_dist = dist_to_start + dist_to_end
elif right_dist in range(int(round(genome_size * 0.9)), int(round(genome_size * 1.1))):
# Then the right hand feature is at the start of the genome
# Distance from the IS position to the end of the genome
dist_to_end = abs(genome_size - self.y)
# Distance from the start of the genome to the start of the first feature
# is the start position of the first feature
dist_to_feature = right_feature.location.start
# So the total distance is those two added together
right_dist = dist_to_end + dist_to_feature
self.left_distance = left_dist
self.right_distance = right_dist
# add the gene names
self.gene_left = self.left_feature.qualifiers['locus_tag'][0]
self.gene_right = self.right_feature.qualifiers['locus_tag'][0]
return self
def binary_search(self, features, direction):
if direction == 'R':
isPosition = self.y
else:
isPosition = self.x
min = 0
max = len(features) - 1
while True:
# If the min has exceeded the max, then the IS position is not
# inside a feature, and m will now be pointing to a
# feature next to the IS position.
if max < min:
if direction == 'R':
return self.findFeatureAfterPosition(features, isPosition, m)
else:
return self.findFeatureBeforePosition(features, isPosition, m)
# Find the midpoint and save the feature attributes
m = (min + max) // 2
featureStart = features[m][0]
featureEnd = features[m][1]
featureIndex = features[m][2]
# If the IS position is after the feature, move the minimum to
# be after the feature.
if featureEnd < isPosition:
min = m + 1
# If the IS position is before the feature, move the maximum to
# be before the feature.
elif featureStart > isPosition:
max = m - 1
# If the IS position is inside the feature, return only that feature
elif isPosition >= featureStart and isPosition <= featureEnd:
return featureIndex
else:
return "1 - THIS SHOULDN'T HAPPEN!"
def findFeatureBeforePosition(self, features, isPosition, m):
# If we are looking for the feature to the left of the
# IS position, then either m-1 or m is our answer
# If the start of the m feature is after the IS position,
# then m is after the IS and m-1 is the correct feature
if features[m][0] > isPosition:
return features[m - 1][2]
# If both m and m+1 features are before the IS position,
# then m will be closer to the IS and is the correct feature
elif features[m - 1][1] < isPosition and features[m][1] < isPosition:
return features[m][2]
else:
return "2 - THIS SHOULDN'T HAPPEN!"
def findFeatureAfterPosition(self, features, isPosition, m):
# If we are looking for the feature to the right of the
# IS position, then either m or m+1 is our answer
# an index error will occur if m is the final feature, so just check that the first part is true
# and return m
try:
features[m + 1]
except IndexError:
if features[m][0] > isPosition:
return features[m][2]
# otherwise we must be after the final position, so need to
# return the start position of the very first feature
else:
return features[0][2]
# If the end of the m feature is before the IS position,
# then m is before the IS and m+1 is the correct feature
if features[m][1] < isPosition:
index = m + 1
if index >= len(features):
return features[0][2]
return features[m + 1][2]
# If both m and m+1 features are after the IS position,
# then m will be closer to the IS and is the correct feature
elif features[m][0] > isPosition and features[m + 1][0] > isPosition:
return features[m][2]
else:
return "3 - THIS SHOULDN'T HAPPEN!"
def get_features(genbank_object):
feature_list = []
feature_count_list = 0
feature_types = ["CDS", "tRNA", "rRNA"]
for feature in genbank_object.features:
if feature.type in feature_types:
feature_list.append([int(feature.location.start), int(feature.location.end), feature_count_list])
feature_count_list += 1
else:
feature_count_list += 1
feature_list = sorted(feature_list, key=operator.itemgetter(0))
return(feature_list)
def check_hit_within_hit(intersect_left, left_range, intersect_right, right_range):
if (intersect_left[0] in right_range and intersect_left[1] in right_range):
return True
elif (intersect_right[0] in left_range and intersect_right[1] in left_range):
return True
else:
return False
def get_qualifiers(cds_qualifiers, trna_qualifiers, rrna_qualifiers, feature):
'''
Takes a list of possible qualifier IDs and attempts
to find them in the feature given.
If the qualifier is present, appends to a list, otherwise
skips and keeps going.
Returns a list of qualfiers found in that feature.
'''
return_quals = []
if feature.type == 'CDS':
qualifier_list = cds_qualifiers
elif feature.type == 'tRNA':
qualifier_list = trna_qualifiers
elif feature.type == 'rRNA':
qualifier_list = rrna_qualifiers
for qual in qualifier_list:
try:
return_quals.append(feature.qualifiers[qual][0])
except KeyError:
pass
return return_quals
def get_orientation(left_coords, right_coords):
'''
:param left_coords: list of coordinates for left end of hit
:param right_coords: list of coordinates for right end of hit
:return: return ISHit object, intialised with orienation and left/right positions
'''
# x must always be the smallest position, and y the largest position
# regardless of orientation
if left_coords[0] < right_coords[0] or left_coords[1] < right_coords[1]:
smallest = min(right_coords[0], left_coords[1])
biggest = max(right_coords[0], left_coords[1])
new_hit = ISHit(smallest, biggest)
# we are in forward orientation
new_hit.orientation = 'F'
else:
smallest = min(left_coords[0], right_coords[1])
biggest = max(left_coords[0], right_coords[1])
new_hit = ISHit(smallest, biggest)
# we are in reverse orientation
new_hit.orientation = 'R'
return new_hit
def doBlast(blast_input, blast_output, database):
'''
Perform a BLAST using the NCBI command line tools
in BioPython.
'''
run_command(['makeblastdb', '-dbtype nucl', '-in', database], shell=True)
run_command(['blastn', '-query', blast_input, '-db', database, '-outfmt "6 qseqid qlen sacc pident length slen sstart send evalue bitscore qcovs"', '>', blast_output], shell=True)
def check_seq_between(genbank_seq, insertion, start, end, name, temp):
'''
Check the sequence between two ends to see
if it matches the IS query or not, and what
the coverage and %ID to the query.
:param genbank_seq: Whole sequence from genbank file
:param insertion: IS query object to BLAST against
:param start: Smallest coordinate, to extract sequence
:param end: Largest coordinate, to extract sequence
:param name: prefix for the file of this sequence
:param temp: folder for the file of this sequence to go to
:return: If there is a BLAST hit, return a dictionary with the 'coverage' and 'per_id' values, else return
an empty dict
'''
# Get sequence between left and right ends
seq_between = genbank_seq[start:end]
# Turn the sequence into a fasta file
seq_between = SeqRecord(Seq(str(seq_between)), id=name)
out_seq_between = os.path.join(temp, name + '.fasta')
out_insertion = os.path.join(temp, name + 'ISseq.fasta')
SeqIO.write(seq_between, out_seq_between, 'fasta')
SeqIO.write(insertion, out_insertion, 'fasta')
blast_out = os.path.join(temp, name + '_out.txt')
# Perform the BLAST
doBlast(out_seq_between, blast_out, out_insertion)
# Only want the top hit, so set count variable to 0
first_result = 0
# Open the BLAST output file
with open(blast_out) as summary:
for line in summary:
# Get coverage and % ID for top hit
if first_result == 0:
info = line.strip().split('\t')
hit = {'coverage': float(info[-1]), 'per_id': float(info[3])}
first_result += 1
return hit
# If there is no hit, just return zeros
return {'coverage': 0, 'per_id': 0}
def check_unpaired_hits(line_check, ref_gbk_obj, ref_feature_list, is_query_obj, min_range, max_range, novel_gap_size,
tmp_output_folder):
# intialise a list of all the hits found in this file
IS_hit_list = []
# intialise list of hits to remove
removed_hit_list = []
# get length of IS
is_query_length = len(is_query_obj.seq)
# go through each line
for info in line_check:
# get the distance between the hits
gap = int(info[6])
# separate out info on the left and right sides of the hit
intersect_left = [int(info[1]), int(info[2])]
intersect_right = [int(info[4]), int(info[5])]
# TODO: check_hit_within_hit
# get the orientation and the IS hit object
new_hit = get_orientation(intersect_left, intersect_right)
# if the gap is small, it's a novel hit
if gap <= novel_gap_size:
new_hit.hit_type = 'novel'
new_hit.confidence_level = 'unpaired'
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
IS_hit_list.append(new_hit)
# if the gap is big enough, could be the IS itself, so do a BLAST check
elif float(gap) / is_query_length >= min_range and float(gap) / is_query_length <= max_range:
new_hit = get_orientation(intersect_left, intersect_right)
seq_check_results = check_seq_between(ref_gbk_obj.seq, is_query_obj, new_hit.x,
new_hit.y, 'tmp_seq', tmp_output_folder)
# if it's a good hit, add it
if len(seq_check_results) != 0 and seq_check_results['per_id'] >= 80 and seq_check_results[
'coverage'] >= 80:
# get the flanking genes
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# make sure its a confident, novel hit
new_hit.hit_type = 'known'
new_hit.confidence_level = 'unpaired'
new_hit.per_id = str(seq_check_results['per_id'])
new_hit.coverage = str(seq_check_results['coverage'])
# add it to the list
IS_hit_list.append(new_hit)
# if the thresholds are low, then mark it as a possible related IS
elif len(seq_check_results) != 0 and seq_check_results['per_id'] >= 50 and seq_check_results[
'coverage'] >= 50:
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# mark it as a possible related IS, but confident
new_hit.hit_type = 'possible related IS'
new_hit.confidence_level = 'unpaired'
new_hit.per_id = str(seq_check_results['per_id'])
new_hit.coverage = str(seq_check_results['coverage'])
# add it to the list
IS_hit_list.append(new_hit)
# otherwise this is a spurious result, remove
else:
removed_hit = RemovedHit(intersect_left, intersect_right)
removed_hit.reason = 'Sequence between does not match IS query'
removed_hit.comparison_type = 'BED closest, unpaired'
removed_hit.per_id = str(seq_check_results['per_id'])
removed_hit.coverage = str(seq_check_results['coverage'])
removed_hit_list.append(removed_hit)
# the gap is too small to be the IS, but larger than a novel hit
elif float(gap) / is_query_length <= min_range and float(gap) / is_query_length < max_range:
new_hit = get_orientation(intersect_left, intersect_right)
# add the relevant information
new_hit.hit_type = 'novel'
new_hit.confidence_level = 'unpaired'
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# add it to the list
IS_hit_list.append(new_hit)
# otherwise remove!
else:
removed_hit = RemovedHit(intersect_left, intersect_right)
removed_hit.reason = 'Sequence between is not large enough to be IS query'
removed_hit.comparison_type = 'BED closest, unpaired'
removed_hit_list.append(removed_hit)
return IS_hit_list, removed_hit_list
def write_typing_output(IShits, removedhits, cds_feature_info, rrna_feature_info, trna_feature_info, output_table):
with open(output_table, 'w') as out:
# set the header and write it to the output file
header = ["region", "orientation", "x", "y", "gap", "call", "percent_ID", "percent_cov", "left_gene", "left_description", "left_strand",
"left_distance", "right_gene", "right_description", "right_strand", "right_distance", "gene_interruption"]
out.write('\t'.join(header) + '\n')
# if there are no hits, record this and exit the function
if len(IShits) == 0:
out.write('No hits found')
out.close()
return
# sort IS hits by left position, ascending order
IShits.sort(key=lambda x: x.x)
# loop through each hit
region = 1
for IShit in IShits:
region_num = 'region_%s' % region
region += 1
call_type = IShit.hit_type
if IShit.confidence_level == 'imprecise':
call_type = call_type + '*'
elif IShit.confidence_level == 'unpaired':
call_type = call_type + '?'
# calculate gap distance
IShit.get_gap_distance()
# determine if gene is interrupted or not
IShit.determine_interrupted()
# get qualifiers for left and right genes
# TODO: make sure this qualifier call is robust
if IShit.left_feature.type == 'CDS':
try:
left_description = IShit.left_feature.qualifiers[cds_feature_info][0]
except KeyError:
left_description = ''
logging.warning('No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --cds.' % IShit.left_feature.qualifiers['locus_tag'][0])
elif IShit.left_feature.type == 'rRNA':
try:
left_description = IShit.left_feature.qualifiers[rrna_feature_info][0]
except KeyError:
left_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --rrna.' % IShit.left_feature.qualifiers['locus_tag'][0])
elif IShit.left_feature.type == 'tRNA':
try:
left_description = IShit.left_feature.qualifiers[trna_feature_info][0]
except KeyError:
left_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --trna.' % IShit.left_feature.qualifiers['locus_tag'][0])
if IShit.right_feature.type == 'CDS':
try:
right_description = IShit.right_feature.qualifiers[cds_feature_info][0]
except KeyError:
right_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --cds.' % IShit.right_feature.qualifiers['locus_tag'][0])
elif IShit.right_feature.type == 'rRNA':
try:
right_description = IShit.right_feature.qualifiers[rrna_feature_info][0]
except KeyError:
right_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --rrna.' % IShit.right_feature.qualifiers['locus_tag'][0])
elif IShit.right_feature.type == 'tRNA':
try:
right_description = IShit.right_feature.qualifiers[trna_feature_info][0]
except KeyError:
right_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --trna.' % IShit.right_feature.qualifiers['locus_tag'][0])
# put together row
line_list = [region_num, IShit.orientation, str(IShit.x), str(IShit.y), str(IShit.gap),
call_type, IShit.per_id, IShit.coverage, IShit.gene_left, left_description,
str(IShit.left_strand), str(IShit.left_distance), IShit.gene_right,
right_description, str(IShit.right_strand), str(IShit.right_distance),
str(IShit.interrupted)]
# write out the information
out.write('\t'.join(line_list) + '\n')
# close the file
out.close()
# if there are hits that have been removed, then write them out
if len(removedhits) != 0:
removed_hits_file = output_table.split('.txt')[0]
with open(removed_hits_file, 'w') as removed_out:
# write the header
header = ['left_flank', 'right_flank', 'removal_reason', 'per_id', 'coverage', 'comparison_type']
removed_out.write('\t'.join(header) + '\n')
# loop through each hit and write out
for removedhit in removedhits:
line_list = [removedhit.left_flank, removedhit.right_flank, removedhit.reason, removedhit.per_id,
removedhit.coverage, removedhit.comparison_type]
removed_out.write('\t'.join(line_list) + '\n')
# close the file
removed_out.close()
# exit the function
return
def create_typing_output(filenames, ref_gbk_obj, is_query_obj, min_range, max_range, novel_gap_size, cds_feature_info,
rrna_feature_info, trna_feature_info, tmp_output_folder, sample_prefix):
# first we need all the input files so we can match hits up
intersect_file = filenames['intersect']
closest_file = filenames['closest']
left_unp = filenames['left_unpaired']
right_unp = filenames['right_unpaired']
#left_bedfile = filenames['left_merged_bed']
#right_bedfile = filenames['right_merged_bed']
# we also need to know the name of the table file where we'll write the final output to
final_table_file = filenames['table']
# final list of IS hit objects to make into a table at the end
IS_hits = []
# list of removed hits
removed_hits = []
# If both intersect and closest bed files are empty, there are no hits
# write out an empty file and record this in the log file
if os.stat(intersect_file)[6] == 0 and os.stat(closest_file)[6] == 0:
write_typing_output(IS_hits, removed_hits, cds_feature_info, rrna_feature_info, trna_feature_info, final_table_file)
logging.info('Both the intersect and closest BED files were empty.')
logging.info('No hits found for sample %s', sample_prefix)
return
# If there are hits, read in the genbank file we're mapping to,
# and create feature list for searching
ref_feature_list = get_features(ref_gbk_obj)
all_intersect_left = []
all_intersect_right = []
all_closest_left = []
all_closest_right = []
# Start with the intersect file (novel hits)
if os.stat(intersect_file)[6] != 0:
with open(intersect_file) as bed_intersect:
# loop through each set of hits
for line in bed_intersect:
# extract the information
info = line.strip().split('\t')
# separate out info on the left and right sides of the hit
intersect_left = [int(info[1]), int(info[2])]
intersect_right = [int(info[4]), int(info[5])]
# add this information to the master lists, for checking against unpaired hits later
all_intersect_left.append(intersect_left)
all_intersect_right.append(intersect_right)
# get the gap between the hits, as determined by bedtools
gap = int(info[6])
# if the gap is small, then lets process this hit
if gap <= novel_gap_size:
left_range = range(min(intersect_left), max(intersect_left))
right_range = range(min(intersect_right), max(intersect_right))
# check if one hit is actually within the other hit
# if it is we need to remove it
hit_within_hit = check_hit_within_hit(intersect_left, left_range, intersect_right, right_range)
if hit_within_hit:
removed_hit = RemovedHit(intersect_left, intersect_right)
removed_hit.reason = 'one flank entirely within other flank'
removed_hit.comparison_type = 'BED intersect'
removed_hits.append(removed_hit)
# otherwise we need to process the hit
else:
# determine orientation and coordinates of hit
# process hit
new_hit = get_orientation(intersect_left, intersect_right)
# add the relevant information to the hit that we already know
new_hit.hit_type = 'novel'
new_hit.confidence_level = 'confident'
# make sure we note that this is overlapping because it's the intersect file
new_hit.overlap = True
# determine the features flanking the hit, and add the details to the hit object
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# append the hit to our list
IS_hits.append(new_hit)
# If the gap is too big, we need to remove this hit
else:
removed_hit = RemovedHit(intersect_left, intersect_right)
removed_hit.reason = 'gap too large'
removed_hit.comparison_type = 'BED intersect'
removed_hits.append(removed_hit)
# For the next section, grab the IS query length
is_query_length = len(is_query_obj.seq)
# Move on to the hits found in the closest bed file (known or imprecise hits)
if os.stat(closest_file)[6] != 0:
with open(closest_file) as bed_closest:
# loop through each line
for line in bed_closest:
# extract all the information
info = line.strip().split('\t')
# if the fourth column contains a -1, there are no hits in this file
# sometimes the fourth column is actually an empty string as there is an extra \t delimiter
if info[3] == '-1' or info[3] == '':
# exit the file and do not process further
logging.info('One or more flanking read files were empty, no hits found for sample %s', sample_prefix)
write_typing_output(IS_hits, removed_hits, cds_feature_info, rrna_feature_info, trna_feature_info, final_table_file)
return IS_hits
# get the distance between the hits
gap = int(info[6])
# separate out info on the left and right sides of the hit
intersect_left = [int(info[1]), int(info[2])]
intersect_right = [int(info[4]), int(info[5])]
# add this information to the master lists, for checking against unpaired hits later
all_closest_left.append(intersect_left)
all_closest_right.append(intersect_right)
# If the gap distance is 0, then this hit will be in the intersect file, so ignore
if gap == 0:
pass
# If the gap distance is small, this is likely a novel hit where no overlap was detected
elif gap <= novel_gap_size:
new_hit = get_orientation(intersect_left, intersect_right)
# add the relevant information to the hit that we already know
new_hit.hit_type = 'novel'
new_hit.confidence_level = 'confident'
# determine the features flanking the hit, and add the details to the hit object
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# append the hit to our list
IS_hits.append(new_hit)
# The gap size is within the range of the actual IS query size, and so probably indicates a known hit
# Need to BLAST the sequence here to check it matches the IS
elif float(gap) / is_query_length >= min_range and float(gap) / is_query_length <= max_range:
new_hit = get_orientation(intersect_left, intersect_right)
#genbank_seq, insertion, start, end, name, temp
seq_check_results = check_seq_between(ref_gbk_obj.seq, is_query_obj, new_hit.x,
new_hit.y, 'tmp_seq', tmp_output_folder)
# if it's a good hit, add it
if len(seq_check_results) != 0 and seq_check_results['per_id'] >= 80 and seq_check_results['coverage'] >= 80:
# get the flanking genes
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# make sure its a confident, novel hit
new_hit.hit_type = 'known'
new_hit.confidence_level = 'confident'
new_hit.per_id = str(seq_check_results['per_id'])
new_hit.coverage = str(seq_check_results['coverage'])
# add it to the list
IS_hits.append(new_hit)
# if the thresholds are low, then mark it as a possible related IS
elif len(seq_check_results) != 0 and seq_check_results['per_id'] >=50 and seq_check_results['coverage'] >= 50:
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# mark it as a possible related IS, but confident
new_hit.hit_type = 'possible related IS'
new_hit.confidence_level = 'confident'
new_hit.per_id = str(seq_check_results['per_id'])
new_hit.coverage = str(seq_check_results['coverage'])
# add it to the list
IS_hits.append(new_hit)
# otherwise this is a spurious result, remove
else:
removed_hit = RemovedHit(intersect_left, intersect_right)
removed_hit.reason = 'Sequence between was not a match for IS query'
removed_hit.comparison_type = 'BED closest'
try:
removed_hit.per_id = str(seq_check_results['per_id'])
removed_hit.coverage = str(seq_check_results['coverage'])
except KeyError:
removed_hit.per_id = 'not a match'
removed_hit.coverage = 'unknown'
removed_hits.append(removed_hit)
# The gap size here is smaller than the actual IS query, but larger than expected for a novel hit
# This is an imprecise hit
elif float(gap) / is_query_length <= min_range and float(gap) / is_query_length < max_range:
new_hit = get_orientation(intersect_left, intersect_right)
# add the relevant information
new_hit.hit_type = 'novel'
new_hit.confidence_level = 'imprecise'
new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
# add it to the list
IS_hits.append(new_hit)
# This hit is way too big and doesn't fit any of the other criteria, so needs to be recorded as removed
else:
removed_hit = RemovedHit(intersect_left, intersect_right)
removed_hit.reason = 'Sequence between is not large enough to be IS query'
removed_hit.comparison_type = 'BED closest'
removed_hits.append(removed_hit)
# Looking for unpaired hits which are not in the merged/closest bed files
# Possibly unpaired because the pair is low coverage and didn't pass
# depth cutoff
# Start with the left hand unpaired file
# FIRST, remove any positions which match regions we have ALREADY processed, by comparing to the left hand
# master lists
line_check = []
with open(left_unp) as left_bed:
for line in left_bed:
info = line.strip().split('\t')
left_coords = [int(info[1]), int(info[2])]
if left_coords not in all_intersect_left and left_coords not in all_closest_left:
line_check.append(line.strip().split('\t'))
if len(line_check) != 0:
all_new_hits, new_removed_hits = check_unpaired_hits(line_check, ref_gbk_obj, ref_feature_list, is_query_obj,
min_range, max_range, novel_gap_size, tmp_output_folder)
# add them to our current list
IS_hits = IS_hits + all_new_hits
removed_hits = removed_hits + new_removed_hits
# Then check the right hand unpaired file, again removing positions already processed
line_check = []
with open(right_unp) as right_bed:
for line in right_bed:
info = line.strip().split('\t')
right_coords = [int(info[4]), int(info[5])]
if right_coords not in all_intersect_right and right_coords not in all_closest_right:
line_check.append(line.strip().split('\t'))
if len(line_check) != 0:
all_new_hits, new_removed_hits = check_unpaired_hits(line_check, ref_gbk_obj, ref_feature_list, is_query_obj,
min_range, max_range, novel_gap_size, tmp_output_folder)
# add them to our current list
IS_hits = IS_hits + all_new_hits
removed_hits = removed_hits + new_removed_hits
write_typing_output(IS_hits, removed_hits, cds_feature_info, rrna_feature_info, trna_feature_info, final_table_file)
return IS_hits
```
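The branching in `check_unpaired_hits` and `create_typing_output` keys off the ratio of the BED gap to the IS query length. Below is a condensed sketch of that decision logic; the threshold values are assumptions for illustration only, not values taken from this file.

```python
# Illustrative only: condenses the branch structure above with assumed thresholds.
def classify_gap(gap, is_query_length, novel_gap_size=15, min_range=0.2, max_range=1.1):
    ratio = float(gap) / is_query_length
    if gap <= novel_gap_size:
        return 'novel'                # flanks nearly touch: a new insertion site
    elif min_range <= ratio <= max_range:
        return 'BLAST check'          # gap is roughly the size of the IS query itself
    elif ratio <= min_range:
        return 'novel (imprecise)'    # too small to be the IS, too large to be a clean novel hit
    else:
        return 'removed'              # too large to be explained by the IS query

for gap in (10, 120, 950, 5000):
    print(gap, classify_gap(gap, is_query_length=1000))
```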
#### File: IS_mapper/scripts/read_grouping.py
```python
import re
import pathlib
# Grouping reads and classes for storing this information
READ_PAIRING_REGEXS = [
# Matches:
# prefix_R1.fastq prefix_R2.fastq, or
# prefix_R1.fastq.gz prefix_R2.fastq.gz
re.compile(r'^(.+?)_R[12]\.(fastq(?:\.gz)?)$'),
# Matches:
# prefix_R1_001.fastq prefix_R2_001.fastq, or
# prefix_R1_001.fastq.gz prefix_R2_001.fastq.gz
re.compile(r'^(.+?)_R[12]_[0-9]+?\.(fastq(?:\.gz)?)$'),
# Matches:
# prefix_1.fastq prefix_2.fastq, or
# prefix_1.fastq.gz prefix_2.fastq.gz
re.compile(r'^(.+?)_[12]\.(fastq(?:\.gz)?)$'),
# Matches:
# prefix.fastq prefix.fastq, or
# prefix.fastq.gz prefix.fastq.gz
# Note: Final attempt, will match single readsets
re.compile(r'^(.+?)\.(fastq(?:\.gz)?)$')
]
class ReadSet():
def __init__(self, prefix, suffix, filepath):
# Set variables
self.prefix = prefix
self.suffix = suffix
self.filepath = filepath
# Precalculate some useful things
self.no_ext = self.filepath.name.replace('.%s' % self.suffix, '')
def __str__(self):
return str(self.filepath)
class ReadGroup():
def __init__(self, prefix, unpaired=None, forward=None, reverse=None):
# Check that we don't have conflicting arguments
if bool(forward) ^ bool(reverse):
raise ValueError('You must pass both forward and reverse reads')
if unpaired and forward:
raise ValueError('You cannot pass both unpaired and forward/ reverse reads')
# Ensure that all reads have the same prefix and, where applicable, the same suffix
if forward and reverse:
assert forward.prefix == reverse.prefix
assert forward.suffix == reverse.suffix
# Set the instance variables
self.prefix = prefix
self.unpaired = unpaired
self.forward = forward
self.reverse = reverse
# Convenience functions
@property
    def unpaired_fp(self):
        # these convenience properties return the filepaths of the read sets
        # stored in __init__ as unpaired/forward/reverse
        return self.unpaired.filepath
    @property
    def reverse_fp(self):
        return self.reverse.filepath
    @property
    def forward_fp(self):
        return self.forward.filepath
class ReadGroups():
def __init__(self, paired, unpaired):
self.paired = paired
self.unpaired = unpaired
def all_groups(self):
yield from self.paired
yield from self.unpaired
def group_reads(short_read_fps):
# Get a map of short read prefixes to ReadSet instances
read_map = create_prefix_map(short_read_fps)
# With the remaining short reads, create read groups
paired_reads = list()
unpaired_reads = list()
for prefix, read_sets in read_map.items():
if len(read_sets) == 1:
read_group = ReadGroup(prefix=prefix, unpaired=read_sets[0])
unpaired_reads.append(read_group)
elif len(read_sets) == 2:
forward_reads, reverse_reads = sorted(read_sets, key=lambda k: k.filepath)
read_group = ReadGroup(prefix=prefix, forward=forward_reads, reverse=reverse_reads)
paired_reads.append(read_group)
else:
# Something has gone wrong
msg_str = ('Too many reads with the same prefix, expected '
'either two or one read sets but got: %s')
raise ValueError(msg_str % ', '.join([str(rd.filepath) for rd in read_sets]))
# Return instance of the ReadGroups namedtuple
return ReadGroups(paired_reads, unpaired_reads)
def create_prefix_map(read_fps):
# Using this method should be O(n * m) at worst;
# n: number of reads; m: number of regexs
read_map = dict()
for read_fp in read_fps:
# Find suitable regex
for regex in READ_PAIRING_REGEXS:
# Apply regex
re_result = regex.match(read_fp.name)
# If it works, break
if re_result:
break
else:
# If no valid regex found, exit and report
raise ValueError('No regex found for %s (%s), unable to pair reads' % (read_fp.name, read_fp))
# Create and add read_attr to read_pairs using the common prefix as a key
read_set = ReadSet(re_result.group(1), re_result.group(2), read_fp)
try:
read_map[re_result.group(1)].append(read_set)
except KeyError:
read_map[re_result.group(1)] = [read_set]
return read_map
# for testing
def main():
# groups reads into paired and unpaired
test = group_reads([pathlib.Path("~/Desktop/ismap_v2/reads/9262_1#29_1.fastq.gz"), pathlib.Path("~/Desktop/ismap_v2/reads/9262_1#29_2.fastq.gz")])
# ISMapper will only want to work with paired reads
print(test.paired[0].forward.filepath)
for group in test.paired:
print(group.prefix)
print(group.forward)
print(group.reverse)
# But should probably output something sensible if the reads are unpaired
#print(test.unpaired)
if not test.unpaired:
print(test.unpaired)
# this will show us ALL groups
#print(test.all_groups())
# There were no unpaired examples in the first set, here is an example
#test2 = group_reads([pathlib.Path("~/Desktop/ismap_v2/reads/9262_1#29_1.fastq.gz")])
#print(test2.unpaired)
#for group in test2.unpaired:
# print(group.prefix)
# print(group.unpaired)
if __name__ == '__main__':
main()
```
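A small, self-contained illustration of how the regexes above group read files by prefix. The file names are made up, and the `read_grouping` import path is an assumption.

```python
import pathlib
from read_grouping import group_reads  # assumed module name

fps = [pathlib.Path('sampleA_R1.fastq.gz'),
       pathlib.Path('sampleA_R2.fastq.gz'),
       pathlib.Path('sampleB.fastq')]
groups = group_reads(fps)
print([g.prefix for g in groups.paired])    # ['sampleA']
print([g.prefix for g in groups.unpaired])  # ['sampleB']
print(groups.paired[0].forward.filepath)    # sampleA_R1.fastq.gz
```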
#### File: IS_mapper/scripts/run_commands.py
```python
from subprocess import call
from subprocess import check_output, CalledProcessError, STDOUT
import logging
# Exception classes
class CommandError(Exception):
pass
class BedtoolsError(Exception):
pass
def run_command(command, **kwargs):
"""
Execute a shell command and check the exit status and any O/S exceptions.
"""
command_str = ' '.join(command)
logging.info('Running: {}'.format(command_str))
try:
exit_status = call(command_str, **kwargs)
except OSError as e:
message = "Command '{}' failed due to O/S error: {}".format(command_str, str(e))
raise CommandError({"message": message})
if exit_status == 139 and command[0] == 'closestBed':
raise BedtoolsError({'message':'One or more bed files are empty. Writing out empty results table.'})
if exit_status != 0:
message = "Command '{}' failed with non-zero exit status: {}".format(command_str, exit_status)
raise CommandError({"message": message})
def check_command(command_call, command_name):
'''
Check that the dependency is installed.
Exits the program if it can't be found.
- command_list is the command to run to determine the version.
- command_name is the name of the command to show in the error message.
'''
try:
command_stdout = check_output(command_call, stderr=STDOUT)
logging.info('Found dependency %s', command_name)
except OSError as e:
logging.error("Failed command: %s", command_call)
logging.error(str(e))
logging.error("Do you have %s installed in your PATH?", command_name)
raise CommandError
except CalledProcessError as e:
# some programs such as samtools return a non zero exit status
# when you ask for the version. We ignore it here.
command_stdout = e.output
logging.info('Found dependency %s', command_name)
def make_directories(dir_list):
"""
Take a list of folders and make each directory.
"""
for directory in dir_list:
run_command(['mkdir', '-p', directory], shell=True)
def remove_files(file_list):
"""
Take a directory name or list of files and remove them
"""
for file in file_list:
run_command(['rm -rf', file], shell=True)
``` |
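A brief usage sketch of the helpers above. The tool names are placeholders, not requirements of this module, and the `run_commands` import path is an assumption.

```python
import logging
from run_commands import run_command, check_command, make_directories  # assumed import path

logging.basicConfig(level=logging.INFO)

# Verify a dependency is on PATH, then make a folder and run a simple shell command.
check_command(['bedtools', '--version'], 'bedtools')   # raises CommandError if missing
make_directories(['tmp_out'])
run_command(['ls', '-l', 'tmp_out'], shell=True)
```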
{
"source": "jhawthornesandiego/usd560",
"score": 4
} |
#### File: usd560/FixLangs/CreateTranslations.py
```python
import sys
import os
def AddImport(file_name):
import_str = "import static com.eveningoutpost.dexdrip.xdrip.gs;\n"
with open(file_name) as f:
lines = f.readlines()
# The code below assumes that imports have already been sorted.
replaced = False
with open(file_name, "w") as f:
for line in lines:
if import_str == line:
                continue
if import_str > line or line.startswith("package") or replaced:
f.write(line)
continue
f.write(import_str)
replaced = True
f.write(line)
def ReplaceString(file_name, id, string):
content = open(file_name).read()
full_string = '"%s"' %string
new_string = 'gs(R.string.%s)' % id
print('replacing ', full_string, new_string)
if full_string in content:
print('yeeeeeeeee')
content = content.replace(full_string, new_string)
file = open(file_name , "w")
file.write(content)
file.close()
def FileContainsString(file, string):
#print(file)
full_string = '"%s"' %string
if full_string in open(file).read():
return True
return False
def FindFileContaingString(id, string):
arr = []
for d,r,f in os.walk("..\\..\\"):
for file in f:
if file.endswith("java") and "generated" not in file and not "PebbleDisplay" in file :
arr.append(os.path.join(d,file))
for file in arr:
if file.startswith("..\\..\\wear"):
continue
if not FileContainsString(file, string):
continue
print(file)
ReplaceString(file, id, string)
AddImport(file)
def ReadFile(file_name):
with open(file_name) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
for line in content:
if line.strip() == '':
continue
if line.startswith("#"):
continue
if line.startswith('"') and line.endswith('"'):
line = line[1:-1]
header = line.lower().replace(' ','_')
header = header.replace('\\n','_')
header = header.replace('!','')
header = header.replace(',','')
header = header.replace(':','')
header = header.replace('?','')
header = header.replace('.','')
header = header.replace('+','')
header = header.replace('-','')
header = header.replace('(','')
header = header.replace(')','')
header = header.replace("'",'')
print (' <string name="',header,'">', line,'</string>', sep='')
FindFileContaingString(header, line)
ReadFile(sys.argv[1])
``` |
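The id generation in `ReadFile` lower-cases the English string and strips punctuation before emitting an Android string resource. A condensed, standalone illustration of that munging for one example string (no files are touched):

```python
# Condensed version of the header/id munging in ReadFile, for a single example line.
line = "Low battery, please charge!"
header = line.lower().replace(' ', '_').replace('\\n', '_')
for ch in "!,:?.+-()'":
    header = header.replace(ch, '')
print(' <string name="' + header + '">' + line + '</string>')
# -> <string name="low_battery_please_charge">Low battery, please charge!</string>
```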
{
"source": "JHay0112/jmath",
"score": 4
} |
#### File: jmath/jmath/discrete.py
```python
from typing import Union, List
# - Classes
class Node:
"""
Node of a graph.
Parameters
----------
id
Unique ID string describing the object.
weight
Weighting of node.
"""
def __init__(self, id: str, weight: float = 1):
self.id = id
self.weight = weight
self.neighbours = {}
def __str__(self) -> str:
"""String representation"""
return f"[{self.id}:{self.weight}]"
def __repr__(self) -> str:
"""Programming Representation"""
return f"Node('{self.id}', {self.weight})"
def neighbours_list(self) -> list:
"""Returns the IDs of the neighbouring nodes"""
return(list(self.neighbours.keys()))
def add_neighbour(self, neighbour: "Node", weight: float = 1, two_way: bool = False):
"""
Adds a neighbouring node
Parameters
----------
neighbour
Node object describing the neighbour.
weight
The weighting of the relationship.
two_way
Whether the relationship goes both ways.
"""
self.neighbours[neighbour] = weight
if two_way:
neighbour.add_neighbour(self, weight)
def remove_neighbour(self, neighbour: "Node"):
"""
Removes the relationship between the nodes.
Parameters
----------
neighbour
Node object describing the neighbour node
Notes
-----
Only removes the neighbourship from THIS NODE.
"""
self.neighbours.pop(neighbour)
    def relationships(self) -> None:
        """Prints a human readable description of the relationships between the node and its neighbours."""
relationship = f"{self}"
for neighbour, weight in self.neighbours.items():
relationship += "\n"
relationship += f" -{weight}-{neighbour}"
print(relationship)
class Graph:
"""
Graph object defined by a set of nodes.
"""
def __init__(self):
self.nodes = []
def add_nodes(self, *nodes):
"""
Adds node/s to the graph
Parameters
----------
\*nodes
Graph nodes to be added.
"""
# Check if first "node" is a Node
if(isinstance(nodes[0], Node)):
# Therefore *nodes is being used as expected
# Extend nodes list with *nodes tuple
self.nodes.extend(nodes)
else:
# Else an iterable is being passed as nodes[0]
self.nodes.extend(nodes[0])
def get_node(self, id) -> Union[Node, None]:
"""
Returns a node object in the graph based upon its ID. Returns None if not found.
Parameters
----------
id
The ID of the node
"""
# Loop through the nodes
for node in self.nodes:
# Check if name matches
if node.id == id:
# If so return
return node
# No node found so return none
return None
    def relationships(self) -> None:
"""Prints a human readable description of the relationship between all nodes"""
for node in self.nodes:
node.relationships()
def walk(self, start: Node, stop: Node = None, neighbour: int = 0, default_neighbour: int = 0) -> "Loop":
"""
"Walks" a loop around the graph, intended for generating loops for self.loops()
Parameters
----------
start
Node the walk starts at.
stop
Node the walk stops at.
neighbour
Initial neighbour to move to.
default_neighbour
Neighbour to move to on consequential moves.
"""
if stop == None:
stop = start
loop = [start]
node = start.neighbours_list()[neighbour]
while node != stop:
loop.append(node)
if len(node.neighbours) != 0:
node = node.neighbours_list()[default_neighbour]
else:
# Dead end
return None
# Generate graph representation
loop = Loop(loop)
return loop
def intersections(self) -> List[Node]:
"""Returns a list of nodes that have more than one connection"""
return [node for node in self.nodes if len(node.neighbours) > 1]
def loops(self) -> List["Loop"]:
"""Finds loops in the graph"""
loops = []
intersections = [node for node in self.nodes if len(node.neighbours) > 1 and self.nodes.index(node) != 0]
# For every neighbour on the primary node
for i in range(len(self.nodes[0].neighbours)):
loops.append(self.walk(self.nodes[0], neighbour = i))
# For the rest of the intersections do not walk the zeroth option
for i in range(len(intersections)):
# For every intersection
for j in range(1, len(intersections[i].neighbours)):
# For every neighbour on the intersection except 0
walk = self.walk(intersections[i], self.nodes[0], neighbour = j)
if walk != None:
loops.append(walk)
return loops
class Loop(Graph):
"""
A sub-graph structure generated by Graph.loops(), represents a distinct path around the graph.
Parameters
----------
nodes
List of node objects that define the loop.
"""
def __init__(self, nodes: List[Node]):
super().__init__()
super().add_nodes(nodes)
    def relationships(self) -> None:
"""Prints a human readable representation of the relationship between nodes"""
relationships = f"{self.nodes[0]}"
for i in range(1, len(self.nodes)):
relationships += "-"
# Only print if there is a weight
if self.nodes[i - 1].neighbours[self.nodes[i]] != 0:
relationships += f"{self.nodes[i - 1].neighbours[self.nodes[i]]}"
relationships += "-"
relationships += f"{self.nodes[i]}"
print(relationships)
def reorder(self, node: Node):
"""
Reorders the loop to start at the specified node.
Parameters
----------
node
Node to reconfigure to start at.
"""
# If the loop does not start at the reference node
if (node != self.nodes[0]):
# Rearrange to be in terms of the reference node
index = self.nodes.index(node)
self.nodes = self.nodes[index:] + self.nodes[:index]
```
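A short usage sketch of the graph and loop-finding machinery above. It assumes the module is importable (the `jmath.discrete` path is an assumption).

```python
from jmath.discrete import Node, Graph  # assumed import path

# Three nodes joined in a directed cycle a -> b -> c -> a
a, b, c = Node("a"), Node("b"), Node("c")
a.add_neighbour(b, weight=2)
b.add_neighbour(c, weight=3)
c.add_neighbour(a, weight=1)

g = Graph()
g.add_nodes(a, b, c)

loops = g.loops()          # one distinct path around the cycle
loops[0].relationships()   # prints [a:1]-2-[b:1]-3-[c:1]
```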
#### File: jmath/jmath/exceptions.py
```python
class VectorsNotSameSize(Exception):
"""Exception thrown for operations on vectors of different sizes"""
def __init__(self, message = "Operation invalid for vectors of different sizes."):
self.message = message
super().__init__(self.message)
class ZeroDistance(Exception):
"""Exception thrown for calculations with zero distance between objects."""
def __init__(self, message = "Invalid operation! Zero distance between objects."):
self.message = message
super().__init__(self.message)
class OutOfRange(Exception):
"""
Exception thrown for values that are not within expected bounds.
Parameters
----------
num_input
The input number
lower_bound
The lower boundary
upper_bound
The upper boundary
message
Appended additional message
"""
def __init__(self, num_input: float, lower_bound: float, upper_bound: float, message: str = ""):
self.message = f"'{num_input}' outside of range '{lower_bound}' to '{upper_bound}' inclusive. {message}"
super().__init__(self.message)
```
#### File: jmath/linearalgebra/planes.py
```python
from ..exceptions import VectorsNotSameSize
from typing import TypeVar
# - Globals
# Typing for Vectors and Points
Point = TypeVar("Point")
Vector = TypeVar("Vector")
class Plane:
"""
Defines a plane
Parameters
----------
point
Point on the plane.
vector1
Direction vector.
vector2
Direction vector.
Raises
------
VectorsNotSameSize
If the vectors are not the same size this error will be raised.
"""
def __init__(self, point: Point, vector1: Vector, vector2: Vector):
self.point = point
self.vectors = [vector1, vector2]
# Throw error if vectors different sizes
        if not (len(point) == len(vector1) == len(vector2)):  # all three must be the same size
raise VectorsNotSameSize()
def __len__(self):
# Size of point vector
return len(self.point)
```
#### File: jmath/linearalgebra/vectors.py
```python
import math
from functools import wraps
from typing import List, Callable, Any, Union
from ..exceptions import VectorsNotSameSize
from .lines import Line
from .planes import Plane
# - Classes
class Vector:
"""
n-dimensional Vectors
Parameters
----------
components
Scalar vector components
Examples
--------
>>> Vector(3, 2, 1) + Vector(2, 1, 1)
Vector(5, 3, 2)
>>> Vector(3, 1, 0) - Vector(9, 8, 8)
Vector(-6, -7, -8)
>>> 3 * Vector(1, 2, -4)
Vector(3, 6, -12)
>>> Vector(3, 6, 9)/3
Vector(1, 2, 3)
>>> Vector(10, 2, 1) @ Vector(1, 2, 3)
17
>>> Vector(1, 1).magnitude()
    1.4142135623730951
"""
def __init__(self, *components: List[float]):
# If components[0] is list store that list
if(type(components[0]) == list):
self.components = components[0]
else:
# Else it's *args
self.components = list(components)
def __repr__(self) -> str:
"""Programming Representation"""
return f"Vector{self.__str__()}"
def __str__(self) -> str:
"""String representation"""
string = "("
for component in self.components:
string += f"{component}, "
string = string[:-2] + ")"
return string
def __getitem__(self, index: int) -> float:
"""Subscripting."""
return self.components[index]
def __same_size(func: Callable[["Vector", "Vector"], Any]) -> Callable[["Vector", "Vector"], Any]:
"""
Wrapper that checks if vectors are the same size.
Parameters
----------
func
Function to check
Raises
------
VectorsNotSameSize
If the vectors are not the same size this error will be raised.
"""
@wraps(func)
def inner(*args, **kwargs):
if len(args[0]) == len(args[1]):
return func(*args, **kwargs)
else:
raise VectorsNotSameSize()
return inner
def __eq__(self, vector: "Vector") -> bool:
"""Tests equality of vectors"""
if isinstance(vector, Vector) or isinstance(vector, Point):
return vector.components == self.components
else:
return False
@__same_size
def __add__(self, vector: "Vector") -> "Vector":
"""Add vectors together"""
# Add the foreign components to local components and return
return Vector([i + j for i, j in zip(self.components, vector.components)])
@__same_size
def __sub__(self, vector: "Vector") -> "Vector":
"""Subtract vectors from each other"""
# Subtract the foreign components from local components and return
return Vector([i - j for i, j in zip(self.components, vector.components)])
@__same_size
def __matmul__(self, vector: "Vector") -> float:
"""The dot product of two vectors"""
return sum([i * j for i, j in zip(self.components, vector.components)])
def __mul__(self, scalar: float) -> "Vector":
"""Scalar multiplication"""
new_components = [scalar * component for component in self.components]
return Vector(new_components)
def __rmul__(self, scalar: float) -> "Vector":
"""Reverse scalar multiplication"""
return self * scalar
def __truediv__(self, scalar: float) -> "Vector":
"""Scalar division"""
return 1/scalar * self
def __rtruediv__(self, scalar: float) -> "Vector":
"""Reverse scalar division"""
return self * 1/scalar
def __len__(self) -> int:
"""Amount of components in vector"""
return len(self.components)
def unit(self) -> "Vector":
"""Returns a unit vector in the same direction as the vector."""
return self/self.magnitude()
def negative(self) -> "Vector":
"""Returns a vector of the same magnitude pointing in the opposite direction."""
neg_comp = [-component for component in self.components]
return Vector(neg_comp)
@__same_size
def projection(self, vector: Union["Vector", "Line", "Plane"]) -> "Vector":
"""
Returns projection of current vector onto the passed vector or line.
Parameters
----------
vector
Vector or line to calculate the projection onto.
Raises
------
VectorsNotSameSize
If the vectors are not the same size this error will be raised.
"""
if isinstance(vector, Plane):
plane = vector
# Compute projections onto both vectors and add
return self.projection(plane.vectors[0]) + self.projection(plane.vectors[1])
else:
# Not plane, check for line
if isinstance(vector, Line):
vector = vector.vector
# Compute projection
return (self @ vector)/(vector @ vector) * vector
def magnitude(self) -> float:
"""Calculates the vector magnitude."""
# Store magnitude while computing
magnitude = 0
# For every vector component
for c in self.components:
            # Pythagorean theorem
            # hypotenuse = sqrt(a**2 + b**2)
            magnitude = math.sqrt(magnitude ** 2 + c ** 2)
        return magnitude
@__same_size
def angle_between(self, vector: "Vector") -> float:
"""
Determines the angle (in radians) between two vectors to 5 d.p.
Parameters
----------
vector
Vector or line to calculate angle between.
Raises
------
VectorsNotSameSize
If the vectors are not the same size this error will be raised.
"""
if isinstance(vector, Line):
vector = vector.vector
return round(math.acos((self @ vector)/(self.magnitude() * vector.magnitude())), 5)
class Point(Vector):
"""
Points based on vector framework. Can be thought of as a vector from origin.
Parameters
----------
components
Coordinates in n-space
"""
def __init__(self, *components: List[float]):
return super().__init__(*components)
def on_line(self, line: "Line") -> bool:
"""
Determines whether a point is on a line, returns bool.
Parameters
----------
line
Line to determine if on
"""
results = []
# For every component in both
for i in range(len(self)):
            scalar = (self.components[i] - line.point.components[i])/line.vector.components[i]
            results.append(round(scalar, 3))
# Go through results, if any don't match, return false
return all(result == results[0] for result in results)
```
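The operator overloads above compose cleanly; a short usage sketch, assuming the `jmath` package is installed and exposes these classes the way the tests below import them, ties the docstring examples together.
```python
# Minimal usage sketch of the Vector/Point/Line API above (assumes jmath is installed).
from jmath.linearalgebra import Vector, Point, Line

v = Vector(3, 4)
w = Vector(1, 1)
print(v + w)            # (4, 5)
print(v @ w)            # 7  -- dot product via __matmul__
print(v.magnitude())    # 5.0
print(v.projection(w))  # (3.5, 3.5)
print(Point(3, 4).on_line(Line(Point(0, 0), Vector(6, 8))))  # True
```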
#### File: jmath/jmath/modular.py
```python
from typing import Tuple, Optional
# - Functions
def extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
"""
    Recursive extended Euclidean algorithm to find the greatest common divisor and its linear combination
Returns g, m, n such that gcd(a, b) = g = m*a + n*b
Parameters
----------
    a
        Number to find the greatest common divisor of with b
    b
        Number to find the greatest common divisor of with a
"""
if a == 0:
# Trivial case
return (b, 0, 1)
else:
        # Recurse on (b % a, a), which has the same gcd by the Euclidean algorithm
g, m, n = extended_gcd(b % a, a)
# Return gcd, n, and m
return (g, n - (b // a)*m, m)
def modular_inverse(a: int, set_size: int) -> Optional[int]:
"""
Finds the modular inverse of a number in a set.
Parameters
----------
a
The number to find the modular inverse of
set_size
The size of the set to find the inverse in
"""
g, m, _ = extended_gcd(a, set_size)
if g != 1:
# Not relatively prime, not invertible
return None
else:
# Relatively prime, find m inside the set
return m % set_size
```
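A worked example makes the returned Bézout coefficients concrete; the import path below follows the file layout shown in the header.
```python
# Worked example for the helpers above.
from jmath.modular import extended_gcd, modular_inverse

g, m, n = extended_gcd(3, 7)
print(g, m, n)                # 1 -2 1, since 1 = (-2)*3 + 1*7
print(modular_inverse(3, 7))  # 5, because (3*5) % 7 == 1
print(modular_inverse(4, 8))  # None: gcd(4, 8) != 1, so no inverse exists
```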
#### File: jmath/physics/simulation.py
```python
from .mechanics import PhysEnv, PhysObj, Vector, Point
from ..graphics import Canvas, Shape, Rectangle
from tkinter import Canvas as TkCanvas
from typing import Callable, Any
# - Classes
class GraphEnv(Canvas, PhysEnv):
'''
The Graphical Environment for Physical Simulations
Parameters
----------
title
String to call the interface
width
Width in pixels
height
Height in pixels
pixels_per_metre
The amount of pixels that represent one metre
fullscreen
Make screen take up full width
**kwargs
Additional tkinter style configurations
'''
def __init__(self, title: str, width: int = 800, height: int = 800, pixels_per_metre: float = 1, fullscreen: bool = False, **kwargs):
super().__init__(title, width, height, fullscreen, **kwargs)
PhysEnv.__init__(self)
self.pixels_per_metre = pixels_per_metre
@property
def pixels_per_metre(self) -> float:
"""The amount of pixels that represent one metre."""
return self._pixels_per_metre
@pixels_per_metre.setter
def pixels_per_metre(self, new: float):
"""Sets the amount of pixels per metre."""
self._pixels_per_metre = new
# Calculate metres per pixel
self._metres_per_pixel = 1/self.pixels_per_metre
# Scale all objects by new factor
[object.scale(self.pixels_per_metre) for object in self.objects]
@property
def metres_per_pixel(self) -> float:
"""The amount of metres for every pixel."""
return self._metres_per_pixel
@metres_per_pixel.setter
def metres_per_pixel(self, new: float):
"""Sets the amount of metres per pixel."""
self._metres_per_pixel = new
# Calculate pixels per metre
self._pixels_per_metre = 1/self._metres_per_pixel
# Scale all objects by factor
[object.scale(self.pixels_per_metre) for object in self.objects]
def add_object(self, new_obj: "GraphObj"):
"""
Add a new Graphics Object to the environment
Parameters
----------
new_obj
Object to add to environment
"""
# Scale an object before adding it
new_obj.scale(self.pixels_per_metre)
super().add_object(new_obj)
def start(self, time_interval: int, rate: float = 1, func: Callable[[Any], Any] = lambda *args: (None,), *args):
"""
Begins the simulation.
Parameters
----------
time_interval
The time interval to simulate over in seconds
rate
Factor to speed or slow down time by
func
Additional function to run in mainloop
*args
Arguments to be passed to the function
"""
# Construct the mainloop
def mainloop(self, time_interval, *args):
self.increment_time(time_interval)
for object in self.objects:
object.increment_position(time_interval)
if self.width < object.position[0] or object.position[0] < 0 or self.height < object.position[1] or object.position[1] < 0:
self.canvas.delete(object.shape.canvas_obj)
self.objects.remove(object)
else:
self.draw(object)
# Run custom function
args = func(*args)
return self, time_interval, args
super().start(mainloop, int((time_interval * 1000)/rate), self, time_interval, *args)
def centre(self) -> Point:
"""Computes the centre of the environment."""
return self.metres_per_pixel * super().centre()
class GraphObj(PhysObj):
"""
Graphical Representation of a Physical Object
Parameters
----------
env
The graphical environment the object belongs to
shape
The shape of the object, note that position associated with the shape will be overriden
position
The position of the object
velocity
The initial velocity of the object
mass
The mass in kilograms
charge
The charge in coulombs
"""
def __init__(self, env: GraphEnv, shape: Shape, position: Point, velocity: Vector, mass: float = 0, charge: float = 0):
self.shape = shape
super().__init__(env, position, velocity, mass, charge)
@property
def position(self) -> Point:
"""The position of the object."""
return self._position
@position.setter
def position(self, new_position: Point):
"""Sets a new position."""
# Set position
self._position = new_position
# Calculate graphical position
self.shape.x = self.position[0] * self.env.pixels_per_metre
self.shape.y = self.env.height - (self.position[1] * self.env.pixels_per_metre)
# Redraw
self.env.draw(self.shape)
def draw(self, canvas: TkCanvas):
"""
Draw shape onto graphical environment
Parameters
----------
canvas
The graphical environment to draw upon
"""
self.shape.draw(canvas)
def scale(self, factor: float):
"""
Scale the object's size by a factor
Parameters
----------
factor
The amount to scale the object by
"""
self.shape.scale(factor)
```
#### File: jmath/tests/test_crypto_tools.py
```python
from ..jmath.cryptography.tools import character_frequencies
# - Tests
def test_character_frequency():
"""Tests that character frequency analysis function."""
    # Sample text: every letter present appears at least once, and 'o' appears most often (4 times)
sample_text = "A quick brown fox jumped over the lazy dog"
sample_text = sample_text.replace(" ", "") # Strip out spaces
char_freq = character_frequencies(sample_text)
print(char_freq)
for char, count in char_freq.items():
print(char)
assert count >= 1 # All appear at least once
assert count <= 4 # 'o' appears 4 times
```
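`character_frequencies` itself is not shown in this snippet; purely as an illustration of the behaviour the test exercises (a mapping of character to occurrence count), a stand-in could be as small as the sketch below. This is not the library's actual implementation.
```python
# Hypothetical stand-in for character_frequencies, matching what the test checks.
from collections import Counter

def character_frequencies(text: str) -> dict:
    """Count how many times each character occurs in text."""
    return dict(Counter(text))

print(character_frequencies("dogdo"))  # {'d': 2, 'o': 2, 'g': 1}
```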
#### File: jmath/tests/test_linearalgebra.py
```python
from ..jmath.linearalgebra import Vector, Point, Line
from .tools import random_integer, random_integers, repeat
from typing import Tuple, List
from math import sqrt
# - Functions
def vector_component_pair(len: int = random_integer()) -> Tuple[Vector, List[int]]:
"""
Generates a vector and component pair randomly.
Parameters
----------
len
The length vector/components to generate
"""
components = random_integers(len)
return (Vector(components), components)
@repeat
def test_vector_equality():
"""Tests that vectors are equal as expected."""
v, c = vector_component_pair()
assert v == v
assert v == Vector(c)
assert v.components == c
@repeat
def test_vector_addition():
"""Tests vector addition."""
len = random_integer()
v1, c1 = vector_component_pair(len)
v2, c2 = vector_component_pair(len)
expected = Vector([i + j for i, j in zip(c1, c2)])
assert (v1 + v2) == expected
@repeat
def test_vector_subtraction():
"""Tests vector subtraction."""
len = random_integer()
v1, c1 = vector_component_pair(len)
v2, c2 = vector_component_pair(len)
expected = Vector([i - j for i, j in zip(c1, c2)])
assert (v1 - v2) == expected
@repeat
def test_vector_scaling():
"""Tests vector multiplication and division"""
# Produce initial conditions
length = random_integer(min = 3, max = 10)
    scalar = random_integer(min = 1, max = 10)
    v, c = vector_component_pair(length)
    # Mult/div vectors
    mult = v * scalar
    div = v / scalar
    # Compute expected
    mult_expected = Vector([scalar * i for i in c])
    div_expected = Vector([round(i / scalar, 5) for i in c])
# Round division vector to factor out floating point error
div.components = [round(i, 5) for i in div.components]
# Test multiply
assert mult_expected == mult
# Test divide
assert div_expected == div
@repeat
def test_dot_product():
"""Tests the dot product"""
# Generate vectors and components
len = random_integer()
v1, c1 = vector_component_pair(len)
v2, c2 = vector_component_pair(len)
# Compute dot product
dot = v1 @ v2
# Predict dot product
predicted_dot = sum([i * j for i, j in zip(c1, c2)])
assert dot == predicted_dot
def test_projection():
"""Tests projecting vectors"""
# Test with vector
vec1 = Vector(3, 4)
vec2 = Vector(1, 1)
expected = Vector(7/2, 7/2)
assert vec1.projection(vec2) == expected
# Test with line
line = Line(Point(0, 0), vec2)
assert vec1.projection(line) == expected
@repeat
def test_magnitude():
"""Tests vector magnitude"""
v, c = vector_component_pair()
# Square components, sum, and sqrt
predicted_magnitude = sqrt(sum([i ** 2 for i in c]))
assert round(predicted_magnitude, 5) == round(v.magnitude(), 5)
@repeat
def test_vector_size():
"""Tests that a vector will return the correct size"""
v, c = vector_component_pair()
assert len(v) == len(c)
def test_point_in_line():
"""Tests whether a point is in a line"""
# Test point that should be on line
point = Point(3, 4)
line = Line(Point(0, 0), 2 * point)
assert point.on_line(line)
# Test point that shouldn't be
point = Point(8, 1)
assert not point.on_line(line)
# Testing in 3-space for certainty
point = Point(1, 2, 3)
d_vector = Vector(4, 5, 6)
known_point_on_line = Point(9, 12, 15)
line = Line(point, d_vector)
assert known_point_on_line.on_line(line)
def test_angle_between():
"""Tests that angle between vectors is correct"""
vec1 = Vector(1, 1)
vec2 = Vector(2, 2)
assert vec1.angle_between(vec2) == 0
@repeat
def test_negative():
"""Test that a negative vector does indeed give one with all the components reversed"""
# Generate vector component pair
v, c = vector_component_pair()
# Make negative vector
v = v.negative()
# Make components negative
c = Vector([-i for i in c])
assert v == c
def test_unit_vector():
"""Tests that a unit vector is produced correctly"""
vec = Vector(1, 2)
unit_vec = vec.unit()
assert round(unit_vec.magnitude(), 10) == 1
assert vec.magnitude() * unit_vec == vec
@repeat
def test_vector_subscripts():
"""Tests that a vector can be subscripted."""
# Generate vector component pair
v, c = vector_component_pair()
# Iterate through components and check they match
for i in range(len(c)):
assert v[i] == c[i]
```
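The `tools` helpers these tests import (`random_integer`, `random_integers`, `repeat`) are not included in the snippet; a minimal sketch with the behaviour the tests rely on might look as follows. The names match the imports, but the defaults and repeat count are assumptions, not the repository's code.
```python
# Hypothetical versions of the test helpers imported above.
import random
from functools import wraps

def random_integer(min: int = 2, max: int = 10) -> int:
    """Return a random integer in [min, max]."""
    return random.randint(min, max)

def random_integers(length: int) -> list:
    """Return a list of `length` random integers."""
    return [random_integer() for _ in range(length)]

def repeat(func):
    """Decorator: run the wrapped test several times per call."""
    @wraps(func)
    def inner(*args, **kwargs):
        for _ in range(10):
            func(*args, **kwargs)
    return inner
```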
#### File: jmath/tests/test_modular.py
```python
from ..jmath.modular import extended_gcd, modular_inverse
from .tools import random_integer, repeat
from math import gcd
# - Tests
@repeat
def test_gcd():
"""Tests extended gcd function."""
n1 = random_integer(0, 100)
n2 = random_integer(0, 100)
g, m, n = extended_gcd(n1, n2)
assert g == gcd(n1, n2)
assert g == m*n1 + n*n2
@repeat
def test_modular_inverse():
"""Tests modular inverse calculator."""
a = random_integer(0, 100)
b = random_integer(a, 100)
a_inverse = modular_inverse(a, b)
# If a is not relatively prime with b then there is no inverse
if gcd(a, b) != 1:
assert a_inverse == None
else:
assert 1 == a*a_inverse % b
```
#### File: jmath/tests/test_uncertainties.py
```python
from ..jmath.uncertainties import *
from .tools import repeat, random_integer
import math
# - Functions
def test_addition():
"""Tests uncertainty addition"""
added = Uncertainty(2, 1) + Uncertainty(5, 2)
expected = Uncertainty(7, 3)
assert added.value == expected.value
assert added.abs_uncertainty() == expected.abs_uncertainty()
def test_subtraction():
"""Tests uncertainty subtraction"""
subtracted = Uncertainty(3, 1) - Uncertainty(20, 3)
expected = Uncertainty(-17, 4)
assert subtracted.value == expected.value
assert subtracted.abs_uncertainty() == expected.abs_uncertainty()
def test_multiplication():
"""Tests uncertainty multiplication"""
multiplied = Uncertainty(10, 1) * Uncertainty(3, 1)
expected = Uncertainty(30, 13)
assert multiplied.value == expected.value
assert multiplied.abs_uncertainty() == expected.abs_uncertainty()
def test_division():
"""Tests uncertainty division"""
divided = Uncertainty(3, 1) / Uncertainty(3, 2)
expected = Uncertainty(1, 1)
assert divided.value == expected.value
assert divided.abs_uncertainty() == expected.abs_uncertainty()
def test_function_application():
"""Tests applying a function to an uncertainty"""
result = Uncertainty(5, 2).apply(math.sqrt)
expected = Uncertainty(2.2, 0.4)
assert round(result.value, 1) == expected.value
assert round(result.abs_uncertainty(), 1) == expected.abs_uncertainty()
@repeat
def test_int_in_uncertainty():
"""Tests for an int inside an uncertain range."""
value = random_integer()
abs_unc = random_integer(1, 50)
unc = Uncertainty(value, abs_unc)
assert value in unc
assert (value + abs_unc) in unc
assert (value - abs_unc) in unc
assert (value + abs_unc + 1) not in unc
assert (value - abs_unc - 1) not in unc
def test_uncertainty_overlap():
"""Tests that uncertainties overlap as expected."""
u1 = Uncertainty(3, 2)
u2 = Uncertainty(5, 1)
u3 = Uncertainty(6, 0.5)
assert u1 in u2
assert u2 in u3
assert u3 not in u1
``` |
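The expected values in these tests follow first-order error propagation: absolute uncertainties add for sums and differences, while relative uncertainties add for products and quotients. A quick check of the multiplication case:
```python
# Why test_multiplication expects Uncertainty(30, 13).
value = 10 * 3                 # 30
relative = 1/10 + 1/3          # relative uncertainties add for products
absolute = value * relative    # 13/30 of 30
print(value, round(absolute))  # 30 13
```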
{
"source": "jhaydel/051616PoC",
"score": 3
} |
#### File: 051616PoC/helper_scripts/apply_udev.py
```python
import os
import re
import time
import argparse
import subprocess
parser = argparse.ArgumentParser(description='UDEV Remap Script -- Rename interfaces using UDEV Rules')
parser.add_argument('-v','--verbose', action='store_true',
help='enables verbose logging mode')
parser.add_argument('-a','--add', nargs=2, action='append',
help='Specify a mac address followed by an interface')
parser.add_argument('-d','--delete', action='append',
    help='Specify a mac address to be removed from the existing UDEV rules.')
parser.add_argument('-s','--show', action='store_true',
help='Show the existing UDEV Rules.')
parser.add_argument('-nv','--no-vagrant-interface', action='store_true',
help='Using this option will not create a vagrant interface during the application of rules.')
parser.add_argument('-nd','--no-vagrant-default', action='store_true',
help='Using this option will not create a vagrant default route when applying the re-map.')
parser.add_argument('-vm','--vagrant_mapping', action='store_true',
help='Using this option will create the mapping for the vagrant interface that happens automatically during the apply option.')
parser.add_argument("--vagrant-name", default='vagrant',
help='The name of the vagrant interface (default "vagrant")')
parser.add_argument("--apply", action='store_true',
help='Apply the remap as it has been provided.')
def is_mac(mac):
if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$",mac.lower()):
mac=mac.lower().replace("-",":")
elif re.match("[0-9a-f]{12}$",mac.lower()):
mac=mac.lower()
mac=':'.join(mac[i:i+2] for i in range(0,len(mac),2))
else:
print " ### ERROR: MAC address --> " + str(mac) + " is not valid."
exit(1)
return mac
def show_rules():
#Show Existing Rules
print "#### UDEV Rules (/etc/udev/rules.d/70-persistent-net.rules) ####"
if not os.path.isfile(udev_file):
if verbose: print " >>> No Rules Present or File Does Not Exist <<<"
return
rules=subprocess.check_output(["cat",udev_file]).split('\n')
for line in rules: print line
def parse_interfaces():
#Parse Interfaces
output=subprocess.check_output(["ip","link","show"]).split('\n')
ifindex=""
interface=""
mac=""
index_map={}
#parse ip link show output for interface, ifindex and MAC
for line in output:
if re.match("^.*LOOPBACK.*$",line): continue #skip loopbacks
elif re.match("^[0-9]+:.*$",line): #look for lines that start with an ifindex
cut_line=line.split()
ifindex=cut_line[0][:-1]
interface=cut_line[1][:-1]
elif re.match("^.*link/ether.*$",line): #look for lines that have link/ether
cut_line=line.split()
mac=cut_line[1]
if verbose: print "interface: " + interface + " index: " + str(ifindex) + " mac: " + mac
index_map[interface]={"index":ifindex,"mac":mac}
for interface in index_map:
if verbose: print "determining driver for interface: " + interface
success=False
#Method1
try:
ethtool_output=subprocess.check_output(["ethtool","-i",interface]).split('\n')
driver = ethtool_output[0].split(":")[1][1:]
except (subprocess.CalledProcessError, OSError), e:
#Method 2
try:
driver=subprocess.check_output(["basename $(readlink /sys/class/net/"+interface+"/device/driver/module) > /dev/null 2>&1"],shell=True).replace("\n","")
except subprocess.CalledProcessError, e:
try:
driver=subprocess.check_output(["basename $(readlink /sys/class/net/"+interface+"/device/driver) > /dev/null 2>&1"],shell=True).replace("\n","")
except subprocess.CalledProcessError, e:
print " ### ERROR Tried 3 methods to determine device driver. All Failed."
exit(1)
index_map[interface]["driver"]=driver
if verbose: print "interface: " + interface + " driver: " + driver
return index_map
def delete_rule(mac):
if not os.path.isfile(udev_file):
if verbose: print "WARN: delete of rule not possible, udev file does not exist."
return
#Delete rule with MAC address
if verbose:
print ">>> BEFORE"
show_rules()
remove_rule=subprocess.check_output(["sed -i '/"+mac+"/d' " + udev_file],shell=True).split('\n')
if verbose:
print "<<< AFTER"
show_rules()
def add_rule(mac,interface):
index_map=parse_interfaces()
print " INFO: Adding UDEV Rule: " + mac + " --> " + interface
mac_found=False
for interface_1 in index_map:
if index_map[interface_1]['mac'] == mac: mac_found = True
if not mac_found:
print " WARNING: this MAC address presently does not belong to any device on the system."
if verbose:
print "deleting any matching rules to be safe..."
delete_rule(mac)
with open("/etc/udev/rules.d/70-persistent-net.rules","a") as udev_file:
udev_file.write("""ACTION=="add", SUBSYSTEM=="net", ATTR{address}==\"""" + mac +"\", NAME=\""+interface+"\", SUBSYSTEMS==\"pci\" \n")
if verbose: show_rules()
def apply_remap():
global just_vagrant
index_map=parse_interfaces()
if not just_vagrant:
print " INFO: Applying new UDEV Rules..."
drivers={}
lowest_index=""
lowest_index_interface=""
#Determine Driver and lowest index
for interface in index_map:
if lowest_index == "":
lowest_index = index_map[interface]["index"]
lowest_index_interface = interface
elif int(index_map[interface]["index"]) < int(lowest_index):
#Confirm that it is a physical interface and not a logical device
try:
subprocess.check_call(["udevadm info -a -p /sys/class/net/"+interface+""" | grep 'SUBSYSTEMS=="pci"' > /dev/null"""],shell=True)
except subprocess.CalledProcessError, e:
continue
lowest_index = index_map[interface]["index"]
lowest_index_interface = interface
if verbose:
print interface
            print "    lowest_index: " + str(lowest_index) + " --> " + str(lowest_index_interface)
print " index: " + index_map[interface]["index"]
print " mac: " + index_map[interface]["mac"]
print " driver: " + index_map[interface]["driver"]
if index_map[interface]["driver"] not in drivers: drivers[index_map[interface]["driver"]]= True
#Leave tunnel and bridge devices alone
if "tun" in drivers: del drivers["tun"]
if "bridge" in drivers: del drivers["bridge"]
if "vxlan" in drivers: del drivers["vxlan"]
if "bond" in drivers: del drivers["bond"]
if verbose:
print "lowest_index_interface: " + lowest_index_interface
print "lowest_index: " + str(lowest_index)
print drivers
global vagrant_name
if use_vagrant_interface:
add_rule(index_map[lowest_index_interface]["mac"], vagrant_name)
print " FYI: "+lowest_index_interface + " will become the vagrant interface"
if just_vagrant: return 0
for driver in drivers:
dead_drop=subprocess.check_output(["modprobe","-r",driver])
dead_drop=subprocess.check_output(["udevadm","control","--reload-rules"])
dead_drop=subprocess.check_output(["udevadm","trigger"])
time.sleep(4)
if use_vagrant_interface:
dead_drop=subprocess.check_output(["ifup vagrant"],shell=True)
time.sleep(1)
if use_vagrant_default:
dead_drop=subprocess.check_output(["ip route delete default dev vagrant"],shell=True)
output=subprocess.check_output(["ip","link","show"]).split('\n')
print "### PRESENT INTERFACES ###"
for line in output:
print line
def main():
global verbose
verbose=False
global udev_file
udev_file="/etc/udev/rules.d/70-persistent-net.rules"
global use_vagrant_interface
use_vagrant_interface=True
global use_vagrant_default
use_vagrant_default=True
add=False
show=False
delete=False
global just_vagrant
just_vagrant=False
global vagrant_name
apply=False
additions=[]
removals=[]
args = parser.parse_args()
if args.verbose: verbose=args.verbose
if args.add:
add=True
for mac,interface in args.add: additions.append([is_mac(mac),interface])
if args.delete:
delete=True
for mac in args.delete: removals.append(is_mac(mac))
if args.show: show=True
if args.no_vagrant_interface: use_vagrant_interface=False
if args.vagrant_mapping:
apply=True
just_vagrant=True
if args.no_vagrant_default: use_vagrant_default=False
if args.apply: apply=True
vagrant_name = args.vagrant_name
if verbose:
print "Arguments:"
print args
if show: show_rules()
elif delete == True:
for mac in removals: delete_rule(mac)
elif add == False: apply_remap()
elif add == True:
for mac,interface in additions: add_rule(mac,interface)
if __name__ == "__main__":
main()
exit(0)
``` |
{
"source": "jhaydter/graviton2-workshop",
"score": 2
} |
#### File: graviton2/lambda_graviton/function.py
```python
from aws_cdk import (
core,
aws_lambda as _lambda,
)
class CdkLambdaStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Defines an AWS Lambda resource
my_lambda = _lambda.Function(
self, 'HelloHandler',
runtime=_lambda.Runtime.PYTHON_3_6,
code=_lambda.Code.from_asset('graviton2/lambda_graviton'),
handler='hello.handler',
)
core.CfnOutput(self, "LambdaName",value=my_lambda.function_name)
``` |
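The stack wires its handler to `hello.handler` inside `graviton2/lambda_graviton`; that module is not shown here. A minimal handler compatible with the configuration above could look like the sketch below (illustrative only, not the repository's file).
```python
# graviton2/lambda_graviton/hello.py -- hypothetical handler matching the stack above.
import json

def handler(event, context):
    """Return a simple HTTP-style response."""
    return {
        "statusCode": 200,
        "body": json.dumps({"message": "Hello from Lambda!"}),
    }
```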
{
"source": "jhayer/cr_travis_exo",
"score": 4
} |
#### File: jhayer/cr_travis_exo/example.py
```python
def reverse_words(s):
"""
Reverses order or words in string s.
"""
words = s.split()
words_reversed = words[::-1]
return ' '.join(words_reversed)
def test_reverse_words():
assert reverse_words('dogs hate cats') == 'cats hate dogs'
assert reverse_words('dog eat dog') == 'dog eat dog'
assert reverse_words('one two three four') == 'four three two one'
def get_word_lengths(s):
"""
Returns a list of integers representing
the word lengths in string s.
"""
# uncomment next line in step 9
return [len(word) for word in s.split()]
return None
# uncomment this function in step 6
def test_get_word_lengths():
text = "Three tomatoes are walking down the street"
assert get_word_lengths(text) == [5, 8, 3, 7, 4, 3, 6]
def obscure_function():
"""
Example of a function that is never tested.
"""
do_something_strange()
``` |
{
"source": "jhazelwo/python-fileasobj",
"score": 4
} |
#### File: python-fileasobj/docs/examples.py
```python
from fileasobj import FileAsObj
def example_read_exists():
"""
Reading a file that already exists.
This will raise an exception if the file does not exist.
"""
my_file = FileAsObj('/tmp/example_file.txt')
def example_file_create():
"""
Creating an object for a file that does NOT exist but we wish to create.
If the file exists this will truncate it.
"""
my_file = FileAsObj()
my_file.filename = '/tmp/a_file.txt'
my_file.save()
def example_read_catch_errors():
""" Reading a file and catch errors. """
try:
my_file = FileAsObj()
my_file.read('/tmp/example_file.txt')
except Exception as msg:
print(msg)
def example_search_file_with_regex():
""" Find mail servers in a hosts file that have IPs starting with 172. """
my_file = FileAsObj('/etc/hosts')
result = my_file.egrep('^172.*mail[0-9]')
print(result)
def example_search_for_whole_line_using_contains():
""" Find a complete line in file. """
my_file = FileAsObj('/etc/hosts')
if '127.0.0.1 localhost' in my_file:
return True
def example_search_for_whole_line_using_check():
""" Shorter version, find a complete line in file. """
my_file = FileAsObj('/etc/hosts')
return my_file.check('127.0.0.1 localhost')
def example_search_for_word_using_grep():
""" Find a complete line in file. """
my_file = FileAsObj('/etc/hosts')
if my_file.grep('localhost'):
return True
def example_add_line_to_file():
""" Different methods to append a given line to the file, all work the same. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.add('foo')
my_file.append('bar')
# Add a new line to my_file that contains the word 'lol' and print True|False if my_file was changed.
print(my_file + 'lol')
# Add line even if it already exists in the file.
my_file.unique = False
my_file.add('foo')
def example_add_list_of_lines_to_file():
"""
Add a list() of strings, each on its own line.
Same as the previous example you can use .append() or '+'.
"""
my_file = FileAsObj('/tmp/example_file.txt')
lines_to_add = ['simultaneous', 'money shot', 'remedy']
my_file.add(lines_to_add)
def example_print_match_count():
""" Print number of lines in the file (as it exists in memory) """
my_file = FileAsObj('/tmp/example_file.txt')
print(len(my_file))
def example_remove_lines_matching_substring():
""" Remove all lines that CONTAIN 'bad string' """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.rm(my_file.grep('bad string'))
my_file.save()
def example_remove_lines_matching_string():
""" Remove all lines that ARE '# T0DO: remove this line.' (This matches an entire line.) """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.rm('# T0DO: remove this line.')
my_file.save()
def example_remove_lines_matching_string_with_sub():
""" Remove all lines that ARE "# T0DO: remove this line." using __sub__ shortcut. (This matches an entire line.) """
my_file = FileAsObj('/tmp/example_file.txt')
my_file - '# T0DO: remove this line.'
my_file.save()
def example_remove_lines_matching_string_with_print():
""" Remove all lines that ARE "# T0DO: remove this line." and print(True|False) if my_file was changed. """
my_file = FileAsObj('/tmp/example_file.txt')
print(my_file.rm('# T0DO: remove this line.'))
my_file.save()
def example_get_lines_matching_substring():
""" Get all lines that contain a # anywhere in the line. """
my_file = FileAsObj('/tmp/example_file.txt')
result = my_file.grep('#')
return result
def example_print_lines_matching_substring():
""" Print all lines that contain a # anywhere in the line. """
my_file = FileAsObj('/tmp/example_file.txt')
print(my_file.grep('#'))
def example_write_file_to_disk_if_changed():
""" Try to remove all comments from a file, and save it if changes were made. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.rm(my_file.egrep('^#'))
if my_file.changed:
my_file.save()
def example_show_files_change_log():
"""
All actions that FileAsObj takes are logged internally in the log sub-class, this is required by some audits.
You can view the log by calling __str__ on the sub-class. This example uses print()
"""
my_file = FileAsObj('/tmp/example_file.txt')
# ...any code that changes the file here...
print(my_file.log)
def example_manually_update_change_log():
""" You can inject an arbitrary message to the log sub-class by calling it. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.log('A manual log entry.')
def example_sort_in_place():
""" To sort contents in place after the file is read. """
my_file = FileAsObj('/tmp/example_file.txt')
my_file.sort()
def example_sort_during_read():
"""
To sort contents during read().
The .sorted attribute is checked every time contents are modified.
Whenever a change occurs if sorted is True the contents are sorted with self.sort().
"""
my_file = FileAsObj()
my_file.sorted = True
my_file.read('/tmp/example_file.txt')
def example_all():
"""
Use a bunch of methods on a file.
"""
my_file = FileAsObj()
my_file.filename = '/tmp/example_file.txt'
my_file.add('# First change!')
my_file.save()
my_file = FileAsObj('/tmp/example_file.txt')
my_file.unique = True
my_file.sorted = True
my_file.add('1')
my_file.add('1')
my_file.add('2')
my_file.add('20 foo')
my_file.add('200 bar')
my_file.add('# Comment')
my_file.unique = False
my_file.add('# Comment')
my_file.add('# Comment')
my_file.unique = True
my_file.rm(my_file.egrep('^#.*'))
my_file.rm(my_file.grep('foo'))
my_file.replace(my_file.egrep('^2'), 'This line was replaced.')
print(my_file)
print(my_file.log)
``` |
{
"source": "jhazelwo/python-netshow",
"score": 3
} |
#### File: python-netshow/netshow/netshow.py
```python
import re
import sys
import pwd
import os
import glob
import socket
import struct
import json
from pprint import pprint
class QuietError(Exception):
# All who inherit me shall not traceback, but be spoken of cleanly
pass
class RegexError(QuietError):
# Invalid regex pattern
pass
class NetShow(object):
""" Object to hold data about network connections. """
def __init__(self):
""" . """
self.use_header = True # Show header in __str__ output
self.as_json = False # Output as list of dicts.
self.as_dict = False # Output as iterable of JSON objects.
self.results = [] # Store filter results, if any.
self.contents = [] # Store complete network stack as list of dicts.
self.contents.extend(self.proc_to_dict('tcp'))
self.contents.extend(self.proc_to_dict('udp'))
self.contents.extend(self.proc_to_dict('tcp6'))
self.contents.extend(self.proc_to_dict('udp6'))
def usage(self):
return """
netshow.py, version 1.0
usage: netshow.py [--json|--dict|-s|-h] ['PATTERN' | WORDS]
--json : Output as iterable of JSON objects.
--dict : Output as list of dicts.
-s : Hide header
-h|--help : Help
Wrap regex in single quotes.
Words can be any whole-string match.
examples:
netshow.py 80
netshow.py tcp6
netshow.py tcp 22
netshow.py 10.2.3.4 53 'tcp|udp'
netshow.py '.*sshd$'
"""
def search_dict_values(self, pattern, d):
""" . """
pattern = str(pattern)
is_regex = False
        special_characters = ['^', '*', '?', '[', '(', '|', '$']
        for has in special_characters:
if has in pattern:
is_regex = True
if is_regex:
for v in d.values():
try:
if re.match(pattern, v):
return d
except Exception as e:
raise RegexError(e)
else:
if pattern in d.values():
return d
return False
def match_all_needles(self, needle, haystack):
""" . """
for n in needle:
if n not in haystack and not self.search_dict_values(n, haystack):
return False
return haystack
def filter(self, params):
""" . """
if not params:
return True
for connection in self.contents:
match = self.match_all_needles(params, connection)
if match:
self.results.append(match)
if not self.results:
return False
return True
def line_to_dict(self, line, protocol):
""" Construct dict of elements in {line}. """
d = {}
connection_states = {
'01':'ESTABLISHED',
'02':'SYN_SENT',
'03':'SYN_RECV',
'04':'FIN_WAIT1',
'05':'FIN_WAIT2',
'06':'TIME_WAIT',
'07':'CLOSE',
'08':'CLOSE_WAIT',
'09':'LAST_ACK',
'0A':'LISTEN',
'0B':'CLOSING' }
line_array = self._remove_empty(line.split(' '))
d['protocol'] = protocol
d['local_ip'], d['local_port'] = self._convert_ip_port(line_array[1])
d['remote_ip'], d['remote_port'] = self._convert_ip_port(line_array[2])
if 'tcp' in protocol:
d['state'] = connection_states[line_array[3]]
else:
d['state'] = ''
d['pid'] = self.pid_of_inode(line_array[9])
d['program'] = self.name_of_pid(d['pid'])
return d
def proc_to_dict(self, protocol):
""" Return list of dicts of /proc/net/{protocol}. """
if protocol not in ['tcp', 'tcp6', 'udp', 'udp6']:
raise TypeError('Unknown protocol {0}'.format(protocol))
l = []
with open('/proc/net/{0}'.format(protocol), 'r') as handle:
for line in handle:
line = line.rstrip('\n').strip(' ')
if ':' in line:
l.append(self.line_to_dict(line, protocol))
return l
def _convert_ip(self, address):
"""
Convert and squash addresses to familiar format.
ipv6 Convert '000080FE00000000FF565002BD69B1FE'
To 'fe80::250:56ff:feb1:69bd'
ipv4 Convert '8A8E1CAC'
To '172.28.142.138'
"""
if len(address) > 16:
## http://stackoverflow.com/questions/41940483
try:
address = address.decode('hex') # Python2
except AttributeError:
address = bytes.fromhex(address) # Python3
address = struct.unpack('>IIII', address)
address = struct.pack('@IIII', *address)
address = socket.inet_ntop(socket.AF_INET6, address).lower()
else:
address = '{0}.{1}.{2}.{3}'.format(
(self._hex2dec(address[6:8])),
(self._hex2dec(address[4:6])),
(self._hex2dec(address[2:4])),
(self._hex2dec(address[0:2]))
)
return address
def _hex2dec(self, this):
""" . """
return str(int(this,16))
def _remove_empty(self, this):
""" . """
return [x for x in this if x]
def _convert_ip_port(self, array):
""" Convert ipaddress and port from hex to decimal."""
host,port = array.split(':')
_port = self._hex2dec(port)
if _port == '0':
_port = '*'
return self._convert_ip(host),_port
def pid_of_inode(self, inode):
""" Find PID of process bound to given inode. """
for item in glob.glob('/proc/[0-9]*/fd/[0-9]*'):
try:
if '[{0}]'.format(inode) in os.readlink(item):
return item.split('/')[2]
except:
pass
return '' # TIME_WAIT
def name_of_pid(self, pid):
""" Return /name/of/program if possible. """
if pid:
try:
return os.readlink('/proc/{0}/exe'.format(pid))
except:
pass
return '' # No permission to see cmd (not owner or root)
def __str__(self):
""" Return contents as multi-line string similar to netstat. """
template = '{protocol:<5} {local_ip:>39} {local_port:<5} ' + \
'{remote_ip:>39} {remote_port:<5} {state:<11} {pid:>5} {program}\n'
s = ''
subject = self.contents
        if self.results:
subject = self.results
if self.as_json:
return str(json.dumps(subject))
if self.as_dict:
return str(self.contents)
if self.use_header:
s = template.format(
protocol = 'Proto',
local_ip = 'Local Address',
local_port = 'Port',
remote_ip = 'Foreign Address',
remote_port = 'Port',
state = 'State',
pid = 'PID',
program = 'Program name'
)
for c in subject:
s += template.format(
protocol = c['protocol'],
local_ip = c['local_ip'],
local_port = c['local_port'],
remote_ip = c['remote_ip'],
remote_port = c['remote_port'],
state = c['state'],
pid = c['pid'],
program = c['program']
)
return s.rstrip('\n')
def quiet_hook(kind, message, traceback):
if QuietError in kind.__bases__:
print('{0}: {1}'.format(kind.__name__, message)) # Only print Error Type and Message
else:
sys.__excepthook__(kind, message, traceback) # Print Error Type, Message and Traceback
sys.excepthook = quiet_hook
if __name__ == '__main__':
netstat = NetShow()
args = sys.argv[1:]
if '--help' in args or '-h' in args:
print(netstat.usage())
exit(0)
if '--json' in args and '--dict' in args:
print('--json and --dict are mutually exclusive')
exit(1)
if '--json' in args:
netstat.as_json = True
args.remove('--json')
if '--dict' in args:
netstat.as_dict = True
args.remove('--dict')
if '-s' in args:
netstat.use_header = False
args.remove('-s')
if args and not netstat.filter(args):
exit(1)
print(netstat)
``` |
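Besides the CLI entry point, `NetShow` can be driven programmatically; a short sketch, assuming a Linux host with `/proc` and the module importable as `netshow`:
```python
# Programmatic use of NetShow (run as root to resolve PIDs of other users' sockets).
from netshow import NetShow

ns = NetShow()
ns.use_header = False
if ns.filter(['tcp', '22']):  # keep connections matching both terms
    print(ns)                 # prints only the filtered rows
```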
{
"source": "jhb187/modbus-tcp2mqtt",
"score": 2
} |
#### File: jhb187/modbus-tcp2mqtt/modbus-tcp2mqtt.py
```python
import json
import _thread
import datetime
import threading
import argparse
import logging
import logging.handlers
import time
import paho.mqtt.client as mqtt
import paho.mqtt.subscribe as subscribe
import sys
import configparser
import traceback
from pyModbusTCP.client import ModbusClient
parser = argparse.ArgumentParser(description='Bridge between Modbus TCP and MQTT')
parser.add_argument('--mqtt-host', default='localhost', help='MQTT server address. \
Defaults to "localhost"')
parser.add_argument('--mqtt-port', default='8883', type=int, help='MQTT server port. \
Defaults to 8883')
parser.add_argument('--mqtt-topic', default='', help='Topic prefix to be used for \
    subscribing/publishing. Defaults to an empty prefix')
parser.add_argument('--modbus-host', help='Modbus server address')
parser.add_argument('--modbus-port', default='502', type=int, help='Modbus server port. \
Defaults to 502')
parser.add_argument('--registers', help='Register definition file. Required!')
parser.add_argument('--frequency', default='50', help='How often is the source \
    checked for the changes, in seconds. Only integers. Defaults to 50')
parser.add_argument('--only-changes', default='False', help='When set to True then \
only changed values are published')
args = parser.parse_args()
# logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Attach this logger wherever else it is needed.
#logger.info("Start print log")
topic = args.mqtt_topic
# if not topic.endswith("/"):
# topic += "/"
frequency = int(args.frequency)
print("ModBusTcp2Mqtt App Started...Time:%s" % (datetime.datetime.now()))
mbClient = None
lastValue = {}
config = configparser.ConfigParser()
config.read(args.registers)
config01 = config['0x01']
config02 = config['0x02']
config03 = config['0x03']
config04 = config['0x04']
config05 = config['0x05']
config06 = config['0x06']
config0F = config['0x0F']
config10 = config['0x10']
config2B = config['0x2B']
# Any received value in the upper range (32768-65535)
# is interpreted as negative value (in the range -32768 to -1).
def reMap(value, maxInput=65535, minInput=64535, maxOutput=-1, minOutput=-1001):
# if value >= minInput:
# value = maxInput if value > maxInput else value
# value = minInput if value < minInput else value
#
# inputSpan = maxInput - minInput
# outputSpan = maxOutput - minOutput
#
# scaledThrust = float(value - minInput) / float(inputSpan)
#
# return minOutput + (scaledThrust * outputSpan)
# else:
return value
class Element:
def __init__(self, row):
self.topic = row[0]
self.value = row[1]
def publish(self):
try:
if self.value != lastValue.get(self.topic, 0) or args.only_changes == 'False':
lastValue[self.topic] = self.value
fulltopic = topic + self.topic
## mqClient.subscribe(fulltopic)
mqClient.publish(fulltopic, reMap(self.value), qos=1, retain=False)
except Exception as exc:
logging.info("Error reading " + self.topic + ": %s", exc)
def readMb():
#while True:
# open or reconnect TCP to server
logger.info("readMb Run...Time:%s" % (datetime.datetime.now()))
if not mbClient.is_open():
if not mbClient.open():
logging.error("unable to connect to " + SERVER_HOST + ":" + str(SERVER_PORT))
data = []
for key, value in config01.items():
        # Filter out the command keys while reading
if mbClient.is_open() and not str(key).__contains__('command'):
row = mbClient.read_coils(int(value))
if not row is None:
row.insert(0, key)
data.append(row)
for key, value in config02.items():
if mbClient.is_open() and not str(key).__contains__('command'):
row = mbClient.read_discrete_inputs(int(value))
if not row is None:
row.insert(0, key)
data.append(row)
for key, value in config03.items():
if mbClient.is_open() and not str(key).__contains__('command'):
row = mbClient.read_holding_registers(int(value))
if not row is None:
row.insert(0, key)
data.append(row)
for key, value in config04.items():
if mbClient.is_open() and not str(key).__contains__('command'):
row = mbClient.read_input_registers(int(value))
if not row is None:
row.insert(0, key)
data.append(row)
for row in data:
e = Element(row)
e.publish()
#time.sleep(int(frequency))
global timer
timer = threading.Timer(120, readMb)
timer.start()
logger.info("readMb Started...Time:%s" % (datetime.datetime.now()))
# Publish command acknowledgements back over MQTT
def down_back(result):
callback_topic = "command/huailaiwaterworks/one/downback"
mqClient.publish(callback_topic, result, qos=1, retain=False)
print("publishCallBack-Msg:%s" % result)
logger.info("publishCallBack-Msg:%s" % result)
# call back msg
def msgCallback():
def on_message_print(client, userdata, message):
msg = str(message.payload)
newstr = msg.strip('b')
print("callback msg:%s" % (newstr))
logger.info("callback msg:%s" % (newstr))
if not mbClient.is_open():
if not mbClient.open():
logging.error("unable to connect to " + SERVER_HOST + ":" + str(SERVER_PORT))
mbClient.open()
print("reconnected to modbus finished...")
        # Waterworks pump on/off coils
for key, value in config01.items():
if mbClient.is_open():
if newstr == key:
row = mbClient.read_coils(int(value))
print("coils-read-back:%s" % (row))
logger.info("coils-read-back:%s" % (row))
result1 = mbClient.write_single_coil(int(value), True)
row = mbClient.read_coils(int(value))
print("coils-write-back1:%s ,NOW Status:%s" % (result1, row))
logger.info("coils-write-back1:%s ,NOW Status:%s" % (result1, row))
                    time.sleep(2)  # allow time for the PLC to react
result2 = mbClient.write_single_coil(int(value), False)
row = mbClient.read_coils(int(value))
                    # Publish the execution acknowledgement as well
if result1 is not None:
if result1:
down_back(newstr + '/0000')
if result1 is None or row is None:
down_back(newstr + '/9999')
print("coils-write-back2:%s,NOW Status:%s" % (result2, row))
print(key + ":coils-operation-over...")
logger.info("coils-write-back2:%s,NOW Status:%s" % (result2, row))
logger.info(key + ":coils-operation-over...")
        # Holding registers: booster station and wells
for key, value in config03.items():
if mbClient.is_open():
if newstr == key:
                    # Build the write value from the topic
                    # Booster station: check whether the first position is the zero bit.
                    # The address order is wrong (pump 1 triggers pump 4, pump 3 triggers pump 1, pump 4 triggers pump 3), so adjust accordingly
if 'station_pump4#start' in newstr:
write_value = 2
if 'station_pump4#stop' in newstr:
write_value = 4
if 'station_pump2#start' in newstr:
write_value = 8
if 'station_pump2#stop' in newstr:
write_value = 16
if 'station_pump1#start' in newstr:
write_value = 32
if 'station_pump1#stop' in newstr:
write_value = 64
if 'station_pump3#start' in newstr:
write_value = 128
if 'station_pump3#stop' in newstr:
write_value = 256
                    # Wells: keep the linked-action command; refine the condition for selecting well topics
if 'command/well' in newstr and 'pump#start' in newstr:
write_value = 1
if 'command/well' in newstr and 'pump#stop' in newstr:
write_value = 2
if 'command/well' in newstr and 'pump#linkact' in newstr:
write_value = 4
row = mbClient.read_holding_registers(int(value))
print("holding-Register-read-back:%s" % (row))
logger.info("holding-Register-read-back:%s" % (row))
result1 = mbClient.write_single_register(int(value), write_value)
row = mbClient.read_holding_registers(int(value))
print("holding-Register-write-back1:%s ,addr:%s ,writeValue:%s,NOW value:%s" % (
result1, value, write_value, row))
logger.info("holding-Register-write-back1:%s ,addr:%s ,writeValue:%s,NOW value:%s" % (
result1, value, write_value, row))
time.sleep(2)
result2 = mbClient.write_single_register(int(value), 0)
row = mbClient.read_holding_registers(int(value))
if result1 is not None:
if result1:
down_back(newstr + '/0000')
if result1 is None or row is None:
down_back(newstr + '/9999')
print("holding-Register-write-back2:%s,NOW Status:%s" % (result2, row))
print(key + ":holding-Register-operation-over...")
logger.info("holding-Register-write-back2:%s,NOW Status:%s" % (result2, row))
logger.info(key + ":holding-Register-operation-over...")
subscribe.callback(on_message_print, command_topic, hostname="192.168.127.12")
try:
mqClient = mqtt.Client()
    # mqClient.connect("MacBook-Air.local", 1883)  # revert to this address when going live
mqClient.connect("192.168.127.12", 1883)
mqClient.tls_set("cacert.pem", "client-cert.pem", "client-key.pem")
mqClient.loop_start()
    # Subscribe to the command topic; the longer topic path reduces the chance of accidental triggering
command_topic = "huailaiwater/ESLink/prod/command/"
mqClient.subscribe(command_topic)
print("SUBCRIBE " + command_topic + " Successfully")
mbClient = ModbusClient()
# define modbus server host, port
SERVER_HOST = args.modbus_host
SERVER_PORT = args.modbus_port
mbClient.host(SERVER_HOST)
mbClient.port(SERVER_PORT)
    # Start the Modbus reading and command-subscription threads
# _thread.start_new_thread(readMb, ())
readMb()
_thread.start_new_thread(msgCallback, ())
except Exception as e:
# traceback.print_exc()+
logging.error("Unhandled error [" + str(e) + traceback.print_exc() + "]")
sys.exit(1)
while 1:
pass
``` |
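The docstring on `reMap` describes mapping upper-range register values (32768-65535) to negative numbers, but the active body returns the value unchanged; the conversion it describes is plain 16-bit two's complement, sketched below for reference (this is not what the script currently does).
```python
# Signed interpretation described in the reMap comment.
def to_signed_16bit(value: int) -> int:
    """Map 0..65535 to -32768..32767."""
    return value - 65536 if value >= 32768 else value

print(to_signed_16bit(65535))  # -1
print(to_signed_16bit(64535))  # -1001  (matches the minInput/minOutput defaults above)
print(to_signed_16bit(1234))   # 1234
```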
{
"source": "jhbae159/FlapPyBird-MPC",
"score": 3
} |
#### File: jhbae159/FlapPyBird-MPC/gd.py
```python
import numpy as np
import matplotlib.pyplot as plt
PIPEGAPSIZE = 100 # gap between upper and lower pipe
PIPEWIDTH = 52
BIRDWIDTH = 34
BIRDHEIGHT = 24
BIRDDIAMETER = np.sqrt(BIRDHEIGHT**2 + BIRDWIDTH**2) # the bird rotates in the game, so we use it's maximum extent
SKY = 0 # location of sky
GROUND = (512*0.79)-1 # location of ground
PLAYERX = 57 # location of bird
def getPipeConstraints(x, y, lowerPipes):
constraints = [] # init pipe constraint list
for pipe in lowerPipes:
dist_from_front = pipe['x'] - x - BIRDDIAMETER
dist_from_back = pipe['x'] - x + PIPEWIDTH
if (dist_from_front < 0) and (dist_from_back > 0):
constraints += [y <= (pipe['y'] - BIRDDIAMETER)] # y above lower pipe
constraints += [y >= (pipe['y'] - PIPEGAPSIZE)] # y below upper pipe
return constraints
def solve(playery, playerVelY, lowerPipes):
pipeVelX = -4 # speed in x
playerAccY = 1 # players downward accleration
playerFlapAcc = -20 # players speed on flapping
``` |
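`getPipeConstraints` only builds the constraint list; feeding it a symbolic height variable is what turns it into an MPC sub-problem. A minimal sketch, assuming `cvxpy` is installed and the module above is importable as `gd` — this is an illustration, not the repository's actual solver:
```python
# Illustrative only: one-step feasibility check with a symbolic height.
import cvxpy as cp
from gd import getPipeConstraints, PLAYERX

y = cp.Variable()                              # bird height at a single horizon step
pipes = [{'x': 60, 'y': 300}]                  # made-up lower pipe for the example
cons = getPipeConstraints(PLAYERX, y, pipes)   # overlap -> two box constraints on y
prob = cp.Problem(cp.Minimize(cp.square(y - 250)), cons)
prob.solve()
print(round(float(y.value)))                   # 250, inside the pipe gap [200, ~258]
```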
{
"source": "jhbardwell/jhbardwell.github.io",
"score": 3
} |
#### File: jhbardwell.github.io/unit-tests/fruit.py
```python
from Tkinter import *
class Fruit:
def __init__(self, parent):
# variables
self.texture_option = StringVar()
self.climate_option = StringVar()
# layout
self.myParent = parent
self.main_frame = Frame(parent, background="light blue")
self.main_frame.pack(expand=YES, fill=BOTH)
texture_options = ["Soft", "Crunchy","?"]
climate_options = ["Temperate", "Tropical","?"]
self.texture_option.set("?")
self.climate_option.set("?")
self.texture_options_frame = Frame(self.main_frame, borderwidth=3, background="light blue")
self.texture_options_frame.pack(side=TOP, expand=YES, anchor=W)
Label(self.texture_options_frame, text="Texture:", relief=FLAT, font="bold", background="light blue").pack(side=LEFT,anchor=W)
for option in texture_options:
button = Radiobutton(self.texture_options_frame, text=str(option), indicatoron=0,
value=option, padx=5, variable=self.texture_option, background="light blue")
button.pack(side=LEFT)
self.climate_options_frame = Frame(self.main_frame, borderwidth=3, background="light blue")
self.climate_options_frame.pack(side=TOP, expand=YES, anchor=W)
Label(self.climate_options_frame, text="Climate:", relief=FLAT, font="bold", background="light blue").pack(side=LEFT,anchor=W)
for option in climate_options:
button = Radiobutton(self.climate_options_frame, text=str(option), indicatoron=0,
value=option, padx=5, variable=self.climate_option, background="light blue")
button.pack(side=LEFT)
#search button
self.search_frame = Frame(self.main_frame, borderwidth=5, height=50, background="light blue")
self.search_frame.pack(expand=NO)
self.enter = Entry(self.search_frame, width=30)
self.enter.pack(side=LEFT, expand=NO, padx=5, pady=5, ipadx=5, ipady=5)
self.searchbutton = Button(self.search_frame, text="Search", foreground="white", background="blue",
width=6, padx="2m", pady="1m")
self.searchbutton.pack(side=LEFT, pady=5)
self.searchbutton.bind("<Button-1>", self.searchbuttonclick)
self.searchbutton.bind("<Return>", self.searchbuttonclick)
def searchbuttonclick(self,event):
#fruit texture climate
fruit_bowl=[
('Apple', 'Crunchy','Temperate'),
('Apricot','Soft','Tropical'),
('Orange', 'Soft','Tropical'),
('Pawpaw','Soft','Temperate'),
('Pear','Crunchy','Temperate')]
for fruit in fruit_bowl:
i = fruit_bowl.index(fruit)
if self.enter.get()==fruit_bowl[i][0]:
self.texture_option.set(fruit_bowl[i][1])
self.climate_option.set(fruit_bowl[i][2])
root = Tk()
root.title("Fruit Bowl")
fruit = Fruit(root)
root.mainloop()
``` |
{
"source": "jhbarnett/quant",
"score": 2
} |
#### File: management/commands/template.py
```python
from django.core.management.base import BaseCommand, CommandError
class RunLive(BaseCommand):
def add_arguments(self, parser):
pass
def handle(self):
pass
``` |
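As written, Django's `manage.py` will not pick this command up: the loader looks for a class literally named `Command` in the module, and `handle` receives `*args, **options`. A minimal runnable skeleton (the `--dry-run` argument is purely illustrative) looks like:
```python
# Minimal management-command skeleton; the argument is a hypothetical example.
from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = "Example command skeleton."

    def add_arguments(self, parser):
        parser.add_argument("--dry-run", action="store_true", help="Do not change anything.")

    def handle(self, *args, **options):
        self.stdout.write("dry run" if options["dry_run"] else "live run")
```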
{
"source": "jhbastek/InverseTrussDesign",
"score": 2
} |
#### File: InverseTrussDesign/src/loadDataset.py
```python
import torch
from torch.utils.data import TensorDataset
import pickle
import numpy as np
import pandas as pd
from train_parameters import *
from src.normalization import Normalization
from src.voigt_rotation import *
from src.model_utils import CPU_Unpickler
def exportTensor(name,data,cols, header=True):
df=pd.DataFrame.from_records(data.detach().numpy())
if(header):
df.columns = cols
print(name)
df.to_csv(name+".csv", header=header, index=False)
def exportList(name,data):
arr=np.array(data)
np.savetxt(name+".csv", [arr], delimiter=',')
def getNormalization(save_normalization=False):
data = pd.read_csv(dataPath)
# check for NaNs
assert not data.isnull().values.any()
F1_features = torch.tensor(data[F1_features_names].values)
R2 = torch.tensor(data[R2_names].values)
V = torch.tensor(data[V_names].values)
C_ort = torch.tensor(data[C_ort_names].values)
C = torch.tensor(data[C_names].values)
a,b,c = torch.split(R2,[1,1,1],dim=1)
R2_transposed = torch.cat((-a,b,c),dim=1)
unrotatedlabelTensor = direct_rotate(C,R2_transposed)
F1_features_scaling = Normalization(F1_features,F1_features_types,F1_features_scaling_strategy)
V_scaling = Normalization(V,V_types,V_scaling_strategy)
C_ort_scaling = Normalization(C_ort, C_ort_types,C_ort_scaling_strategy)
C_scaling = Normalization(C,C_types,C_scaling_strategy)
C_hat_scaling = Normalization(unrotatedlabelTensor,C_types,C_hat_scaling_strategy)
# should only be activated if framework is retrained with different dataset
if save_normalization:
with open('src/normalization/F1_features_scaling.pickle', 'wb') as file_:
pickle.dump(F1_features_scaling, file_, -1)
with open('src/normalization/V_scaling.pickle', 'wb') as file_:
pickle.dump(V_scaling, file_, -1)
with open('src/normalization/C_ort_scaling.pickle', 'wb') as file_:
pickle.dump(C_ort_scaling, file_, -1)
with open('src/normalization/C_scaling.pickle', 'wb') as file_:
pickle.dump(C_scaling, file_, -1)
with open('src/normalization/C_hat_scaling.pickle', 'wb') as file_:
pickle.dump(C_hat_scaling, file_, -1)
return F1_features_scaling, C_ort_scaling, C_scaling, V_scaling, C_hat_scaling
def getSavedNormalization():
F1_features_scaling = CPU_Unpickler(open("src/normalization/F1_features_scaling.pickle", "rb", -1)).load()
V_scaling = CPU_Unpickler(open("src/normalization/V_scaling.pickle", "rb", -1)).load()
C_ort_scaling = CPU_Unpickler(open("src/normalization/C_ort_scaling.pickle", "rb", -1)).load()
C_scaling = CPU_Unpickler(open("src/normalization/C_scaling.pickle", "rb", -1)).load()
C_hat_scaling = CPU_Unpickler(open("src/normalization/C_hat_scaling.pickle", "rb", -1)).load()
return F1_features_scaling, C_ort_scaling, C_scaling, V_scaling, C_hat_scaling
def getDataset(F1_features_scaling, V_scaling, C_ort_scaling, C_scaling):
data = pd.read_csv(dataPath)
print('Data: ',data.shape)
# check for NaNs
assert not data.isnull().values.any()
F1_features = torch.tensor(data[F1_features_names].values)
R1 = torch.tensor(data[R1_names].values)
R2 = torch.tensor(data[R2_names].values)
V = torch.tensor(data[V_names].values)
C_ort = torch.tensor(data[C_ort_names].values)
C = torch.tensor(data[C_names].values)
F1_features = F1_features_scaling.normalize(F1_features)
V = V_scaling.normalize(V)
C_ort = C_ort_scaling.normalize(C_ort)
C = C_scaling.normalize(C)
dataset = TensorDataset(F1_features.float(), R1.float(), V.float(), R2.float(), C_ort.float(), C.float())
l1 = round(len(dataset)*traintest_split)
l2 = len(dataset) - l1
print('train/test: ',[l1,l2],'\n\n')
train_set, test_set = torch.utils.data.random_split(dataset, [l1,l2], generator=torch.Generator().manual_seed(42))
return train_set, test_set
def getDataset_pred(C_scaling,E,dataPath_pred):
data = pd.read_csv(dataPath_pred)
print('Data: ',data.shape)
# check for NaNs
assert not data.isnull().values.any()
C = torch.tensor(data[C_names].values)
# normalize stiffness by Young's modulus of base material
C = torch.div(C,E)
C = C_scaling.normalize(C)
dataset = C.float()
return dataset
```
#### File: InverseTrussDesign/src/model_utils.py
```python
import torch
import torch.nn.functional as F
from train_parameters import *
from src.voigt_rotation import *
import pickle, io
# unpickle object also with a CPU-only machine, see issue: https://github.com/pytorch/pytorch/issues/16797
class CPU_Unpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'torch.storage' and name == '_load_from_bytes':
return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
else: return super().find_class(module, name)
def getActivation(activ):
if(activ == 'relu'):
sigma = torch.nn.ReLU()
elif(activ == 'tanh'):
sigma = torch.nn.Tanh()
elif(activ == 'sigmoid'):
sigma = torch.nn.Sigmoid()
elif(activ == 'leaky'):
sigma = torch.nn.LeakyReLU()
elif(activ == 'softplus'):
sigma = torch.nn.Softplus()
elif(activ == 'logsigmoid'):
sigma = torch.nn.LogSigmoid()
elif(activ == 'elu'):
sigma = torch.nn.ELU()
elif(activ == 'gelu'):
sigma = torch.nn.GELU()
elif(activ == 'none'):
sigma = torch.nn.Identity()
else:
raise ValueError('Incorrect activation function')
return sigma
def createNN(inputDim,arch,outputDim,bias=True):
model = torch.nn.Sequential()
currDim = inputDim
layerCount = 1
activCount = 1
for i in range(len(arch)):
if(type(arch[i]) == int):
model.add_module('layer '+str(layerCount),torch.nn.Linear(currDim,arch[i],bias=bias))
currDim = arch[i]
layerCount += 1
elif(type(arch[i]) == str):
model.add_module('activ '+str(activCount),getActivation(arch[i]))
activCount += 1
model.add_module('layer '+str(layerCount),torch.nn.Linear(currDim,outputDim,bias=bias))
return model
def softmax(input, t):
return F.log_softmax(input/t, dim=1)
def gumbel(input, t):
return F.gumbel_softmax(input, tau=t, hard=True, eps=1e-10, dim=1)
def assemble_F2_features(C_ort,R1,V,C_ort_scaling,method=None):
# scale C_ort to its original range
C_ort_unscaled = C_ort_scaling.unnormalize(C_ort)
# rotate C_ort (directly in Voigt notation)
C_tilde = direct_rotate(C_ort_unscaled,R1,orthotropic=True,method=method)
return torch.cat((C_tilde,V),dim=1)
def invModel_output(G1,G2,input,t,activation):
# continuous params: [stretch1, stretch2, stretch3, rot_stretch1, rot_stretch2, rot_stretch3, theta, rot_ax1, rot_ax2]
topology1,topology2,topology3,rep1,rep2,rep3 = torch.split(G1(input), [7,7,7,2,2,2], dim=1)
m = getActivation('sigmoid')
if(activation == 'one-hot'):
# enforce one-hot encoding by small temperature
t = 1.e-6
if(activation == 'softmax' or activation == 'one-hot'):
topology = torch.cat((softmax(topology1,t),softmax(topology2,t),softmax(topology3,t),softmax(rep1,t),softmax(rep2,t),softmax(rep3,t)), dim=1)
elif(activation == 'gumbel'):
topology1,topology2,topology3,rep1,rep2,rep3 = softmax(topology1,t),softmax(topology2,t),softmax(topology3,t),softmax(rep1,t),softmax(rep2,t),softmax(rep3,t)
topology = torch.cat((gumbel(topology1,t),gumbel(topology2,t),gumbel(topology3,t),gumbel(rep1,t),gumbel(rep2,t),gumbel(rep3,t)), dim=1)
else:
raise ValueError('Incorrect activation function')
features = torch.cat((topology, input), dim=1)
rho_U, V, rot1, rot2 = torch.split(G2(features), [4,3,6,6], dim=1)
# scale to [0,1] using sigmoid
rho_U, V = m(rho_U), m(V)
return rho_U, V, rot1, rot2, topology
def rotate_C(C_in,R,C_in_scaling,C_out_scaling,method=None):
temp = C_in_scaling.unnormalize(C_in)
temp = direct_rotate(temp,R,method=method)
C = C_out_scaling.normalize(temp)
return C
```
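`createNN` interleaves integer layer widths with activation names; a small example of the resulting module (import path as used elsewhere in this repository):
```python
# Example: a 3-input, 6-output MLP with two hidden layers of width 64.
from src.model_utils import createNN

net = createNN(inputDim=3, arch=[64, 'relu', 64, 'relu'], outputDim=6)
print(net)  # Sequential of alternating Linear/ReLU layers, ending in Linear(64, 6)
```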
#### File: InverseTrussDesign/src/normalization.py
```python
import torch
import torch.nn.functional as F
import numpy as np
from train_parameters import *
class Normalization:
def __init__(self,data,dataType,strategy):
self.mu = torch.mean(data,dim=0)
self.std = torch.std(data,dim=0)
self.min = torch.min(data,dim=0)[0]
self.max = torch.max(data,dim=0)[0]
self.globalmin = torch.min(data)
self.globalmax = torch.max(data)
self.dataType = dataType
self.cols = data.size()[1]
self.strategy = strategy
def normalize(self, data):
list_index_cat = []
temp = torch.zeros(data.shape,device=data.device)
for i in range(0, self.cols):
if self.dataType[i] == 'continuous':
if(self.strategy == 'min-max-1'):
#scale to [0,1]
temp[:,i] = torch.div(data[:,i]-self.min[i], self.max[i]-self.min[i])
elif(self.strategy == 'global-min-max-1'):
#scale to [-1,1] based on min max of full dataset
temp[:,i] = torch.div(data[:,i]-self.globalmin, self.globalmax-self.globalmin)
elif(self.strategy == 'min-max-2'):
#scale to [-1,1]
temp[:,i] = 2.*torch.div(data[:,i]-self.min[i], self.max[i]-self.min[i])-1.
elif(self.strategy == 'global-min-max-2'):
#scale to [-1,1] based on min max of full dataset
temp[:,i] = 2.*torch.div(data[:,i]-self.globalmin, self.globalmax-self.globalmin)-1.
elif(self.strategy == 'mean-std'):
#scale s.t. mean=0, std=1
temp[:,i] = torch.div(data[:,i]-self.mu[i], self.std[i])
elif (self.strategy == 'none'):
temp[:,i] = data[:,i]
else:
raise ValueError('Incorrect normalization strategy')
elif self.dataType[i] == 'categorical':
#convert categorical features into binaries and append at the end of feature tensor
temp = torch.cat((temp,F.one_hot(data[:,i].to(torch.int64))),dim=1)
list_index_cat = np.append(list_index_cat,i)
else:
raise ValueError("Data type must be either continuous or categorical")
# delete original (not one-hot encoded) categorical features
j = 0
for i in np.array(list_index_cat, dtype=np.int64):
temp = torch.cat([temp[:,0:i+j], temp[:,i+1+j:]],dim=1)
j -= 1
return temp
def unnormalize(self, data):
temp = torch.zeros(data.shape,device=data.device)
for i in range(0, self.cols):
if self.dataType[i] == 'continuous':
if(self.strategy == 'min-max-1'):
temp[:,i] = torch.mul(data[:,i], self.max[i]-self.min[i]) +self.min[i]
elif(self.strategy == 'global-min-max-1'):
temp[:,i] = torch.mul(data[:,i], self.globalmax-self.globalmin) +self.globalmin
elif(self.strategy == 'min-max-2'):
temp[:,i] = torch.mul(0.5*data[:,i]+0.5, self.max[i]-self.min[i]) +self.min[i]
elif(self.strategy == 'global-min-max-2'):
temp[:,i] = torch.mul(0.5*data[:,i]+0.5, self.globalmax-self.globalmin) +self.globalmin
elif(self.strategy == 'mean-std'):
temp[:,i] = torch.mul(data[:,i], self.std[i]) + self.mu[i]
elif (self.strategy == 'none'):
temp[:,i] = data[:,i]
else:
raise ValueError('Incorrect normalization strategy')
elif self.dataType[i] == 'categorical':
temp[:,i] = data[:,i]
else:
raise ValueError("Data type must be either continuous or categorical")
return temp
# convert one-hot representation back to categorical integers
def decodeOneHot(data):
type1,type2,type3,rep1,rep2,rep3 = torch.split(data,[7,7,7,2,2,2],dim=1)
# we increment the repetition to convert from binary to [1,2] as defined in the publication
type1,type2,type3,rep1,rep2,rep3 = torch.argmax(type1, dim=1),torch.argmax(type2,dim=1),torch.argmax(type3,dim=1),torch.argmax(rep1,dim=1)+1,torch.argmax(rep2,dim=1)+1,torch.argmax(rep3, dim=1)+1
types = torch.stack((type1,type2,type3),dim=1)
reps = torch.stack((rep1,rep2,rep3),dim=1)
# sort by lattice number
sorted_types, indices = torch.sort(types)
sorted_reps = smart_sort(reps, indices)
# sort by repetitions if lattice numbers are equal
for i in range(data.size()[0]):
if sorted_types[i,0] == sorted_types[i,1] and sorted_types[i,1] == sorted_types[i,2]:
sorted_reps[i,:] = torch.sort(sorted_reps[i,:])[0]
elif sorted_types[i,0] == sorted_types[i,1]:
sorted_reps[i,0:2] = torch.sort(sorted_reps[i,0:2])[0]
elif sorted_types[i,1] == sorted_types[i,2]:
sorted_reps[i,1:3] = torch.sort(sorted_reps[i,1:3])[0]
else:
pass
return torch.cat((sorted_types,sorted_reps), dim=1)
def smart_sort(x, permutation):
d1, d2 = x.size()
ret = x[
torch.arange(d1).unsqueeze(1).repeat((1, d2)).flatten(),
permutation.flatten()
].view(d1, d2)
return ret
``` |
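A minimal round-trip sketch of the `'min-max-1'` strategy for purely continuous columns (synthetic data; the categorical/one-hot branch above is not exercised here):
```python
import torch

data = torch.tensor([[1., 10.],
                     [3., 30.],
                     [5., 50.]])

col_min = data.min(dim=0)[0]
col_max = data.max(dim=0)[0]

normed = (data - col_min) / (col_max - col_min)     # what normalize() does per column
restored = normed * (col_max - col_min) + col_min   # what unnormalize() inverts

print(torch.allclose(restored, data))               # True
```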
{
"source": "jhbdream/PyQt-Image-Viewer",
"score": 3
} |
#### File: jhbdream/PyQt-Image-Viewer/main.py
```python
from PyQt4 import QtCore, QtGui, uic
from actions import ImageViewer
import sys, os
import cv2
gui = uic.loadUiType("main.ui")[0] # load UI file designed in Qt Designer
VALID_FORMAT = ('.BMP', '.GIF', '.JPG', '.JPEG', '.PNG', '.PBM', '.PGM', '.PPM', '.TIFF', '.XBM') # Image formats supported by Qt
def getImages(folder):
''' Get the names and paths of all the images in a directory. '''
image_list = []
if os.path.isdir(folder):
for file in os.listdir(folder):
print file
if file.upper().endswith(VALID_FORMAT):
im_path = os.path.join(folder, file)
#im_path = unicode(im_path, "utf-8")
image_obj = {'name': file, 'path': im_path }
image_list.append(image_obj)
return image_list
class Iwindow(QtGui.QMainWindow, gui):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.cntr, self.numImages = -1, -1  # self.cntr holds the index of the currently selected/displayed image
self.image_viewer = ImageViewer(self.qlabel_image)
self.__connectEvents()
self.showMaximized()
def __connectEvents(self):
self.open_folder.clicked.connect(self.selectDir)
self.next_im.clicked.connect(self.nextImg)
self.prev_im.clicked.connect(self.prevImg)
self.qlist_images.itemClicked.connect(self.item_click)
self.save_im.clicked.connect(self.saveImg)
self.zoom_plus.clicked.connect(self.zoomPlus)
self.zoom_minus.clicked.connect(self.zoomMinus)
self.reset_zoom.clicked.connect(self.resetZoom)
self.toggle_line.toggled.connect(self.action_line)
self.toggle_rect.toggled.connect(self.action_rect)
self.toggle_move.toggled.connect(self.action_move)
self.undo.clicked.connect(self.image_viewer.funundo)
self.clear_all.clicked.connect(self.action_clear_all)
self.redo.clicked.connect(self.action_redo)
def selectDir(self):
''' Select a directory, make list of images in it and display the first image in the list. '''
# open 'select folder' dialog box
self.folder = unicode(QtGui.QFileDialog.getExistingDirectory(self, "Select Directory"))
print self.folder
if not self.folder:
QtGui.QMessageBox.warning(self, 'No Folder Selected', 'Please select a valid Folder')
return
self.logs = getImages(self.folder)
self.numImages = len(self.logs)
# make qitems of the image names
self.items = [QtGui.QListWidgetItem(log['name']) for log in self.logs]
for item in self.items:
self.qlist_images.addItem(item)
# display first image and enable Pan
self.cntr = 0
self.image_viewer.enablePan(False)
self.image_viewer.loadImage(self.logs[self.cntr]['path'])
self.qlist_images.setItemSelected(self.items[self.cntr], True)
# enable the next image button on the gui if multiple images are loaded
if self.numImages > 1:
self.next_im.setEnabled(True)
def resizeEvent(self, evt):
if self.cntr >= 0:
self.image_viewer.onResize()
def nextImg(self):
self.qlabel_image.setCursor(QtCore.Qt.CrossCursor)
self.image_viewer.funmode(4)
self.image_viewer.score="A"
self.image_viewer.enablePan(True)
def prevImg(self):
self.qlabel_image.setCursor(QtCore.Qt.CrossCursor)
self.image_viewer.funmode(4)
self.image_viewer.score="A+"
self.image_viewer.enablePan(True)
def zoomPlus(self):
self.qlabel_image.setCursor(QtCore.Qt.CrossCursor)
self.image_viewer.funmode(4)
self.image_viewer.score="A-"
self.image_viewer.enablePan(True)
def zoomMinus(self):
self.qlabel_image.setCursor(QtCore.Qt.CrossCursor)
self.image_viewer.funmode(4)
self.image_viewer.score="C"
self.image_viewer.enablePan(True)
def resetZoom(self):
self.qlabel_image.setCursor(QtCore.Qt.CrossCursor)
self.image_viewer.funmode(4)
self.image_viewer.score="B"
self.image_viewer.enablePan(True)
def saveImg(self):
#cv2.imwrite(self.logs[self.cntr]['path'],self.image_viewer.cvimage)
print self.logs[self.cntr]['path']
path = self.logs[self.cntr]['path'].split('.')
newpath = path[0] + "OK." + path[1]
print newpath
SAVEIMG = cv2.cvtColor(self.image_viewer.cvimage, cv2.COLOR_RGB2BGR)
#cv2.imwrite(newpath,SAVEIMG)
cv2.imencode('.jpg',SAVEIMG)[1].tofile(newpath)  # save the image (imencode + tofile copes with non-ASCII paths)
def item_click(self, item):
self.cntr = self.items.index(item)
self.image_viewer.loadImage(self.logs[self.cntr]['path'])
def action_line(self):
if self.toggle_line.isChecked():
self.qlabel_image.setCursor(QtCore.Qt.CrossCursor)
self.image_viewer.funmode(1)
self.image_viewer.enablePan(True)
def action_rect(self):
if self.toggle_rect.isChecked():
self.qlabel_image.setCursor(QtCore.Qt.CrossCursor)
self.image_viewer.funmode(2)
self.image_viewer.enablePan(True)
def action_move(self):
if self.toggle_move.isChecked():
self.qlabel_image.setCursor(QtCore.Qt.OpenHandCursor)
self.image_viewer.funmode(3)
self.image_viewer.enablePan(True)
def action_undo(self):
self.image_viewer.funundo()
def action_clear_all(self):
self.image_viewer.clear_all()
def action_redo(self):
self.image_viewer.funredo()
def main():
app = QtGui.QApplication(sys.argv)
app.setStyle(QtGui.QStyleFactory.create("Cleanlooks"))
app.setPalette(QtGui.QApplication.style().standardPalette())
parentWindow = Iwindow(None)
sys.exit(app.exec_())
if __name__ == "__main__":
print __doc__
main()
``` |
{
"source": "jhbez/focus",
"score": 2
} |
#### File: habit/controller/habitCtl.py
```python
from flask import render_template, g
from flask_mail import Message
class HabitCtl:
def __init__(self):
pass
@staticmethod
def index():
return render_template('habit/index.html')
@staticmethod
def reminder(mail, params={}):
print params
_template = render_template('habit/reminder.html',
user_name=params.get('user_name', '').decode('utf-8'),
habit_name=params.get('habit_name', ''))
try:
msg = Message(
sender=("Focus", "<EMAIL>"),
recipients=[params.get('user_email', False)],
subject="{} :)".format(params.get('habit_name', '').encode('utf-8')).upper()
)
msg.html = _template
mail.send(msg)
except Exception as e:
print e.message, e.args
```
#### File: project/model/projectCommentMdl.py
```python
from flask import g
class ProjectCommentMdl:
_table = 'project_comments'
_fields = {
u'comment': {
'typeof': 'string',
'required': True,
},
u'resource': {
'typeof': 'string',
'required': True,
},
u'resource_id': {
'required': True,
'typeof': 'int'
},
}
def __init__(self):
pass
def get_comment_by_resource(self, resource, resource_id):
_qrys_comment_by_resource = """
select u.name user_name, u.cover user_cover, pc.created_at, pc.comment
from project_comments pc inner join users u on pc.create_id = u.id
where pc.deleted_at is null and pc.resource = '{}' and pc.resource_id = {}
order by pc.created_at desc
""".format(resource, resource_id)
g.db_conn.execute(_qrys_comment_by_resource)
_collection_comments = []
if g.db_conn.count() > 0:
for user_name, user_cover, created_at, comment in g.db_conn.fetch():
_collection_comments.append(
{'user_name': user_name,
'user_cover': user_cover,
'created_at': created_at,
'comment': comment})
return _collection_comments
```
#### File: project/model/projectTaskMdl.py
```python
class ProjectTaskMdl:
_table = 'project_tasks'
_fields = {
u'name': {
'required': True,
'length': {'min': 3},
'typeof': 'str'
},
u'description': {
'length': {'min': 3},
'typeof': 'str'
},
u'project_id': {
'required': True,
'typeof': 'int'
},
# u'parent_id': {
# 'typeof': 'int'
# },
u'start_date_at': {
'typeof': 'date'
},
u'due_date_at': {
'typeof': 'date'
},
u'completed_at': {
'typeof': 'date'
},
}
def __init__(self):
pass
_query_get = """
SELECT array_to_json(array_agg(row_to_json(t) )) as collection FROM (
SELECT id,project_id, parent_id, name, description, start_date_at, due_date_at, completed_at
FROM project_tasks %s
)t;
"""
```
#### File: reminder/controllers/reminderCtl.py
```python
from flask_restful import Resource
from flask import request, g
from v.tools.exception import ExceptionRest
from v.tools.v import processing_rest_exception, processing_rest_success, type_of_insert_rest, type_of_update_rest
from v.tools.validate import validate_rest
from v.reminder.models.reminderMdl import ReminderMdl
from flask_babel import _
import json
class ReminderCtl(Resource, ReminderMdl):
def put(self, reminder_id):
_request = request.json
try:
_params = _request.get('params', None)
val_params = ''
if _params:
del _request['params']
val_params = " params=cast('{}' as json), ".format(json.dumps(_params))
_errors = validate_rest(fields=self._fields, request=_request, method="put")
if not _errors:
_val = type_of_update_rest(self._fields, _request)
_qrp = "UPDATE {} SET {} last_datetime_notify=NULL, {} WHERE id={};".format(self._table, val_params, _val, reminder_id)
print _qrp
g.db_conn.execute(_qrp)
if g.db_conn.count() > 0:
_put = processing_rest_success(status_code=200, message=_("The record was successfully updated"))
else:
raise ExceptionRest(status_code=404, message=_("Not found record"))
else:
raise ExceptionRest(status_code=400, errors=_errors)
except (Exception, ExceptionRest), e:
_put = processing_rest_exception(e)
return _put
@staticmethod
def insert(db, params):
try:
qry ="""
INSERT INTO reminder
(
create_id,
resource, resource_id,
every, by, due_date,
email_notify, time_notify
)
VALUES
(
{}, '{}',{},
{},'{}', current_timestamp + INTERVAL ' +20 days',1,'09:30:00'
);
""".format(params.get('create_id', 0), params.get('resource', 'fail'), params.get('resource_id',0),
params.get('every', 0), params.get('by', 'fail'))
db.execute(qry)
except Exception as e:
pass
class ReminderListCtl(Resource, ReminderMdl):
def get(self):
try:
_resource = request.args.get("resource", 'all')
if _resource == 'habit':
_resource_id = request.args.get("resource_id", 0)
_qrg = """
SELECT array_to_json(array_agg(row_to_json(t) )) as collection
FROM ( SELECT * FROM {} WHERE
deleted_at IS NULL and resource='habit' and resource_id ={})t;
""".format(self._table, _resource_id)
g.db_conn.execute(_qrg)
if g.db_conn.count() > 0:
_data = g.db_conn.one()[0]
if _data:
_get = processing_rest_success(data={self._table: _data})
else:
raise ExceptionRest(status_code=404, message=_("Not found record"))
else:
raise ExceptionRest(status_code=404, message=_("Not found record"))
else:
raise ExceptionRest(status_code=400, message=_("Bad request"))
except (Exception, ExceptionRest), e:
_get = processing_rest_exception(e)
return _get
``` |
{
"source": "JhBongard/surfmi",
"score": 3
} |
#### File: surfmi/surfmi/envi_raster.py
```python
import gdal
import numpy as np
import pandas as pd
import re
class EnviRaster:
"""
This class simplifies the handling of Envi raster data. Especially for in- and export using gdal.
In addition, arrays can be addressed and aggregated based on time or layername specifications.
The class instances are:
.header:
- pandas time series dataframe listing "layernames", "date" and "array_id"
.ndarray:
- stores the raster data as one- or multidimensional numpy array
.trans:
- contains information about the georeference (most north-western point & pixel size)
.proj:
- contains information about the projection
"""
def __init__(self, header, ndarray, trans, proj):
self.header = header
self.ndarray = ndarray
self.trans = trans
self.proj = proj
# Getter Functions
# get a layername from array_id
def get_name_fnr(self, array_id):
return self.header.layernames[array_id]
# get array_id from date
def get_nr_fdate(self, date):
return self.header["array_id"][self.header["date"] == date][0]
def get_nr_fname(self, layername):
return self.header["array_id"][self.header["layernames"] == layername][0]
# get array from layername
def get_array_fname(self, layername):
array_id = self.get_nr_fname(layername)
return self.ndarray[:, :, array_id]
# get array from array_id
def get_array_fnr(self, array_id):
return self.ndarray[:, :, array_id]
def import_envi(input_file, na_value=-99):
"""
Function imports Sentinel-1 Envi Data
:param input_file: Path specification to envi file.
:param na_value: Define NoData Value. Default is -99.
:return: Object of class Envi_Raster
"""
# Open and load the array
input_img = gdal.Open(input_file) # Import envi data as array
layers = [] # Get list of arrays
for i in range(1, input_img.RasterCount + 1):
layers.append(input_img.GetRasterBand(i).ReadAsArray())
# Stack the arrays and define NA value
array_stack = np.dstack(layers)
array_stack[array_stack == na_value] = np.nan
# Get layernames from header file
header = open(input_file + ".hdr") # Import Envi header to get the layernames as list
header = header.read()
header = re.split("band names =", header)[1]
# header = re.split("{|}", header)[1]
header = re.split("[{}]", header)[1]
header = header.replace("\n", "").replace(" ", "")
header = re.split(",", header)
header = pd.DataFrame({"layernames": header})
# Create dataframe for aggregated data or percentiles
if "agg" in header.layernames[0][41:44] or "per" in header.layernames[0][37:40]:
header["start_date"] = pd.to_datetime(header.layernames.str[15:23], format="%Y%m%d")
header["end_date"] = pd.to_datetime(header.layernames.str[24:32], format="%Y%m%d")
# add date used for zonal statistics
header["date"] = pd.to_datetime(header.layernames.str[15:23], format="%Y%m%d")
header.index = header["date"]
header["array_id"] = np.arange(len(header))
# Create time-series df for Sentinel-1 or moisture data
else:
header["date"] = pd.to_datetime(header.layernames.str[12:20], format="%Y%m%d")
header.index = header["date"]
header["array_id"] = np.arange(len(header))
return (EnviRaster(header=header,
ndarray=array_stack,
trans=input_img.GetGeoTransform(),
proj=input_img.GetProjection()))
def export_envi(self, outfile, na_value=-99):
"""
Function exports an Envi_Raster object.
:param outfile: Path specification
:return: Envi file in float32 format
:param na_value: Define NoData Value. Default is -99.
"""
# Export for one dimensional array
if len(self.ndarray.shape) == 2:
# define rows and columns
[cols, rows] = self.ndarray.shape
# Create file
outdriver = gdal.GetDriverByName("ENVI")
out_data = outdriver.Create(str(outfile), rows, cols, 1, gdal.GDT_Float32)
# Export Data
out_data.GetRasterBand(1).WriteArray(self.ndarray)
out_data.GetRasterBand(1).SetDescription(self.header["layernames"][0])
out_data.GetRasterBand(1).SetNoDataValue(na_value)
# Export for multidimensional arrays
else:
# define rows, columns and amount of layer
[cols, rows, z] = self.ndarray.shape
# Create file
outdriver = gdal.GetDriverByName("ENVI")
out_data = outdriver.Create(str(outfile), rows, cols, z, gdal.GDT_Float32)
# Write the arrays to the file (Different Index of GetRasterbands and array index)
i = 0
while i < len(self.ndarray[1, 1, :]):
out_data.GetRasterBand(i + 1).WriteArray(self.ndarray[:, :, i])
out_data.GetRasterBand(i + 1).SetDescription(self.header["layernames"][i])
out_data.GetRasterBand(i + 1).SetNoDataValue(na_value)
i += 1
# Write the geo-reference
out_data.SetGeoTransform(self.trans)
# Write the projection
out_data.SetProjection(self.proj)
# Close File
out_data = None
print("Exported!")
def percentile(self, percentile, start_date, end_date):
"""
Function calculates a percentile between two points in time of an Envi_Raster.
The resulting band name consists of:
"S1___IW_____VV_{YYYYmmdd}_{YYYYmmdd}_{ccc}_per{n}"
{YYYYmmdd} representing the start and end date for calibration
{ccc} number of scenes used for the calculation
{n} the percentile
:param percentile: Integer between 0-100
:param start_date: String containing a date in "YYYY-mm-dd" - format
:param end_date: String containing a date in "YYYY-mm-dd" - format
:return: Envi_Raster object containing the desired percentile
"""
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
# Get all S1-dates within the start-end-time span
datesrange = []
for d in self.header["date"]:
if start_date <= d <= end_date:
datesrange.append(d)
# Get the array_id of first and last date within the time span
idx_start = self.header["array_id"][self.header["date"] == datesrange[0]][0]
idx_end = self.header["array_id"][self.header["date"] == datesrange[-1]][0]
# Calculation of the desired percentile using the start/stop index & ignoring nan
perc_array = np.nanpercentile(self.ndarray[:, :, idx_start:idx_end], percentile, axis=2)
# create layername
nr_of_scenes = str(len(datesrange)).zfill(3)
layernames = "S1___IW_____VV_" \
+ str(start_date)[0:10].replace("-", "") + "_" \
+ str(end_date)[0:10].replace("-", "") + "_" \
+ nr_of_scenes \
+ "_per" + str(percentile)
# create header
header = pd.DataFrame({"layernames": [layernames], "start_date": [start_date], "end_date": [end_date]})
header["array_id"] = np.arange(len(header))
return (EnviRaster(header=header,
ndarray=perc_array,
trans=self.trans,
proj=self.proj))
def moisture(self, dry_ref, wet_ref, layername_suffix="_moist"):
"""
Function calculating relative surface moisture from Sentinel-1 data of class Envi_Raster.
Algorithm based on:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>.
(2018): Surface Moisture and Vegetation Cover Analysis for Drought Monitoring in the Southern Kruger National
Park Using Sentinel-1, Sentinel-2, and Landsat-8. – Remote Sensing 10, 9, 1482.
:param dry_ref: Lower percentile of class Envi_Raster
:param wet_ref: Upper percentile of class Envi_Raster
:param layername_suffix: Suffix for each layername, default is "_moist"
:return: Envi_Raster object
"""
# Create empty Envi_Raster based on the S1-input
s1_moist = EnviRaster(header=self.header.copy(),
ndarray=np.empty(self.ndarray.shape),
trans=self.trans,
proj=self.proj)
# Add suffix to the layernames
s1_moist.header["layernames"] = s1_moist.header["layernames"].astype(str) + layername_suffix
# Calculate the change detection algorithm
for i in range(0, len(s1_moist.ndarray[1, 1, :])):
s1_moist.ndarray[:, :, i] = ((self.ndarray[:, :, i] - dry_ref.ndarray) / (
wet_ref.ndarray - dry_ref.ndarray)) * 100
# set values lower than 0 to 0
s1_moist.ndarray[:, :, i][s1_moist.ndarray[:, :, i] < 0] = 0
# set values higher than 100 to 100
s1_moist.ndarray[:, :, i][s1_moist.ndarray[:, :, i] > 100] = 100
return s1_moist
def aggregation(self, agg):
"""
Function aggregates the layers by month, quarter or year and calculates the median within each aggregation step
The resulting band name consists of:
"S1___IW_____VV_{YYYY-mm-dd}_{YYYY-mm-dd}_{ccc}_agg_{a}"
{YYYY-mm-dd} representing the start and end date of the specific aggregation step
{ccc} number of scenes used in the aggregation
{a} the aggregation parameter ("M","Q" or "Y")
:param agg: "M", "Q", or "Y" for monthly, quarterly or yearly aggregation
:return: Envi_Raster object
"""
# get sequences containing min & max array_id of each aggregation step
min_idx = self.header.resample(agg).min()["array_id"].dropna()
max_idx = self.header.resample(agg).max()["array_id"].dropna()
# get sequences containing start & end date of each aggregation step using (MS,QS,YS) arguments
agg_start = (self.header.resample(str(agg) + "S").min()["array_id"].dropna()).index
agg_end = (self.header.resample(agg).min()["array_id"].dropna()).index
# count scenes in each aggregation step
count_scenes = self.header.resample(agg).count()["array_id"]
count_scenes = count_scenes[count_scenes != 0]
# Create empty array and dataframe
agg_array = np.empty([self.ndarray.shape[0], self.ndarray.shape[1], len(min_idx)])
layernames = []
# Calculate the median of each aggregation step
for i in range(0, len(min_idx)):
# addressing an empty slice like sm_stack[:,:,235:235] results in NA values
if int(min_idx[i]) == int(max_idx[i]):
agg_array[:, :, i] = self.ndarray[:, :, int(min_idx[i])]
else:
agg_array[:, :, i] = np.nanmedian(self.ndarray[:, :, int(min_idx[i]):int(max_idx[i])], axis=2)
# create a list with layernames
start_date = agg_start[i]
end_date = agg_end[i]
nr_of_scenes = str(count_scenes[i]).zfill(3)
layername = "S1___IW_____VV_" \
+ str(start_date)[0:10].replace("-", "") + "_" \
+ str(end_date)[0:10].replace("-", "") + "_" \
+ nr_of_scenes + \
"_agg_" + agg
layernames.append(layername)
# create the header
header = pd.DataFrame({"layernames": layernames, "start_date": agg_start, "end_date": agg_end})
header["array_id"] = np.arange(len(header))
return (EnviRaster(header=header,
ndarray=agg_array,
trans=self.trans,
proj=self.proj))
``` |
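A hypothetical end-to-end sketch of the workflow these methods support; the paths, date range and percentile choices are placeholders, GDAL plus an ENVI-format Sentinel-1 VV stack are assumed, and `import_envi` is called at module level as laid out above (adjust the first call if it is exposed on the class instead):
```python
from surfmi import envi_raster  # assumption: the surfmi package is importable

s1 = envi_raster.import_envi("data/s1_vv_stack")        # backscatter time series
dry = s1.percentile(5, "2017-01-01", "2019-12-31")      # dry reference
wet = s1.percentile(95, "2017-01-01", "2019-12-31")     # wet reference
moist = s1.moisture(dry_ref=dry, wet_ref=wet)           # relative moisture in [0, 100]
monthly = moist.aggregation("M")                        # monthly median composites
monthly.export_envi("results/s1_moisture_monthly")
```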
{
"source": "jhbrito/HelloWorlds",
"score": 4
} |
#### File: jhbrito/HelloWorlds/HelloWorldPython.py
```python
print("Hello World")
# Builtin types:
# Boolean Type - bool
# Numeric Types - int, float, complex
# Binary Sequence Types - bytes, bytearray, memoryview
# Sequence Types - list, tuple, range
# Text Sequence Type - str
# Set Types - set, frozenset
# Mapping Types — dict
a = 1
b = 2
c = a + b
print("c=", c)
c = 4
if c == 3:
print("entrei no if")
elif c == 4:
print("entrei no elif")
else:
print("entrei no else")
y = 1
for x in range(10):
print("x =", x)
y = y + x
print("y =", y)
x = 0
y = 0
while x < 10:
print("x =", x)
y = y + x
x = x + 1
print("y =", y)
a = 1
b = 3.1415
c = a + b
a = 0.1
b = 0.2
c = a + b
print(c)
m = "olá"
print(m)
print(m[0:1])
o = "adeus"
p = m + " " + o
print("p:", p)
def f1(x1, x2):
yf1 = x1 * x2
return yf1
q = f1(10, 3)
print("q:", q)
a = (1, 3, 5, 7, 9)
print("a:", a)
print(a[3])
print(a[2:4])
print(a[2:])
# a[1]=10
b = [2, 4, 6, 8, 10]
print("b:", b)
print(b[3])
print(b[2:4])
print(b[2:])
b[1] = 10
c = list(a)
print("c:", c)
print(c[3])
print(c[2:4])
print(c[2:])
c[1] = 12
# sets - unordered collection with no duplicate elements
d = {1, 2, 3, 4, 3, 5}
print(d)
for i in range(10):
if i in d:
print(str(i), " is in d")
# dictionaries
words = dict()
words["BCVT"] = "Brito Computer Vision Team"
words["LEEC"] = "Licenciatura em Engenharia Electrotécnica e de Computadores"
words["EST"] = "Escola Superior de Tecnologia"
words["IPCA"] = "Instituto Politécnico do Cávado e do Ave"
print("BCVT - ", words["BCVT"])
print("Goodbye World")
```
#### File: jhbrito/HelloWorlds/HelloWorldQtOpenCV.py
```python
import sys
from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import QTimer, qVersion
import cv2
import numpy as nd
def img2pixmap(image):
height, width, channel = image.shape
bytesPerLine = 3 * width
qimage = QImage(image.data, width, height, bytesPerLine, QImage.Format_BGR888)
pixmap = QPixmap.fromImage(qimage)
return pixmap
def grabFrame():
if not cap.isOpened():
cap.open(0)
window.labelText.setText("Turning Camera ON")
ret, image = cap.read()
# isz=image.shape
# ls = window.labelFrameInput.geometry()
# window.labelFrameInput.setGeometry(10, 10, isz[1], isz[0])
# window.labelFrameOutput.setGeometry(isz[1]+11, 10, isz[1], isz[0])
edges = cv2.Canny(image, 100, 200)
edges2 = nd.zeros( image.shape , nd.uint8)
edges2[:,:,0] = edges
edges2[:,:,1] = edges
edges2[:,:,2] = edges
window.labelFrameInput.setPixmap(img2pixmap(image))
window.labelFrameOutput.setPixmap(img2pixmap(edges2))
def on_cameraON_clicked():
window.labelText.setText("Turning Camera ON")
qtimerFrame.start(50)
def on_cameraOFF_clicked():
qtimerFrame.stop()
if cap.isOpened():
cap.release()
window.labelText.setText("Turning Camera OFF")
print("Qt version: " + str(qVersion()))
print("OpenCV Version:",cv2.__version__)
cap = cv2.VideoCapture()
app = QtWidgets.QApplication(sys.argv)
window = uic.loadUi("qtcv.ui")
window.botaoCameraOn.clicked.connect(on_cameraON_clicked)
window.botaoCameraOff.clicked.connect(on_cameraOFF_clicked)
#window.labelFrameInput.setScaledContents(False)
window.labelFrameInput.setScaledContents(True)
window.labelFrameOutput.setScaledContents(True)
qtimerFrame = QTimer()
qtimerFrame.timeout.connect(grabFrame)
window.show()
app.exec()
``` |
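The three per-channel assignments in `grabFrame` replicate the single-channel Canny result into a 3-channel image for display; a standalone sketch of an equivalent conversion (synthetic frame, so it runs without a camera or the Qt UI file):
```python
import cv2
import numpy as np

frame = np.full((120, 160, 3), 128, dtype=np.uint8)   # synthetic grey frame
cv2.rectangle(frame, (40, 30), (120, 90), (255, 255, 255), -1)

edges = cv2.Canny(frame, 100, 200)                    # single-channel uint8
edges_bgr = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)   # same effect as copying into 3 channels

print(edges.shape, edges_bgr.shape)                   # (120, 160) (120, 160, 3)
```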
{
"source": "jhbruhn/ble_monitor",
"score": 3
} |
#### File: ble_monitor/ble_parser/ha_ble.py
```python
import logging
import struct
_LOGGER = logging.getLogger(__name__)
def to_int(value):
"""Convert to integer"""
return value & 0xFF
def unsigned_to_signed(unsigned, size):
"""Convert unsigned to signed"""
if (unsigned & (1 << size - 1)) != 0:
unsigned = -1 * ((1 << size - 1) - (unsigned & ((1 << size - 1) - 1)))
return unsigned
def to_sfloat(value):
"""Convert sfloat to integer"""
if len(value) != 2:
_LOGGER.debug("conversion to sfloat failed")
return 0
else:
byte_0 = value[0]
byte_1 = value[1]
mantissa = unsigned_to_signed(to_int(byte_0) + ((to_int(byte_1) & 0x0F) << 8), 12)
exponent = unsigned_to_signed(to_int(byte_1) >> 4, 4)
return mantissa * pow(10, exponent)
def parse_ha_ble(self, service_data_list, source_mac, rssi):
"""Home Assistant BLE parser"""
firmware = "HA BLE"
device_type = "HA BLE DIY"
ha_ble_mac = source_mac
result = {}
packet_id = None
for service_data in service_data_list:
if len(service_data) == service_data[0] + 1:
meas_type = (service_data[3] << 8) | service_data[2]
xobj = service_data[4:]
if meas_type == 0x2A4D and len(xobj) == 1:
(packet_id,) = struct.Struct("<B").unpack(xobj)
result.update({"packet": packet_id})
elif meas_type == 0x2A19 and len(xobj) == 1:
(batt,) = struct.Struct("<B").unpack(xobj)
result.update({"battery": batt})
elif meas_type == 0x2A6D and len(xobj) == 4:
(press,) = struct.Struct("<I").unpack(xobj)
result.update({"pressure": press * 0.001})
elif meas_type == 0x2A6E and len(xobj) == 2:
(temp,) = struct.Struct("<h").unpack(xobj)
result.update({"temperature": temp * 0.01})
elif meas_type == 0x2A6F and len(xobj) == 2:
(humi,) = struct.Struct("<H").unpack(xobj)
result.update({"humidity": humi * 0.01})
elif meas_type == 0x2A7B and len(xobj) == 1:
(dewp,) = struct.Struct("<b").unpack(xobj)
result.update({"dewpoint": dewp})
elif meas_type == 0x2A98 and len(xobj) == 3:
(flag, weight) = struct.Struct("<bH").unpack(xobj)
if flag == 0:
weight_unit = "kg"
factor = 0.005
elif flag == 1:
weight_unit = "lbs"
factor = 0.01
else:
weight_unit = "kg"
factor = 0.005
result.update({"weight": weight * factor, "weight unit": weight_unit})
elif meas_type == 0X2AE2 and len(xobj) == 1:
(value,) = struct.Struct("<B").unpack(xobj)
result.update({"binary": bool(value)})
elif meas_type == 0X2AEA and len(xobj) == 2:
(count,) = struct.Struct("<H").unpack(xobj)
if count == 0xFFFF:
count = "unknown"
result.update({"count": count})
elif meas_type == 0X2AEB and len(xobj) == 3:
count = int.from_bytes(xobj, "little")
if count == 0xFFFFFF:
count = "unknown"
result.update({"count": count})
elif meas_type == 0X2AF2 and len(xobj) == 4:
(enrg,) = struct.Struct("<I").unpack(xobj)
result.update({"energy": enrg * 0.001})
elif meas_type == 0X2AFB and len(xobj) == 3:
illu = int.from_bytes(xobj, "little")
result.update({"illuminance": illu * 0.01})
elif meas_type == 0x2B05 and len(xobj) == 3:
power = int.from_bytes(xobj, "little")
result.update({"power": power * 0.1})
elif meas_type == 0x2B18 and len(xobj) == 2:
(volt,) = struct.Struct("<H").unpack(xobj)
result.update({"voltage": volt / 64})
elif meas_type == 0x2BD6 and len(xobj) == 2:
pm25 = to_sfloat(xobj)
result.update({"pm2.5": pm25})
elif meas_type == 0x2BD7 and len(xobj) == 2:
pm10 = to_sfloat(xobj)
result.update({"pm10": pm10})
else:
_LOGGER.debug(
"Unknown data received from Home Assistant BLE DIY sensor device: %s",
service_data.hex()
)
if not result:
if self.report_unknown == "HA BLE":
_LOGGER.info(
"BLE ADV from UNKNOWN Home Assistant BLE DEVICE: RSSI: %s, MAC: %s, ADV: %s",
rssi,
to_mac(source_mac),
service_data_list
)
return None
# Check for duplicate messages
if packet_id:
try:
prev_packet = self.lpacket_ids[ha_ble_mac]
except KeyError:
# start with empty first packet
prev_packet = None
if prev_packet == packet_id:
# only process new messages
if self.filter_duplicates is True:
return None
self.lpacket_ids[ha_ble_mac] = packet_id
else:
result.update({"packet": "no packet id"})
# check for MAC presence in sensor whitelist, if needed
if self.discovery is False and ha_ble_mac not in self.sensor_whitelist:
_LOGGER.debug("Discovery is disabled. MAC: %s is not whitelisted!", to_mac(ha_ble_mac))
return None
result.update({
"rssi": rssi,
"mac": ''.join(f'{i:02X}' for i in ha_ble_mac),
"type": device_type,
"firmware": firmware,
"data": True
})
return result
def to_mac(addr: int):
"""Return formatted MAC address"""
return ':'.join(f'{i:02X}' for i in addr)
``` |
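A small worked example of the SFLOAT decoding that `to_sfloat` implements: the low byte plus the low nibble of the high byte form a signed 12-bit mantissa, and the high nibble is a signed 4-bit exponent. The byte values below are illustrative, not captured from a real sensor:
```python
def decode_sfloat(b0, b1):
    """Standalone restatement of to_sfloat for two raw bytes."""
    mantissa = b0 + ((b1 & 0x0F) << 8)
    if mantissa >= 2048:        # sign-extend the 12-bit mantissa
        mantissa -= 4096
    exponent = b1 >> 4
    if exponent >= 8:           # sign-extend the 4-bit exponent
        exponent -= 16
    return mantissa * 10 ** exponent

print(decode_sfloat(0x19, 0xF0))   # 2.5  (mantissa 25, exponent -1)
print(decode_sfloat(0xFF, 0x0F))   # -1   (mantissa 0xFFF sign-extends to -1, exponent 0)
```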
{
"source": "jhc660/TicTacToe_RL",
"score": 3
} |
#### File: jhc660/TicTacToe_RL/DeepTicTacToe_org.py
```python
import random
import csv
import os
from pathlib import Path
from tabulate import tabulate
from abc import abstractmethod
import keras.layers as Kl
import keras.models as Km
import numpy as np
import matplotlib.pyplot as plt
class TicTacToe():
def __init__(self, player1, player2, exp1=1, exp2=1):
self.state = '123456789'
player1 = globals()[player1]
self.player1 = player1(tag='X', exploration_factor=exp1)
player2 = globals()[player2]
self.player2 = player2(tag='O', exploration_factor=exp2)
self.winner = None
self.turn = 'X'
self.player_turn = self.player1
self.Xcount = 0
self.Ocount = 0
self.Tcount = 0
self.all_count = 0
def play_game(self):
if isinstance(self.player1, QAgent):
self.player1.exp_factor = 1
if isinstance(self.player2, QAgent):
self.player2.exp_factor = 1
while self.winner is None:
if type(self.player_turn) == Player:
print(self.turn)
self.print_game()
self.state = self.play_move()
self.game_winner()
if self.winner is not None:
break
self.print_game()
def play_to_learn(self, episodes):
for i in range(episodes):
print('Episode number: ' + str(i))
while self.winner is None:
self.state = self.play_move(learn=True)
self.game_winner()
if self.winner is not None:
break
self.state = self.play_move(learn=True)
self.game_winner()
# update last state
self.state = self.play_move(learn=True)
self.state = self.play_move(learn=True)
# update winning state
self.state = self.play_move(learn=True)
self.state = self.play_move(learn=True)
if i% 500 == 0:
self.print_bar()
print('-------------------')
self.player1.print_value = True
else:
self.player1.print_value = False
if i % 2000 == 0:
self.Xcount = 0
self.Ocount = 0
self.Tcount = 0
self.all_count = i
self.init_game()
self.print_summary()
self.player1.save_values()
self.player2.save_values()
def play_move(self, learn=False):
if self.turn == 'X':
if learn is True:
new_state = self.player1.make_move_and_learn(self.state, self.winner)
else:
new_state = self.player1.make_move(self.state, self.winner)
self.turn = 'O'
self.player_turn = self.player2
else:
if learn is True:
new_state = self.player2.make_move_and_learn(self.state, self.winner)
else:
new_state = self.player2.make_move(self.state, self.winner)
self.turn = 'X'
self.player_turn = self.player1
return new_state
def print_game(self):
s = list(self.state)
print(' {} | {} | {}'.format(s[0], s[1], s[2]))
print(' --------------')
print(' {} | {} | {}'.format(s[3], s[4], s[5]))
print(' --------------')
print(' {} | {} | {}'.format(s[6], s[7], s[8]))
print(' --------------')
print(' --------------')
def game_winner(self):
winner = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]
for line in winner:
s = self.state[line[0]] + self.state[line[1]] + self.state[line[2]]
if s == 'XXX':
self.winner = 'X'
break
elif s == 'OOO':
self.winner = 'O'
break
elif not any(s.isnumeric() for s in list(self.state)):
self.winner = 'No winner'
self.check_winner()
return self.winner
def check_winner(self):
if self.winner == 'X':
self.Xcount += 1
# print('The winner is X')
# print('')
# self.print_game()
elif self.winner == 'O':
self.Ocount += 1
# print('The winner is O')
# print('')
# self.print_game()
elif self.winner == 'No winner':
self.Tcount += 1
# print('No winner')
# print('')
# self.print_game()
def init_game(self):
self.state = '123456789'
self.winner = None
self.turn = 'X'
self.player_turn = self.player1
def print_bar(self):
plt.close()
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
x = ['X', 'Tie', 'O', 'Sum']
a = self.Xcount
b = self.Tcount
c = self.Ocount
d = self.all_count
aprec = 100*a / (a + b + c + 1)
bprec = 100*b / (a + b + c + 1)
cprec = 100*c / (a + b + c + 1)
ax1.clear()
ax2.clear()
bar1 = ax1.bar(x, [a, b, c, d])
bar1[0].set_color('r')
bar1[1].set_color('b')
ax1.set_ylim((0, d + 100))
plt.draw()
bar2 = ax2.bar(x[0:3], [aprec, bprec, cprec])
bar2[0].set_color('r')
bar2[1].set_color('b')
ax2.set_ylim((0, 100))
for rect in bar2:
height = rect.get_height()
ax2.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%d' % int(height),
ha='center', va='bottom')
plt.draw()
plt.pause(0.05)
def print_summary(self):
a = ['X', self.Xcount, 100 * self.Xcount / (self.Xcount + self.Ocount + self.Tcount)]
b = ['O', self.Ocount, 100 * self.Ocount / (self.Xcount + self.Ocount + self.Tcount)]
c = ['Tie', self.Tcount, 100 * self.Tcount / (self.Xcount + self.Ocount + self.Tcount)]
tab = tabulate([a, b, c], headers=['Player', 'num of wins', 'prec'])
print(tab)
class Player():
def __init__(self, tag, exploration_factor=1):
self.tag = tag
self.print_value = False
self.exp_factor = exploration_factor
def make_move(self, state, winner):
idx = int(input('Choose move number: '))
s = state[:idx-1] + self.tag + state[idx:]
return s
class Agent(Player):
def __init__(self, tag, exploration_factor=1):
super().__init__(tag, exploration_factor)
self.epsilon = 0.1
self.alpha = 0.5
self.prev_state = '123456789'
self.state = None
self.print_value = False
if self.tag == 'X':
self.op_tag = 'O'
else:
self.op_tag = 'X'
@abstractmethod
def calc_value(self, state):
pass
@abstractmethod
def learn_state(self, state, winner):
pass
def make_move(self, state, winner):
self.state = state
if winner is not None:
new_state = state
return new_state
p = random.uniform(0, 1)
if p < self.exp_factor:
new_state = self.make_optimal_move(state)
else:
moves = [s for s, v in enumerate(state) if v.isnumeric()]
idx = random.choice(moves)
new_state = state[:idx] + self.tag + state[idx + 1:]
return new_state
def make_move_and_learn(self, state, winner):
self.learn_state(state, winner)
return self.make_move(state, winner)
def make_optimal_move(self, state):
moves = [s for s, v in enumerate(state) if v.isnumeric()]
if len(moves) == 1:
temp_state = state[:moves[0]] + self.tag + state[moves[0] + 1:]
new_state = temp_state
return new_state
temp_state_list = []
v = -float('Inf')
for idx in moves:
v_temp = []
temp_state = state[:idx] + self.tag + state[idx + 1:]
moves_op = [s for s, v in enumerate(temp_state) if v.isnumeric()]
for idy in moves_op:
temp_state_op = temp_state[:idy] + self.op_tag + temp_state[idy + 1:]
v_temp.append(self.calc_value(temp_state_op))
# delete Nones (states with no stored value yet)
v_temp = list(filter(None.__ne__, v_temp))
if len(v_temp) != 0:
v_temp = np.min(v_temp)
else:
# encourage exploration
v_temp = 1
if v_temp > v:
temp_state_list = [temp_state]
v = v_temp
elif v_temp == v:
temp_state_list.append(temp_state)
try:
new_state = random.choice(temp_state_list)
except IndexError:  # random.choice raises IndexError on an empty sequence
print('temp state:', temp_state_list)
raise Exception('temp state empty')
return new_state
def reward(self, winner):
if winner is self.tag:
R = 1
elif winner is None:
R = 0
elif winner == 'No winner':
R = 0.5
else:
R = -1
return R
class QAgent(Agent):
def __init__(self, tag, exploration_factor=1):
super().__init__(tag, exploration_factor)
self.tag = tag
self.values = dict()
self.load_values()
def learn_state(self, state, winner):
if self.tag in state:
if self.prev_state in self.values.keys():
v_s = self.values[self.prev_state]
else:
v_s = int(0)
R = self.reward(winner)
if self.state in self.values.keys() and winner is None:
v_s_tag = self.values[state]
else:
v_s_tag = int(0)
self.values[self.prev_state] = v_s + self.alpha*(R + v_s_tag - v_s)
self.prev_state = state
def calc_value(self, state):
if state in self.values.keys():
return self.values[state]
def load_values(self):
s = 'values' + self.tag + '.csv'
try:
value_csv = csv.reader(open(s, 'r'))
for row in value_csv:
k, v = row
self.values[k] = float(v)
except:
pass
# print(self.values)
def save_values(self):
s = 'values' + self.tag + '.csv'
try:
os.remove(s)
except:
pass
a = csv.writer(open(s, 'a'))
for v, k in self.values.items():
a.writerow([v, k])
class DeepAgent(Agent):
def __init__(self, tag, exploration_factor=1):
super().__init__(tag, exploration_factor)
self.tag = tag
self.value_model = self.load_model()
@staticmethod
def state2array(state):
num_state = []
for s in state:
if s == 'X':
num_state.append(1)
elif s == 'O':
num_state.append(-1)
else:
num_state.append(0)
num_state = np.array([num_state])
return num_state
def learn_state(self, state, winner):
target = self.calc_target(state, winner)
self.train_model(target, 10)
self.prev_state = state
def load_model(self):
s = 'model_values' + self.tag + '.h5'
model_file = Path(s)
if model_file.is_file():
model = Km.load_model(s)
print('load model: ' + s)
else:
print('new model')
model = Km.Sequential()
model.add(Kl.Dense(18, activation='relu', input_dim=9))
model.add(Kl.Dense(18, activation='relu'))
model.add(Kl.Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mean_absolute_error', metrics=['accuracy'])
model.summary()
return model
def calc_value(self, state):
return self.value_model.predict(self.state2array(state))
def calc_target(self, state, winner):
if self.tag in state:
v_s = self.calc_value(self.prev_state)
R = self.reward(winner)
if winner is None:
v_s_tag = self.calc_value(state)
else:
v_s_tag = 0
target = np.array(v_s + self.alpha * (R + v_s_tag - v_s))
return target
def train_model(self, target, epochs):
X_train = self.state2array(self.prev_state)
if target is not None:
self.value_model.fit(X_train, target, epochs=epochs, verbose=0)
def save_values(self):
s = 'model_values' + self.tag + '.h5'
try:
os.remove(s)
except:
pass
self.value_model.save(s)
def check_player():
#print('QAgent X 1 and QAgent 1 0')
#game = TicTacToe('QAgent', 'QAgent', 1, 0)
#game.play_to_learn(1000)
#print('DeepAgent X 0.8 and DeepAgent 0.8')
game = TicTacToe('DeepAgent', 'DeepAgent', 1, 1)
game.play_to_learn(100)
#print('DeepAgent X 0 and QAgent 1, 0')
#game = TicTacToe('Player', 'DeepAgent', 0.8, 1)
#game.play_game()
check_player()
```
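A small numeric illustration of the TD(0) update used in `QAgent.learn_state` (the numbers are made up): with `alpha = 0.5`, a stored value of 0.2 for the previous state, a reward of 1 for a win and no successor value, the estimate moves halfway toward the new target:
```python
alpha = 0.5
v_s = 0.2       # current value of the previous state
R = 1           # reward for winning
v_s_tag = 0     # terminal state, so no successor value

v_s_new = v_s + alpha * (R + v_s_tag - v_s)
print(v_s_new)  # 0.6 -- halfway between 0.2 and the target of 1.0
```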
#### File: jhc660/TicTacToe_RL/TokyoBoard.py
```python
import Board
import cardDeck
class TokyoBoard(Board.Board):
def __init__(self):
super().__init__()
self.trainLines.append(Board.TrainLine(2,2))
self.trainLines[0].addStations(self.stations, 11)
self.trainLines.append(Board.TrainLine(2,4))
self.trainLines[1].addStation(self.trainLines[0].stations[0])
self.trainLines[1].addStation(self.trainLines[0].stations[1])
self.trainLines[1].addStation(self.trainLines[0].stations[2])
self.trainLines[1].addStation(self.trainLines[0].stations[3])
self.trainLines[1].addStation(self.trainLines[0].stations[4])
self.trainLines[1].addStation(self.trainLines[0].stations[5])
self.trainLines[1].addStations(self.stations, 8)
self.trainLines.append(Board.TrainLine(3,7))
self.trainLines[2].addStations(self.stations, 3)
self.trainLines[2].addStation(self.trainLines[1].stations[7])
self.trainLines[2].addStations(self.stations, 10)
self.trainLines.append(Board.TrainLine(3,5))
self.trainLines[3].addStations(self.stations, 4)
self.trainLines[3].addStation(self.trainLines[0].stations[8])
self.trainLines[3].addStations(self.stations, 1)
self.trainLines[3].addStation(self.trainLines[1].stations[9])
self.trainLines[3].addStations(self.stations, 3)
self.trainLines[3].addStation(self.trainLines[2].stations[5])
self.trainLines[3].addStations(self.stations, 4)
self.trainLines[3].addStation(self.trainLines[0].stations[5])
self.trainLines.append(Board.TrainLine(3,4))
self.trainLines[4].addStations(self.stations, 1)
self.trainLines[4].addStation(self.trainLines[0].stations[9])
self.trainLines[4].addStations(self.stations, 2)
self.trainLines[4].addStation(self.trainLines[3].stations[7])
self.trainLines[4].addStation(self.trainLines[3].stations[8])
self.trainLines[4].addStation(self.trainLines[1].stations[10])
self.trainLines[4].addStation(self.trainLines[2].stations[5])
self.trainLines[4].addStation(self.trainLines[3].stations[11])
self.trainLines[4].addStations(self.stations, 5)
self.trainLines.append(Board.TrainLine(2,4))
self.trainLines[5].addStation(self.trainLines[0].stations[10])
self.trainLines[5].addStation(self.trainLines[4].stations[2])
self.trainLines[5].addStations(self.stations, 1)
self.trainLines[5].addStation(self.trainLines[1].stations[9])
self.trainLines[5].addStation(self.trainLines[2].stations[4])
self.trainLines[5].addStations(self.stations, 1)
self.trainLines[5].addStation(self.trainLines[2].stations[5])
self.trainLines[5].addStations(self.stations, 4)
self.trainLines.append(Board.TrainLine(2,5))
self.trainLines[6].addStation(self.trainLines[5].stations[0])
self.trainLines[6].addStation(self.trainLines[5].stations[1])
self.trainLines[6].addStation(self.trainLines[5].stations[2])
self.trainLines[6].addStation(self.trainLines[5].stations[3])
self.trainLines[6].addStation(self.trainLines[3].stations[7])
self.trainLines[6].addStations(self.stations, 1)
self.trainLines[6].addStation(self.trainLines[3].stations[9])
self.trainLines[6].addStation(self.trainLines[2].stations[6])
self.trainLines[6].addStation(self.trainLines[5].stations[7])
self.trainLines[6].addStations(self.stations, 4)
self.trainLines.append(Board.TrainLine(3,6))
self.trainLines[7].addStations(self.stations, 3)
self.trainLines[7].addStation(self.trainLines[3].stations[8])
self.trainLines[7].addStation(self.trainLines[1].stations[10])
self.trainLines[7].addStation(self.trainLines[3].stations[9])
self.trainLines[7].addStations(self.stations, 2)
self.trainLines[7].addStation(self.trainLines[2].stations[7])
self.trainLines[7].addStations(self.stations, 1)
self.trainLines[7].addStation(self.trainLines[6].stations[9])
self.trainLines[7].addStation(self.trainLines[6].stations[10])
self.trainLines[7].addStations(self.stations, 1)
self.trainLines[7].addStation(self.trainLines[4].stations[12])
self.trainLines.append(Board.TrainLine(3,4))
self.trainLines[8].addStations(self.stations, 3)
self.trainLines[8].addStation(self.trainLines[3].stations[7])
self.trainLines[8].addStation(self.trainLines[3].stations[6])
self.trainLines[8].addStation(self.trainLines[3].stations[5])
self.trainLines[8].addStation(self.trainLines[1].stations[8])
self.trainLines[8].addStation(self.trainLines[1].stations[7])
self.trainLines[8].addStation(self.trainLines[3].stations[13])
self.trainLines[8].addStations(self.stations, 4)
def testRoutine():
tokyoBoardTest = TokyoBoard()
cardDeckTest = cardDeck.CardDeck()
print(tokyoBoardTest.getState())
tokyoBoardTest.makeMove(4, '6')
tokyoBoardTest.makeMove(5, 'c2')
tokyoBoardTest.makeMove(5, 's')
tokyoBoardTest.makeMove(4, '6')
tokyoBoardTest.makeMove(4, '6')
print(tokyoBoardTest)
print(tokyoBoardTest.getState())
print('Points: '+str(tokyoBoardTest.calculateScore()))
print(tokyoBoardTest.getValidMoves())
print(tokyoBoardTest.previewMove(1, '6'))
print(tokyoBoardTest)
print(tokyoBoardTest.getState()+cardDeckTest.getState())
testRoutine()
``` |
{
"source": "jhcgt4869/gugua_helps",
"score": 2
} |
#### File: jhcgt4869/gugua_helps/test.py
```python
import os
import asyncio
import paddle
import paddlehub as hub
from wechaty import (
Contact,
FileBox,
Message,
Wechaty,
ScanStatus,
)
from PIL import Image
import paddle.nn
import numpy as np
from paddle.vision.models import resnet101
import time
os.environ['WECHATY_PUPPET_SERVICE_TOKEN'] = '<PASSWORD>'
# reply: collection of canned auto-reply messages for the bot
reply = [
'''嗨~\n你好啊这里是“高质量”七夕孤寡小助手,很高兴为你服务。
小助手为你准备了以下3个高质量服务
1、很直很直的表白藏头诗— —回复[藏头诗]参与
2、“骚话语录”小助手教你说情话— —回复[情话]参与
3、女友死亡问答“我的口红是什么……”之口红种类识别— —回复[口红]参与
回复[帮助]即可获得小助手教学指南~~~
好消息!好消息!小助手给单身狗们准备了七夕大礼包!!!回复[大礼包]领取!''',
'欢迎使用~七夕藏头诗板块\n直接回复你要的藏的内容即可~~~\n格式如下:1+回复内容\n例如:回复“我喜欢你”就输入1我喜欢你\n(目前支持4-8个字如果超'
'出会自动截断)',
'哈喽!欢迎你找到宝藏内容,请开一个开头,小助手教你说情话~~~\n格式如下:2+回复内容\n例如:回复“我喜欢你”就输入2我喜欢你',
'''嗷呜~~~\n你还在为女友的灵魂发问而烦恼嘛?你还在为不知道女友口红是什么而烦恼嘛?小助手祝你一臂之力!
把女友口红照片发给小助手,小助手帮你识别女友的口红类型!\n回复— —[口红品牌]对目前支持查询的口红品牌进行查看\n回复— —[口红明细]查看具体的口红''']
helptxt = '''博客地址:https://blog.csdn.net/weixin_45623093/article/details/119484889
AI Stduio地址:https://aistudio.baidu.com/aistudio/projectdetail/2263052'''
khdata1 = '''古驰倾色柔纱润唇膏
古驰倾色丝润唇膏
古驰倾色琉光唇膏
古驰倾色华缎唇膏
古驰倾色绒雾唇膏
古驰倾色星辉唇膏
爱马仕唇妆系列缎光唇膏
阿玛尼「红管」臻致丝传奇绒哑光唇釉
阿玛尼「红黑管」哑光唇膏
阿玛尼「小胖丁」持色凝彩哑光染唇液
阿玛尼「5G」黑管
阿玛尼「黑」漆光迷情唇釉
迪奥烈艳蓝金唇膏
Dior「红管」花芯唇膏
DIOR迪奥魅惑釉唇膏
烈艳蓝金锁色唇釉
圣罗兰纯口红
圣罗兰细管纯口红(小金条)
圣罗兰莹亮纯魅唇膏
圣罗兰细管纯口红(小黑条)
娇兰臻彩宝石唇膏
娇兰亲亲唇膏
娇兰唇蜜
CHILI 小辣椒
魅可清新漆光唇釉
完美日记小细跟口红
完美日记唇彩
完美日记口红
兰蔻唇釉
兰蔻唇膏
娇韵诗丰盈唇膏
香奈儿可可小姐唇膏
CL路铂廷女王权杖(萝卜丁口红)
CL路铂廷女王权杖黑管(萝卜丁口红)
纪梵希小羊皮
纪梵希羊皮唇釉
纪梵希禁忌之吻星云唇膏
3CE细管唇膏
3CE哑光口红
3CE唇泥
3CE三熹玉云朵唇釉
UNNY唇泥
UNNY雾面雪雾花园唇釉
植村秀小黑方唇膏口红
植村秀无色限方管漆光唇釉口红
TOM FORD唇膏
雅诗兰黛口红金管
橘朵哑光唇釉
橘朵小花管唇膏
稚优泉口红
稚优泉无惧幻想绒雾唇釉
稚优泉琉光之镜水光唇釉
稚优泉 绒情迷雾哑光唇釉'''
khdata2 = '''Gucci(古驰)
爱马仕
阿玛尼(Armani)
Dior(奥迪)
YSL(圣罗兰)杨树林
GUerlain(娇兰)
mac(魅可)
完美日记
兰蔻(Lancome)
娇韵诗(clarins)
香奈儿(Chanel)
胡萝卜丁(christianlouboutin)
Givenhy(纪梵希)
3CE
unny
植村秀
<NAME> (TF)
雅诗兰黛(<NAME>)
橘朵(JudydoLL)
稚优泉'''
class MyBot(Wechaty):
async def on_message(self, msg: Message):
talker = msg.talker()
await talker.ready()
if msg.text() == "嗨~":
await talker.say(reply[0])
if msg.text() == "藏头诗":
await talker.say(reply[1])
if msg.text() == "情话":
await talker.say(reply[2])
if msg.text() == "口红":
await talker.say(reply[3])
if msg.text() == "帮助":
await talker.say(helptxt)
if msg.text()[0] == "1":
await talker.say('已经收到你的心意' + msg.text()[1:] + ',正在生产"藏头诗"~~~')
print(msg.text()[1:])
await talker.say(cts(msg.text()[1:]))
if msg.text()[0] == "2":
await talker.say('稍等片刻,小助手马上教你说"情话"~~~')
await talker.say(qh(msg.text()[1:]))
if msg.text() == "口红明细":
await talker.say(khdata1)
if msg.text() == "口红品牌":
await talker.say(khdata2)
if msg.type() == Message.Type.MESSAGE_TYPE_IMAGE:
await talker.say('已收到图像,开始验证')
# Convert the Message to a FileBox
file_box_user_image = await msg.to_file_box()
# Get the image file name
img_name = file_box_user_image.name
# Local path where the image will be saved
img_path = './image/' + img_name
# Save the image as a local file
await file_box_user_image.to_file(file_path=img_path)
await talker.say(kh(img_path))
if msg.text() == "大礼包":
await talker.say("孤寡~孤寡~孤寡~")
time.sleep(3)
await talker.say("祝你七夕孤寡~~~")
time.sleep(4)
await talker.say("你孤寡我孤寡大家一起孤寡寡\n下面是小助手送你的孤寡礼物!")
time.sleep(3)
for i in range(3):
await talker.say("孤寡 孤寡 孤寡 "*50)
time.sleep(2)
await talker.say("七夕节快乐~~~狗粮管够~~~")
def cts(data):
long = len(data)
if long <= 4:
long = 4
else:
long = 8
print(long)
module = hub.Module(name="ernie_gen_acrostic_poetry", line=long, word=7)
results = module.generate(texts=[data], use_gpu=True, beam_width=1)
for result in results:
print(results)
return result[0]
def qh(data):
module = hub.Module(name="ernie_gen_lover_words")
results = module.generate(texts=[data], use_gpu=True, beam_width=1)
for result in results:
return result[0]
def kh(path):
lablelist = {0: '古驰倾色柔纱润唇膏', 1: '古驰倾色丝润唇膏', 2: '古驰倾色琉光唇膏', 3: '古驰倾色华缎唇膏', 4: '古驰倾色绒雾唇膏',
5: '古驰倾色星辉唇膏', 6: '爱马仕唇妆系列缎光唇膏', 7: '阿玛尼红管臻致丝传奇绒哑光唇釉', 8: '阿玛尼红黑管哑光唇膏',
9: '阿玛尼小胖丁持色凝彩哑光染唇液', 10: '阿玛尼5G黑管', 11: '阿玛尼黑漆光迷情唇釉', 12: '迪奥烈艳蓝金唇膏',
13: 'Dior红管花芯唇膏', 14: 'Dior迪奥魅惑釉唇膏', 15: '烈艳蓝金锁色唇釉', 16: '圣罗兰纯口红',
17: '圣罗兰细管纯口红(小金条)', 18: '圣罗兰莹亮纯魅唇膏', 19: '圣罗兰细管纯口红(小黑条)', 20: '娇兰臻彩宝石唇膏',
21: '娇兰亲亲唇膏', 22: '娇兰唇蜜', 23: 'CHILI 小辣椒', 24: '魅可清新漆光唇釉', 25: '完美日记小细跟口红', 26: '完美日记唇彩',
27: '完美日记口红', 28: '兰蔻唇釉', 29: '兰蔻唇膏', 30: '娇韵诗丰盈唇膏', 31: '香奈儿可可小姐唇膏',
32: 'CL路铂廷女王权杖(萝卜丁口红)', 33: 'CL路铂廷女王权杖黑管(萝卜丁口红)', 34: '纪梵希小羊皮', 35: '纪梵希羊皮唇釉',
36: '纪梵希禁忌之吻星云唇膏', 37: '3CE细管唇膏', 38: '3CE哑光口红', 39: '3CE唇泥', 40: '3CE三熹玉云朵唇釉',
41: 'UNNY唇泥', 42: 'UNNY雾面雪雾花园唇釉', 43: '植村秀小黑方唇膏口红', 44: '植村秀无色限方管漆光唇釉口红',
45: 'TOM FORD唇膏', 46: '雅诗兰黛口红金管', 47: '橘朵哑光唇釉', 48: '橘朵小花管唇膏', 49: '稚优泉口红',
50: '稚优泉无惧幻想绒雾唇釉', 51: '稚优泉琉光之镜水光唇釉', 52: '稚优泉 绒情迷雾哑光唇釉'}
img = Image.open(path)  # open the image
img = img.convert('RGB')
img = img.resize((100, 100), Image.ANTIALIAS)  # normalize the size
img = np.array(img).astype('float32')  # convert to an array
img = img.transpose((2, 0, 1))  # image is read as rgb,rgb,rgb,...; transpose to rrr...,ggg...,bbb...
img = img / 255.0  # scale to [0, 1]
model_state_dict = paddle.load('./resnet101.pdparams')  # load the trained weights
model = resnet101()  # instantiate the model
model.set_state_dict(model_state_dict)
model.eval()
# print(paddle.to_tensor(img).shape)
ceshi = model(paddle.reshape(paddle.to_tensor(img), (1, 3, 100, 100)))  # run inference
return lablelist[np.argmax(ceshi.numpy())]  # look up the predicted label
async def main():
bot = MyBot()
await bot.start()
asyncio.run(main())
``` |
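A standalone sketch of the preprocessing that `kh()` applies before the ResNet-101 classifier (a synthetic image stands in for the photo received over WeChat, and no Paddle weights are loaded):
```python
import numpy as np
from PIL import Image

img = Image.new("RGB", (640, 480), (200, 30, 60))   # stand-in for the saved photo
img = img.resize((100, 100), Image.LANCZOS)         # ANTIALIAS in the Pillow version used above
arr = np.array(img).astype("float32")               # H x W x C
arr = arr.transpose((2, 0, 1)) / 255.0              # C x H x W, scaled to [0, 1]
batch = arr.reshape((1, 3, 100, 100))               # NCHW batch expected by the model
print(batch.shape, float(batch.min()), float(batch.max()))
```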
{
"source": "jhchang-lanl/Draco",
"score": 3
} |
#### File: compton_tools/python/common_compton.py
```python
import numpy as np
# ------------------------------------------------------------------------------------------------ #
# These are the common functions that are used to read and write Compton data
# ------------------------------------------------------------------------------------------------ #
def print_grids(grids, fileroot, verbosity=False):
'''Print grids to files based on their names'''
# Save to files
for key in grids:
filePath = '{}.{}_grid'.format(fileroot, key)
if verbosity:
print('Saving {}'.format(filePath))
np.savetxt(filePath, grids[key])
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def print_mat(mat, fileroot, verbosity=False):
'''Print mat to files, one for each temperature '''
for i in range(mat.shape[0]):
filePath = '{}.mat_T{}'.format(fileroot, i)
if verbosity:
print('Saving {}'.format(filePath))
np.savetxt(filePath, mat[i, :, :])
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def read_data(fileroot, verbosity):
'''Read mat and grids data'''
# Read grids
keys = ['T', 'Efrom', 'Eto', 'Ebdr']
grids = {}
for key in keys:
# Read grid
filePath = '{}.{}_grid'.format(fileroot, key)
if verbosity:
print('Reading {}'.format(filePath))
grids[key] = np.loadtxt(filePath)
# Corner case: size-1 array
try:
# np defines a len method but it throws an exception
len(grids[key])
except TypeError:
grids[key] = grids[key] * np.ones(1)
# Print grid
if verbosity:
print(key)
print(grids[key])
# Read mat
numTs = len(grids['T'])
numEsfrom = len(grids['Efrom'])
numEsto = len(grids['Eto'])
mat = np.zeros((numTs, numEsto, numEsfrom))
for i in range(numTs):
# Read mat for one T
filePath = '{}.mat_T{}'.format(fileroot, i)
if verbosity:
print('Reading {}'.format(filePath))
mat[i, :, :] = np.loadtxt(filePath)
# Print mat for one T
if verbosity > 1:
print(mat[i, :, :])
# Return data
return grids, mat
# ------------------------------------------------------------------------------------------------ #
``` |
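A small round-trip sketch of the file-naming conventions above (assumes this module is importable as `common_compton` and that the working directory is writable; the grid values are synthetic):
```python
import numpy as np
import common_compton as cc   # assumption: module is on the path

grids = {'T': np.array([1.0, 2.0]),
         'Efrom': np.array([0.1, 0.2, 0.3]),
         'Eto': np.array([0.1, 0.2, 0.3]),
         'Ebdr': np.array([0.05, 0.15, 0.25, 0.35])}
mat = np.random.rand(2, 3, 3)                     # [temperature, E_to, E_from]

cc.print_grids(grids, 'demo', verbosity=True)     # demo.T_grid, demo.Efrom_grid, ...
cc.print_mat(mat, 'demo', verbosity=True)         # demo.mat_T0, demo.mat_T1
grids2, mat2 = cc.read_data('demo', verbosity=0)
print(np.allclose(mat, mat2))                     # True
```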
{
"source": "jhchein/freezer-ml-pipeline",
"score": 2
} |
#### File: src/deployment/score.py
```python
import json
import logging
import os
import joblib
import pandas as pd
from sktime.utils.load_data import from_long_to_nested
from utils import create_response, get_connection_device_id
TIMESERIESLENGTH = 10
def init():
global model
# The AZUREML_MODEL_DIR environment variable indicates
# a directory containing the model file you registered.
model_filename = "model.pkl"
model_path = os.path.join(os.environ["AZUREML_MODEL_DIR"], model_filename)
model = joblib.load(model_path)
logging.info("Model loaded.")
def run(data):
has_error = False
logging.info("started run.")
# CONVERT STREAM ANALYTICS TO SKTIME FORMAT
logging.info("loading json.")
data = json.loads(data)
logging.info("json loaded.")
# Parse timestamps and temperature data
time_created_start = data.get("allevents")[0].get("timeCreated")
time_created_end = data.get("allevents")[-1].get("timeCreated")
temperature_data = [event.get("temperature") for event in data.get("allevents")]
logging.info(f"time_created_start: {time_created_start}")
logging.info(f"time_created_end: {time_created_end}")
logging.info(f"temperature_data: {temperature_data}")
# Check connection_device_id
connection_device_id, has_error, error_message = get_connection_device_id(data)
if has_error:
return create_response(has_error=has_error, error_message=error_message)
# Assert time series has at least TIMESERIESLENGTH elements
if len(temperature_data) < TIMESERIESLENGTH:
error_message = f"Time series of length {len(temperature_data)} does not have enough samples ({TIMESERIESLENGTH} samples required)."
logging.warning(error_message)
return create_response(has_error=True, error_message=error_message)
# Convert data to sktime format
case_id, dim_id = 0, 0
try:
long_data = [
[case_id, dim_id, reading_id, reading_data]
for reading_id, reading_data in enumerate(
temperature_data[-TIMESERIESLENGTH:]
)
]
except Exception as e:
error_message = (
f"Could not convert dataset to long format due to exception: '{e}'"
)
logging.error(error_message)
return create_response(has_error=True, error_message=error_message)
# Predict
long_df = pd.DataFrame(
long_data, columns=["case_id", "dim_id", "reading_id", "value"]
)
sktime_df = from_long_to_nested(long_df)
prediction = model.predict(sktime_df).tolist()[0]
return create_response(
prediction=prediction,
connection_device_id=connection_device_id,
time_created_start=time_created_start,
time_created_end=time_created_end,
)
``` |
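A small sketch of the long-format rows that `run()` assembles before handing the window to sktime (ten fake temperature readings for a single case and dimension; assumes an sktime release that still ships `from_long_to_nested` under `sktime.utils.load_data`):
```python
import pandas as pd
from sktime.utils.load_data import from_long_to_nested  # older sktime API, as used above

temps = [4.0, 4.1, 4.3, 4.2, 4.4, 4.6, 4.5, 4.7, 4.9, 5.0]   # fake sensor readings
long_rows = [[0, 0, i, t] for i, t in enumerate(temps)]      # case_id, dim_id, reading_id, value

long_df = pd.DataFrame(long_rows, columns=["case_id", "dim_id", "reading_id", "value"])
nested_df = from_long_to_nested(long_df)   # one row, one cell holding the whole series
print(nested_df.shape)                     # (1, 1)
```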
{
"source": "jhchen3121/wechat_shop",
"score": 3
} |
#### File: core_backend/database/connect.py
```python
import os
import string
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker
from conf import DBURL
from sqlalchemy import Table, Column, BigInteger, Integer, String, ForeignKey, Boolean, Date, DateTime,Text
from sqlalchemy.orm import relationship, backref,mapper,composite
from sqlalchemy.schema import Sequence
import datetime
class connect(object):
pass
con=connect()
Session = sessionmaker()
con.metadata= MetaData()
def connect(url=DBURL):
orm_debug=os.getenv("ORM_DEBUG")
if orm_debug is None:
con.engine=create_engine(url)
else:
con.engine=create_engine(url, echo=orm_debug)
con.connection=con.engine.connect()
con.session=Session(bind=con.connection)
con.metadata.bind=con.engine
#connect(DBURL)
##
# @brief Used to get a Table object reflected from the database schema
#
# @param name table name
#
# @return
def get_table(name):
return Table(name, con.metadata, autoload=True)
##
# @brief Used to get a new database session
#
# @return
def get_session():
Session = sessionmaker(con.engine)
return Session()
##
# @brief create mapper class
#
# @param clazz
# @param table
#
# @return
def get_mapper(clazz, table):
if isinstance(table, Table):
return mapper(clazz, table)
elif isinstance(table, str):
load_table = get_table(table)
return mapper(clazz, load_table)
def drop_table( name ):
get_table(name).drop(con.engine)
def create_table( name ):
get_table(name).create(con.engine)
if __name__ == '__main__':
connect()
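# Illustrative usage sketch (assumes a table named 'user' exists in the connected schema;
# the names below are placeholders, not part of the original module):
# class User(object):
#     pass
# get_mapper(User, 'user')
# session = get_session()
# print session.query(User).count()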
```
#### File: core_backend/libs/exception.py
```python
import sys, traceback
import logging
logger = logging.getLogger(__name__)
class Error(Exception):
""" 标准错误接口"""
def __init__(self, code, msg):
if type(msg) == unicode:
super(Exception, self).__init__(msg.encode('utf-8'))
else:
super(Exception, self).__init__(msg)
self.code = code
self.msg = msg
class PrepareError(Error):
""" 预处理错误"""
pass
class PostError(Error):
""" 结束处理错误"""
pass
if __name__ == '__main__':
try :
raise Error(-1, "error")
except Error, e:
print e.code
print e.msg
except:
print traceback.format_exception(*sys.exc_info())
```
#### File: core_backend/service/handler.py
```python
from core_backend import context
from core_backend import conf
from core_backend.rpc.amqp import AMQPRpc
from functools import wraps
from contextlib import contextmanager
from core_backend.libs.exception import Error
import sys
import traceback
import logging
import plugin
import settings
import pprint
import tempfile
logger = logging.getLogger(__name__)
@contextmanager
def service_handler(instance):
""" 标准服务调用"""
if not isinstance(instance, handler):
raise Exception("instance is not a service handler")
logger.debug("begin to dispatch service: %s", instance._service)
service_prepare(instance)
instance._state = 'PREPARE'
try:
yield instance
instance._state = 'SUCCESS'
logger.debug("service instance %s has bee dispatched",
instance._service)
instance.response(0, u"处理成功")
except Error, e:
logger.error('error to dispatch service %s, %s', e.code, e.msg)
instance.response(e.code, e.msg)
except:
logger.error('error to dispatch service %s', instance._service)
instance._state = 'FAIL'
exc_type, exc_value, exc_traceback = sys.exc_info()
err_stack = traceback.format_exception(
exc_type, exc_value, exc_traceback)
for line in err_stack:
logger.error(line.strip())
instance.response(-1, u"调用服务[%s]失败:%s" %
(instance._service, ','.join(exc_value)))
finally:
service_post(instance)
instance._state = 'TERMINATE'
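# Rough sketch of how the framework is expected to drive a handler (illustrative only;
# 'SomeHandler' and the request object are placeholders, not defined in this module):
# instance = SomeHandler('some.service', request)
# with service_handler(instance):
#     service_dispatch(instance)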
def service_decorator(callback):
""" 服务步骤decorator """
@wraps(callback)
def wrapper(*args, **kwargs):
instance = args[0]
result = callback(*args, **kwargs)
logger.debug("service %s:%s has being dispathced",
instance._service, callback.func_name)
return result
return wrapper
@service_decorator
def service_prepare(instance):
# new style of configuration
if hasattr(settings, 'DB_URL'):
context.connect(settings.DB_URL)
else:
context.connect()
with context.session_scope() as session:
return instance.prepare_request(session)
@service_decorator
def service_post(instance):
with context.session_scope() as session:
return instance.post_request(session)
@service_decorator
def service_dispatch(instance):
with context.session_scope(instance=instance) as session:
logger.debug(
u"**************** SERVICE 【%s】 START ******************" % (instance._service))
logger.debug(u"请求报文: %s", instance.context.request)
instance.context.session = session
# plugins share the same session as dispatch
plg_handler = plugin.PluginHandler(instance, session)
plg_handler.load_plugins(settings.PLUGINS)
plg_handler.run_plugins()
result = instance.dispatch(session)
plg_handler.run_post_plugins()
logger.debug(
u"++++++++++++++++ SERVICE 【%s】 END ++++++++++++++++++" % (instance._service))
return result
class handler(object):
xa = False
_record_jrnl = True
"""
service_code  service name
channel       RabbitMQ channel, used to dispatch messages by the handler itself
deliver       message delivery attributes
properties    message properties
body          message body / JSON packet
"""
def __init__(self, service, request):
""" @param _service servce code or name
@param _request RabbitMQ information
@param body request body packet
_respond check whether current service is respond
_responable check whether service need to response
"""
self._service = service
self._request = request
self.body = request.body
self.context = context.Context(request.body, _request=self._request)
self._respond = False
self._responable = True if self._get_reply_queue() is not None else False
self._state = 'INIT'
# whether to record logs
"""
call user's init
"""
self.init()
def init(self):
pass
def post(self, session):
raise Error(-1, 'method POST undefined.')
def get(self, session):
raise Error(-1, 'method GET undefined.')
def delete(self, session):
raise Error(-1, 'method DELETE undefined.')
def put(self, session):
raise Error(-1, 'method PUT undefined.')
def _get_reply_queue(self):
""" 根据rabbitmq信息获取响应队列"""
properties = self._request.properties
if properties.reply_to is not None:
res = properties.reply_to
logger.debug("response queue is :%s", res)
return res
else:
return None
def _get_dlq(self):
""" 取死信队列"""
properties = self._request.properties
basic_deliver = self._request.basic_deliver
if properties.headers.has_key('dlq'):
dlq = properties.headers.get('dlq')
logger.error("Reply queue not defined, using dlq:%s", dlq)
return dlq
else:
logger.debug('MQ properties:%s' % (properties))
dlq = basic_deliver.routing_key + ".dlq"
logger.error("Reply queue and DLQ not defined, using dlq:%s", dlq)
return dlq
def allow_anonymous(self):
'''
Anonymous access is not allowed by default.
Override this method if the service needs to support anonymous access.
'''
return False
def dlq_declare(self, frame):
logger.debug('DLQ Queue [%s] Declared.' % (self._dlq))
def response(self, code, msg):
""" 用于返回失败或错误信息 """
if self._responable is False and code is 0:
""" FIXME responable 需要使用其它参数定义,而非reply_to?"""
return
if self._respond is True:
logger.warning(u"当前服务[%s]已回复", self._service)
return
reply_queue = self._get_reply_queue()
if reply_queue is None:
# FIXME: messages in the DLQ still need to be handled; this part needs refactoring.
# It should carry at least: 1. the original request packet, 2. the error cause, 3. the original service.
# Could all services share a single DLQ instead of one DLQ per service, so that only one handler service is needed?
DLQ = self._get_dlq()
logger.error(
"serice [%s] error:[%s,%s], put message to DLQ [%s]", self._service, code, msg, DLQ)
self._dlq = DLQ
self._request.channel.queue_declare(
queue=DLQ, durable=True, callback=self.dlq_declare)
self._request.channel.basic_publish(
exchange='', routing_key=DLQ, properties=self._request.properties, mandatory=1, body=self.body)
else:
if code != 0:
logger.error("%s,%s", code, msg)
else:
logger.info("service %s dispatched ok:%s,%s",
self._service, code, msg)
self.context.error(code, msg)
logger.debug(u"响应报文: %s", self.context.response)
payload = self.context.jsonify()
# avoid slowing down the response when the body is too large (log only the first 2048 chars)
logger.debug("service response:%s", payload[:2048])
self._request.channel.basic_publish(
exchange='', routing_key=reply_queue, properties=self._request.properties, mandatory=1, body=payload)
self._respond = True
def dispatch(self, session):
'''
If the service does not care about the request method, simply override this method;
otherwise, implement the corresponding method handler (post/get/delete/put).
'''
callback_func = {
"POST": self.post,
"GET": self.get,
"DELETE": self.delete,
"PUT": self.put
}
req_method = self.context.request.header.method
if req_method not in callback_func.keys():
raise Error(-1, "method: %s not supported." % (req_method,))
return callback_func[req_method](session)
def new_file(self, filename):
"""
Create a temporary file whose content will be sent to the client.
"""
resp_header = self.context.response.header
resp_header.send_file_name = filename
resp_header.tmp_file_name = tempfile.mktemp()
return open(resp_header.tmp_file_name, 'w')
def prepare_request(self, session):
""" 服务前准备"""
logger.debug("default prepare for service...")
def post_request(self, session):
""" 服务后处理"""
logger.debug("default post for service...")
```
#### File: core_backend/service/plugin.py
```python
import sys, traceback
from core_backend import context
from core_backend.libs.exception import Error
import logging
#logger = Log.getDebugLogger()
#logger.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
class plugin(object):
def __init__(self, handler, session):
self.handler = handler
self.session = session
self.context = self.handler.context
self.request = self.context.request
self.service = self.handler._service
def process(self):
pass
def post_process(self):
pass
class PluginHandler(object):
def __init__(self, handler, session):
self.handler = handler
self.session = session
self.plg_modules = []
self.plg_inst_list = []
def import_module(self, module, fromlist):
# return __import__(self.get_module(m), fromlist=["plugins"])
return __import__(module, fromlist=fromlist)
def load_plugins(self, plg_module):
plgconfig = self.import_module(plg_module, [plg_module])
module_files = plgconfig.plugins_modules
for f in module_files:
m = self.import_module(plg_module + '.' + f, [plg_module])
self.plg_modules.append(m)
ins = m.Plugin(self.handler, self.session)
self.plg_inst_list.append(ins)
def run_plugins(self):
for ins in self.plg_inst_list:
ins.process()
def run_post_plugins(self):
for ins in self.plg_inst_list:
ins.post_process()
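# Illustrative plugin package layout assumed by load_plugins (names are placeholders):
# settings.PLUGINS points at a package name whose __init__.py defines
#     plugins_modules = ['audit', 'auth']
# and each listed module provides a Plugin(handler, session) class derived from `plugin`.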
```
#### File: core_backend/utils/date.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
from datetime import timedelta
from datetime import date, datetime
DATE_FMT = "%Y-%m-%d"
DATE_FMT2 = "%Y%m%d"
def convert2date(in_date):
'''
Convert in_date to a datetime.date object.
'''
if isinstance(in_date, str) or isinstance(in_date, unicode):
if len(in_date) == 10:
return datetime.strptime(in_date, DATE_FMT).date()
elif len(in_date) == 8:
return datetime.strptime(in_date, DATE_FMT2).date()
else:
raise ValueError("无法转换为日期类型: %s" % in_date)
elif isinstance(in_date, date):
return in_date
elif isinstance(in_date, datetime):
return in_date.date()
else:
raise ValueError("无法转换为日期类型: %s" % in_date)
def date_range(from_date, thru_date):
'''
Return an iterator over the dates in the given range, inclusive of both from_date and thru_date.
'''
from_date = convert2date(from_date)
thru_date = convert2date(thru_date)
date = from_date
while date <= thru_date:
yield date.strftime(DATE_FMT)
date = date + timedelta(days=1)
def day_delta(a_date, b_date):
'''
Number of days between a_date and b_date,
i.e. a_date - b_date.
'''
a = convert2date(a_date)
b = convert2date(b_date)
return (a - b).days
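# Illustrative doctest-style examples (not part of the original module):
# >>> convert2date('2020-01-31')
# datetime.date(2020, 1, 31)
# >>> list(date_range('20200101', '20200103'))
# ['2020-01-01', '2020-01-02', '2020-01-03']
# >>> day_delta('2020-01-03', '2020-01-01')
# 2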
```
#### File: core_backend/utils/menu.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
import xlrd
import json
import re
import os
import sys
class MenuItem:
def __repr__(self):
return "\t" *self.level + self.name.encode('utf-8') + ":" + str(len(self.sub_menus))
def to_dict(self):
d = {}
d['title'] = self.name.encode('utf-8')
if self.icon:
d['icon'] = "fa " + self.icon
if self.issue and self.issue[-3:] != "000":
d['issue'] = self.issue
if os.environ.get('BIZUI_VER', "2") == "1":
d['href'] = '#/issue/%s' % self.issue
else:
d['href'] = '#/page/%s' % self.issue
if self.sub_menus:
d['sub_menus'] = [s.to_dict() for s in self.sub_menus]
if self.show == 1:
d['show'] = True
return d
def get_rows(file_path):
data = xlrd.open_workbook(file_path)
sheet_name = u'菜单'
table = data.sheet_by_name(sheet_name)
nrows = table.nrows
rows = []
for i in range(1, nrows):
#enable = table.row(i)[5].value
#if enable == 1:
# rows.append(table.row(i))
rows.append(table.row(i))
perm_dict = dict()
sheet_name = u'权限类型'
data = xlrd.open_workbook(file_path)
table = data.sheet_by_name(sheet_name)
nrows = table.nrows
for i in range(1, nrows):
row = table.row(i)
perm_dict[row[0].value] = row[1].value
return rows, perm_dict
def get_menu_list(rows, perm_dict):
menu_list = []
menus = [None, None, None]
for row in rows:
for i in (0,1,2):
if row[i].value:
item = MenuItem()
item.name = row[i].value
item.level = i
item.icon = row[3].value
item.issue = row[4].value
item.show = row[5].value
item.sub_menus = []
item.tran_code = row[6].value
# default permission configuration
perm_type = row[7].value
if perm_type:
item.perm_type = perm_dict[perm_type]
item.perm_params = row[8].value
else:
item.perm_type = None
item.service = row[9].value
if i > 0:
parent = menus[i-1]
if parent:
parent.sub_menus.append(item)
menus[i] = item
if i == 0:
menu_list.append(item)
return menu_list
def get_perms_type(file_path):
sheet_name = u'权限类型'
data = xlrd.open_workbook(file_path)
table = data.sheet_by_name(sheet_name)
nrows = table.nrows
perm_dict = dict()
for i in range(1, nrows):
row = table.row(i)
perm_dict[row[0].value] = row[1].value
return perm_dict
if __name__ == '__main__':
file_path = sys.argv[1]
rows, perm_dict = get_rows(file_path)
menu_list = get_menu_list(rows, perm_dict)
menu_dict = {"body": {"menus": [menu.to_dict() for menu in menu_list]}}
print json.dumps(menu_dict, indent=4)
```
#### File: src/core_backend/websocket.py
```python
import platform
import os
import sys
import time
import uuid
import pika
import tornado.ioloop
import tornado.web
import tornado.process
import tornado.httpserver
import tornado.websocket
import logging
import json
import pprint
# import the base RabbitMQ configuration
# compatible with configuring MQ via core_backend.conf as well as via settings
try:
from settings import EXCHANGE, EXCHANGE_TYPE, QUEUE, ROUTING_KEY, ROUTING_PREFIX
except:
print "settings import error"
from core_backend.conf import EXCHANGE, EXCHANGE_TYPE, QUEUE, ROUTING_KEY, ROUTING_PREFIX
from core_backend.context import object_packet
logger=logging.getLogger(__name__)
from pika.adapters.tornado_connection import TornadoConnection
class PikaConsumer(object):
"""A modified class as described in pika's demo_tornado.py.
It handles the connection for the Tornado instance. Messaging/RPC
callbacks are handled by the Tornado RequestHandler above."""
#FIXME queue and routing key in configuration file
def __init__(self, exchange = EXCHANGE, queue='wep.service.user', routing_key="user.*",
amqp_url="amqp://guest:[email protected]:5672/%2F"):
self.connecting = False
self.connection = None
self.channel = None
self.queue = queue
self.exchange = exchange
self.routing_key = routing_key
self.amqp_url=amqp_url
# listeners are stored in a dict
self.event_listeners = dict()
def connect(self):
if self.connecting:
pika.logging.info('Already connecting to RabbitMQ.')
return
pika.logging.info("Connecting to RabbitMQ")
self.connecting = True
# FIXME using amqp_url to instead
creds = pika.PlainCredentials('guest', 'guest')
params = pika.URLParameters(self.amqp_url)
#pika.ConnectionParameters(host='localhost', port=5672,
#virtual_host='/', credentials=creds)
self.connection = TornadoConnection(params, on_open_callback=self.on_connect)
self.connection.add_on_close_callback(self.on_closed)
def on_connect(self, connection):
self.connection = connection
connection.channel(self.on_channel_open)
def on_channel_open(self, channel):
pika.logging.info('Channel Open')
self._channel = channel
# I'm having trouble using named exchanges.
channel.exchange_declare(exchange=self.exchange, exchange_type='topic',
callback=self.on_exchange_declare)
def on_exchange_declare(self, frame):
pika.logging.info("Exchange declared.")
self.qname = self.queue + "." + str(os.getpid())
self._channel.queue_declare(self.on_queue_declare, self.qname)
def on_queue_declare(self, frame):
qname = self.queue + "." + str(os.getpid())
pika.logging.info("Queue %s declared." % (self.qname))
self._channel.queue_bind(self.on_queue_bind, self.qname,
self.exchange, self.routing_key)
def on_queue_bind(self, frame):
pika.logging.info("Queue %s bind." % (self.queue))
self.start_consuming()
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
logger.debug('Adding consumer cancellation callback')
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
logger.debug('Consumer was cancelled remotely, shutting down: %r',
method_frame)
if self._channel:
self._channel.close()
def stop_consuming(self):
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
if self._channel:
pika.logging.info('Sending a Basic.Cancel RPC command to RabbitMQ')
self._channel.basic_cancel(self.on_basic_cancel, self._consumer_tag)
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
logger.debug('Issuing consumer related RPC commands')
self.add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self.on_message,
self.qname)
def on_basic_cancel(self, frame):
pika.logging.info('Basic Cancel Ok.')
# If we don't have any more consumer processes running close
self._channel.close()
self.connection.close()
def on_closed(self, connection):
# We've closed our pika connection so stop the demo
tornado.ioloop.IOLoop.instance().stop()
def on_message(self, unused_channel, basic_deliver, properties, body):
self.notify_listeners(basic_deliver, properties, body)
self._channel.basic_ack(basic_deliver.delivery_tag)
def notify_listeners(self, basic_deliver, properties, body):
""" 收到exchange订阅消息的处理
其中routing_key的形式为 websocket.user.* 以`.'为分隔最末的值为用户名
"""
# here we assume the message the sourcing app
# post to the message queue is in JSON format
event_json = json.loads(body)
target = basic_deliver.routing_key
user = target.split('.')[-1]
if user == '*':
# '*' means broadcast to all users
pika.logging.info(u'广播消息至所有用户')
for listener in self.event_listeners.values():
listener.write_message(body)
else:
listener = self.event_listeners.get(user)
if listener is None:
#logger.error(u'用户[%s]不在线' % (user))
return
# for debugging
pika.logging.info(u'发送消息至用户【%s】'%(user))
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(event_json)
listener.write_message(body)
def add_event_listener(self, uid, listener):
""" 添加事件监听器
@param uid 监听器的外部标识,如用户名
@param listener websocket 处理器
"""
self.event_listeners[uid] = listener
pika.logging.info('PikaConsumer: listener %s added for %s' % (repr(listener), uid))
def remove_event_listener(self, uid):
"""
@param uid remove the websocket listener registered for this user
"""
try:
self.event_listeners.pop(uid)
pika.logging.info('PikaClient: listener %s removed' % repr(uid))
except KeyError:
pass
class MessageHandler(tornado.websocket.WebSocketHandler):
"""
Message service handler, implemented on top of WebSocket.
"""
def open(self):
self.pika_consumer = self.application.settings.get('pika_consumer')
self.registerd = False
pika.logging.info("WebSocket opened")
def on_message(self, message):
logger.debug(u'请求报文:%d,[%s]' %(len(message), message))
self.req_body = object_packet(message)
logger.debug(u'基于Websocket的请求报文:')
# message action
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(self.req_body)
header = self.req_body.header
if header is None:
logger.error(u'非法数据,请核实');
return
if header.action is None:
logger.error(u'未定义消息类别,请核实');
return
if hasattr(self, header.action) is False:
logger.error(u'消息[%s]指定的服务未定义' % (header.action))
return
action = self.__getattribute__(header.action)
action(self.req_body)
def on_close(self):
if hasattr(self, 'user'):
pika.logging.info(u"用户[%s]离线,注销" % (self.user))
self.pika_consumer.remove_event_listener(self.user)
pika.logging.info("WebSocket closed")
def register(self, request):
""" 注册websocket登记"""
if request.header.req_user is None:
logger.error(u'未指定用户')
return
self.user = request.header.req_user
self.pika_consumer.add_event_listener(request.header.req_user, self)
pika.logging.info(u"注册在线用户[%s]成功" % (request.header.req_user))
def unregister(self, request):
user = request.header.req_user
req_user = None
if user is not None and user != 'null':
req_user = user.teller_code
elif type(user) == unicode:
req_user = str(user)
if (req_user is None or req_user == 'null') and hasattr(self, 'user') :
req_user = self.user
self.pika_consumer.remove_event_listener(req_user)
pika.logging.info(u"注销用户[%s]" % (req_user))
def service(self, request):
"""基于websocket的服务调用,将前端的请求发送至服务端"""
header = request.header
if header is None:
pika.logging.info(u'未定义报文头')
return
if header.service is None:
pika.logging.info(u'未定义需要调用的服务')
return
self.pika_client = self.application.settings.get('pika_client')
self._channel = self.pika_client.channel
props = pika.BasicProperties(content_type='text/plain', delivery_mode=1)
self._channel.basic_publish(exchange=EXCHANGE,
routing_key=ROUTING_PREFIX + request.header.service,
body=json.dumps(request),
properties=props,
mandatory=1)
def publish(self, request):
""" FIXME 消息发布,直接按照消息头的定义发布向批定的exchange和队列"""
header = request.header
if header is None:
pika.logging.info(u'未定义报文头')
return
if header.queue is None:
pika.logging.info(u'需要定义消息发送的队列')
return
self.pika_client = self.application.settings.get('pika_client')
self._channel = self.pika_client.channel
props = pika.BasicProperties(content_type='text/plain', delivery_mode=1)
self._channel.basic_publish(exchange=EXCHANGE if header.exchange is None else header.exchange,
routing_key=header.queue,
body=json.dumps(request),
properties=props,
mandatory=1)
if __name__ == '__main__':
pass
```
#### File: src/server/front_srv.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import logging.config
import settings
import os
logger_conf = os.path.join(settings.PROJ_DIR, 'etc', 'frontend_logger.conf')
logging.config.fileConfig(logger_conf)
from core_backend import server as Server
from core_backend import websocket
from core_backend.module import load_modules_hook
import core_backend.http
import tornado.web
import tornado.netutil
import tornado.httpserver
import tornado.ioloop
logger = logging.getLogger(__name__)
def _start_frontend():
handlers = []
hooks = load_modules_hook()
for hook, pkg in hooks:
handlers_hook = getattr(hook, 'handlers')
if handlers_hook:
handlers_list = handlers_hook()
for hdl in handlers_list:
logger.info("add url handler %s by [%s]", hdl[0], pkg)
handlers.append(hdl)
handlers.extend([
(r'/service/(.*)', Server.ServiceHandler),
(r'/mp_service/(.*)', Server.MpServiceHandler),
(r'/attachment', Server.AttachmentHandler),
(r'/static_source/(.*)', Server.StaticSourceHandler),
(r'/file_export', Server.FileExportHandler),
(r'/bms', websocket.MessageHandler),
# static resource files for the admin front end
(r"/(.*)", core_backend.http.StaticFileHandler, {"path": "../web/dist/web", "default_filename": "index.html"})
])
pika_client = Server.PikaClient()
pika_consumer = websocket.PikaConsumer()
# uploads go to the static resource directory
upload_path = settings.STATIC_SOURCE_DIR
application = tornado.web.Application(handlers,
pika_client = pika_client,
pika_consumer = pika_consumer,
upload_path = upload_path,
)
port = settings.FRONT_SRV_PORT
print "Tornado is serving on port {0}.".format(port)
sockets = tornado.netutil.bind_sockets(port)
server = tornado.httpserver.HTTPServer(application)
server.add_sockets(sockets)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.spawn_callback(pika_client.connect)
ioloop.spawn_callback(pika_consumer.connect)
try:
ioloop.start()
except KeyboardInterrupt:
print "Front Server Exit!"
if __name__ == "__main__":
_start_frontend()
```
#### File: services/category/get_list.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs import token as tk
from core_backend.libs.exception import Error
from server.domain.models import WechatshopCategory
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
""" 商品设置信息列表 """
def dispatch(self, session):
req_body = self.context.request.body
resp_body = self.context.response.body
page = req_body.params.page
cate = session.query(WechatshopCategory).order_by(WechatshopCategory.sort_order.asc()).all()
cate_list = [c.to_dict() for c in cate]
topcate_list = [c.to_dict() for c in cate if c.to_dict()['parent_id'] == 0]
category_list = []
for t in topcate_list:
t['level'] = 1
t['is_show'] = True if t['is_show'] == 1 else False
t['is_channel'] = True if t['is_channel'] == 1 else False
t['is_category'] = True if t['is_category'] == 1 else False
category_list.append(t)
for c in cate_list:
if c['parent_id'] == t['id']:
c['level'] = 2
c['is_show'] = True if c['is_show'] == 1 else False
c['is_channel'] = True if c['is_channel'] == 1 else False
c['is_category'] = True if c['is_category'] == 1 else False
category_list.append(c)
resp_body.data = category_list
```
#### File: services/goods/cu_goods.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
from sqlalchemy import func
from core_backend import context
from core_backend.service import handler
from core_backend.libs import token as tk
from core_backend.libs.exception import Error
from server.domain.models import WechatshopGood, WechatshopCart, WechatshopProduct, WechatshopGoodsSpecification
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
"""
Create/update goods.
The code is overly redundant.
TODO
"""
def dispatch(self, session):
req_body = self.context.request.body
resp_body = self.context.response.body
values = req_body.info
spec_data = req_body.specData
spec_value = req_body.specValue
cate_id = req_body.cateId
pic_url = values['list_pic_url']
goods_id = values['id']
values['category_id'] = cate_id
values['is_index'] = 1 if values.get('is_index', 0) else 0
values['is_new'] = 1 if values['is_new'] else 0
id = values['id']
if id > 0:
# update
session.query(WechatshopGood).filter(WechatshopGood.id == id).update(values)
session.query(WechatshopCart).filter(WechatshopCart.goods_id == id).update({WechatshopCart.checked: values.get('checked'), WechatshopCart.is_on_sale: values.get('is_on_sale'), WechatshopCart.list_pic_url: pic_url, WechatshopCart.freight_template_id: values.get('freight_template_id')})
session.query(WechatshopProduct).filter(WechatshopProduct.goods_id == id).update({WechatshopProduct.is_delete: 1})
session.query(WechatshopGoodsSpecification).filter(WechatshopGoodsSpecification.goods_id == id).update({WechatshopGoodsSpecification.is_delete: 1})
session.flush()
session.commit()
for item in spec_data:
if item['id'] > 0:
session.query(WechatshopCart).filter(WechatshopCart.product_id == item['id']).filter(WechatshopCart.is_delete == 0).update({
WechatshopCart.retail_price: item['retail_price'],
WechatshopCart.goods_specifition_name_value: item['value'],
WechatshopCart.goods_sn: item['goods_sn']
})
item['is_delete'] = 0
value = item.pop('value')
item.pop('specification_id')
session.query(WechatshopProduct).filter(WechatshopProduct.id == item['id']).update(item)
specification_data = {
'value': value,
'specification_id': spec_value,
'is_delete': 0
}
session.query(WechatshopGoodsSpecification).filter(WechatshopGoodsSpecification.id == item['goods_specification_ids']).update(specification_data)
session.flush()
session.commit()
else:
specification_data = {
'value': item['value'],
'specification_id': spec_value,
'goods_id': id
}
gs = WechatshopGoodsSpecification(**specification_data)
session.add(gs)
session.commit()
item['goods_specification_ids'] = gs.id
item['goods_id'] = id
item.pop('value')
item.pop('specification_id')
product = WechatshopProduct(**item)
session.add(product)
session.flush()
session.commit()
else:
# create
values.pop('id')
goods = WechatshopGood(**values)
session.add(goods)
session.flush()
session.commit()
goods_id = goods.id
for item in spec_data:
specification_data = {
'value': item['value'],
'goods_id': goods_id,
'specification_id': spec_value
}
gs = WechatshopGoodsSpecification(**specification_data)
session.add(gs)
session.flush()
session.commit()
spec_id = gs.id
item['goods_specification_ids'] = spec_id
item['goods_id'] = goods_id
item['is_on_sale'] = 1
item.pop('value')
product = WechatshopProduct(**item)
session.add(product)
session.flush()
session.commit()
pro = session.query(WechatshopProduct).filter(WechatshopProduct.goods_id == goods_id).filter(WechatshopProduct.is_on_sale == 1).filter(WechatshopProduct.is_delete == 0).all()
pro = [p.to_dict() for p in pro]
if len(pro) > 1:
goods_num = session.query(func.sum(WechatshopProduct.goods_number)).filter(WechatshopProduct.goods_id == goods_id).filter(WechatshopProduct.is_on_sale == 1).filter(WechatshopProduct.is_delete == 0).scalar()
retail_price = session.query(WechatshopProduct).filter(WechatshopProduct.goods_id == goods_id).filter(WechatshopProduct.is_on_sale == 1).filter(WechatshopProduct.is_delete == 0).all()
retail_price = [r.to_dict()['retail_price'] for r in retail_price]
max_price = max(retail_price)
min_price = min(retail_price)
cost = session.query(WechatshopProduct).filter(WechatshopProduct.goods_id == goods_id).filter(WechatshopProduct.is_on_sale == 1).filter(WechatshopProduct.is_delete == 0).all()
cost = [c.to_dict()['cost'] for c in cost]
max_cost = max(cost)
min_cost = min(cost)
goods_price = ''
if min_price == max_price:
goods_price = min_price
else:
goods_price = '{}~{}'.format(min_price, max_price)
cost_price = '{}~{}'.format(min_cost, max_cost)
session.query(WechatshopGood).filter(WechatshopGood.id == goods_id).update({
'goods_number': goods_num,
'retail_price': goods_price,
'cost_price': cost_price,
'min_retail_price': min_price,
'min_cost_price': min_cost
})
session.flush()
session.commit()
else:
info = dict(
goods_number=pro[0]['goods_number'],
retail_price=pro[0]['retail_price'],
cost_price=pro[0]['retail_price'],
min_retail_price=pro[0]['retail_price'],
min_cost_price=pro[0]['cost']
)
session.query(WechatshopGood).filter(WechatshopGood.id == goods_id).update(info)
session.flush()
session.commit()
```
#### File: mp/cart/get_cart.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
from server.domain.models import WechatshopCart, WechatshopProduct, WechatshopGood, WechatshopOrderGood, WechatshopGoodsSpecification
import time
import logging
logger = logging.getLogger(__name__)
"""
Get shopping cart information
"""
def get_cart(session, user_id, index):
if not user_id:
raise Exception('缺失用户信息')
cart_list = session.query(WechatshopCart).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.is_delete == 0).filter(WechatshopCart.is_fast == index).all()
goods_count = 0
goods_amount = 0
checked_goods_count = 0
checked_goods_amount = 0
number_change = 0
result_list = []
for c in cart_list:
new_c = c.to_dict()
product = session.query(WechatshopProduct).filter(WechatshopProduct.id == new_c['product_id']).filter(WechatshopProduct.is_delete == 0).one_or_none()
if not product:
session.query(WechatshopCart).filter(WechatshopCart.product_id == new_c['product_id']).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.is_delete == 0).update({WechatshopCart.is_delete:1})
session.flush()
session.commit()
else:
product = product.to_dict()
if product['goods_number'] <= 0 or product['is_on_sale'] == 0:
session.query(WechatshopCart).filter(WechatshopCart.product_id == new_c['product_id']).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.checked == 1).filter(WechatshopCart.is_delete == 0).update({WechatshopCart.checked: 0})
new_c['number'] = 0
elif product['goods_number'] > 0 and product['goods_number'] < new_c['number']:
new_c['number'] = product['goods_number']
number_change = 1
elif product['goods_number'] > 0 and new_c['number'] == 0:
new_c['number'] = 1
number_change = 1
goods_count += new_c['number']
goods_amount += new_c['number'] * float(product['retail_price'])
new_c['retail_price'] = product['retail_price']
# FIXME: not sure about the original author's intent for this step; the port may be inaccurate
if new_c['checked'] and product['goods_number'] > 0:
checked_goods_count += new_c['number']
checked_goods_amount += new_c['number'] * float(product['retail_price'])
info = session.query(WechatshopGood).filter(WechatshopGood.id == new_c['goods_id']).first().to_dict()
new_c['list_pic_url'] = info['list_pic_url']
new_c['weight_count'] = new_c['number'] * float(new_c['goods_weight'])
session.query(WechatshopCart).filter(WechatshopCart.product_id == new_c['product_id']).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.is_delete == 0).update({WechatshopCart.number: new_c['number'], WechatshopCart.add_price: product['retail_price']})
session.flush()
session.commit()
result_list.append(new_c)
c_amount = round(checked_goods_amount, 2)
a_amount = checked_goods_amount
session.flush()
session.commit()
return {
'cartList': result_list,
'cartTotal': {
'goodsCount': goods_count,
'googsAmount': goods_amount,
'checkedGoodsCount': checked_goods_count,
'checkedGoodsAmount': checked_goods_amount,
'user_id': user_id,
'numberChange': number_change
}
}
def get_again_cart(session, user_id, order_from):
if not user_id:
raise Exception('缺失用户id')
again_goods = session.query(WechatshopOrderGood).filter(WechatshopOrderGood.order_id == order_from).all()
session.query(WechatshopCart).filter(WechatshopCart.is_delete == 0).filter(WechatshopCart.user_id == user_id).update({WechatshopCart.checked: 0})
session.flush()
session.commit()
for item in again_goods:
new_item = item.to_dict()
add_again(session, user_id, new_item['goods_id'], new_item['product_id'], new_item['number'])
cart_list = session.query(WechatshopCart).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.is_fast == 0).filter(WechatshopCart.is_delete == 0).all()
goods_count = 0
goods_amount = 0
checked_goods_count = 0
checked_goods_amount = 0
new_cart_list = []
for ci in cart_list:
new_ci = ci.to_dict()
goods_count += new_ci['number']
goods_amount += int(new_ci['number']) * float(new_ci['retail_price'])
if new_ci['checked']:
checked_goods_count += new_ci['number']
checked_goods_amount += new_ci['number'] * float(new_ci['retail_price'])
info = session.query(WechatshopGood).filter(WechatshopGood.id == new_ci['goods_id']).first().to_dict()
num = info['goods_number']
if num <= 0:
session.query(WechatshopCart).filter(WechatshopCart.product_id == new_ci['product_id']).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.checked == 1).filter(WechatshopCart.is_delete == 0).update({WechatshopCart.checked: 0})
session.flush()
session.commit()
new_ci['list_pic_url'] = info['list_pic_url']
new_ci['goods_number'] = info['goods_number']
new_ci['weight_count'] = new_ci['number'] * float(new_ci['goods_weight'])
new_cart_list.append(new_ci)
c_amount = round(checked_goods_amount, 2)
a_amount = checked_goods_amount
return {
'cartList': new_cart_list,
'cartTotal': {
'goodsCount': goods_count,
'googsAmount': round(goods_amount, 2),
'checkedGoodsCount': checked_goods_count,
'checkedGoodsAmount': c_amount,
'user_id': user_id,
}
}
def add_again(session, user_id, goods_id, product_id, number):
" 获取购物车信息,所有对购物车的增删改操作,都要重新返回购物车信息 "
if not user_id:
raise Exception('缺失用户id')
current_time = time.time()
# check whether the goods can be purchased
goods_info = session.query(WechatshopGood).filter(WechatshopGood.id == goods_id).one_or_none()
if not goods_info:
raise Exception('商品不存在')
else:
goods_info = goods_info.to_dict()
if goods_info['is_on_sale'] == 0:
raise Exception('商品已下架')
product_info = session.query(WechatshopProduct).filter(WechatshopProduct.id == product_id).one_or_none()
if not product_info:
raise Exception('库存不存在')
else:
product_info = product_info.to_dict()
if product_info['goods_number'] < number:
raise Exception('库存不足')
retail_price = product_info['retail_price']
cart_info = session.query(WechatshopCart).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.product_id == product_id).filter(WechatshopCart.is_delete == 0).one_or_none()
# if the item is already in the cart, increase the quantity
if cart_info:
cart_info = cart_info.to_dict()
if product_info['goods_number'] < (number + cart_info['number']):
raise Error(-1, '库存不够!')
session.query(WechatshopCart).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.product_id == product_id).filter(WechatshopCart.is_delete == 0).filter(WechatshopCart.id == cart_info['id']).update({WechatshopCart.retail_price: retail_price, WechatshopCart.checked: 1, WechatshopCart.number: number})
session.flush()
session.commit()
else:
goods_sepcifition_value = []
if product_info['goods_specification_ids'] is not None:
goods_sepcifition_value = session.query(WechatshopGoodsSpecification).filter(WechatshopGoodsSpecification.goods_id == product_info['goods_id']).filter(WechatshopGoodsSpecification.is_delete == 0).filter(WechatshopGoodsSpecification.id.in_((product_info['goods_specification_ids'].split('-')))).all()
if goods_sepcifition_value:
goods_sepcifition_value = [g.to_dict()['value'] for g in goods_sepcifition_value]
# add to the cart
new_cart = {
'goods_id': product_info['goods_id'],
'product_id': product_id,
'goods_sn': product_info['goods_sn'],
'goods_name': goods_info['name'],
'goods_aka': product_info['goods_name'],
'goods_weight': product_info['goods_weight'],
'freight_template_id': goods_info['freight_template_id'],
'list_pic_url': goods_info['list_pic_url'],
'number': number,
'user_id': user_id,
'retail_price': retail_price,
'add_price': retail_price,
'goods_specifition_name_value': ';'.join(goods_sepcifition_value),
'goods_specifition_ids': product_info['goods_specification_ids'],
'checked': 1,
'add_time': current_time,
}
add_cart = WechatshopCart(**new_cart)
session.add(add_cart)
session.flush()
session.commit()
def clear_buy_goods(session, user_id):
""" 清空已购买商品 """
session.query(WechatshopCart).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.checked == 1).filter(WechatshopCart.is_delete == 0).update({WechatshopCart.is_delete: 1})
session.flush()
session.commit()
```
#### File: mp/order/submit.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
from core_backend import context
from core_backend.service import handler
from core_backend.libs.exception import Error
from server.domain.models import WechatshopAddres, WechatshopCart, WechatshopProduct, WechatshopSetting, WechatshopUser, WechatshopOrder, WechatshopOrderGood
from server.services.mp.cart.get_cart import clear_buy_goods
from server.utils import tools
import time
import base64
import logging
import settings
logger = logging.getLogger(__name__)
class Handler(handler.handler):
""" 提交订单 """
def dispatch(self, session):
req_body = self.context.request.body
resp_body = self.context.response.body
user_id = req_body.userId
address_id = req_body.addressId
freight_price = req_body.freightPrice
offline_pay = req_body.offlinePay
postscript = req_body.postscript  # buyer's note
checked_address = session.query(WechatshopAddres).filter(WechatshopAddres.id == address_id).one_or_none()
if not checked_address:
raise Error(-1, '请选择收获地址')
else:
checked_address = checked_address.to_dict()
checked_goods_list = session.query(WechatshopCart).filter(WechatshopCart.user_id == user_id).filter(WechatshopCart.checked == 1).filter(WechatshopCart.is_delete == 0).all()
if not checked_goods_list:
raise Error(-1, '请选择商品')
else:
checked_goods_list = [c.to_dict() for c in checked_goods_list]
check_price = 0
check_stock = 0
for item in checked_goods_list:
product = session.query(WechatshopProduct).filter(WechatshopProduct.id == item['product_id']).first().to_dict()
if item['number'] > product['goods_number']:
check_stock += 1
if item['retail_price'] != item['add_price']:
check_price += 1
if check_stock > 0:
raise Error(-1, '库存不足,请重新下单')
if check_price > 0:
raise Error(-1, '价格发生变化,请重新下单')
# get the red packet (coupon) used by the order
# if a red packet is used, decrease its count; delete the record once it reaches 0
# sum up the total price of the goods
goods_total_price = 0.00
for cart_item in checked_goods_list:
goods_total_price += cart_item['number'] * cart_item['retail_price']
# total order price
order_total_price = goods_total_price + freight_price
# subtract other payment amounts to get the final amount actually paid
actual_price = order_total_price - 0.00
current_time = time.time()
print_info = ''
for index, item in enumerate(checked_goods_list, 1):
print_info = print_info + '{}、{}【{}】\n'.format(index, item['goods_aka'], item['number'])
setting = session.query(WechatshopSetting).filter(WechatshopSetting.id == 1).first().to_dict()
sender_name = setting['Name']
sender_mobile = setting['Tel']
user_info = session.query(WechatshopUser).filter(WechatshopUser.id == user_id).first().to_dict()
order_info = dict(
order_sn=tools.generate_order_number(user_id),
user_id=user_id,
# shipping address and freight
consignee=checked_address['name'],
mobile=checked_address['mobile'],
province=checked_address['province_id'],
city=checked_address['city_id'],
district=checked_address['district_id'],
address=checked_address['address'],
# initial order status: 101
order_status=101,
freight_price=freight_price,
postscript=base64.b64encode(postscript),
add_time=current_time,
goods_price=goods_total_price,
order_price=order_total_price,
actual_price=actual_price,
change_price=actual_price,
print_info=print_info,
offline_pay=offline_pay
)
# insert the order record
order = WechatshopOrder(**order_info)
session.add(order)
session.flush()
session.commit()
if not order.id:
raise Error(-1, '订单提交失败')
order_goods_data = []
for goods_item in checked_goods_list:
goods_info = dict(
user_id=user_id,
order_id=order.id,
goods_id=goods_item['goods_id'],
goods_name=goods_item['goods_name'],
goods_aka=goods_item['goods_aka'],
list_pic_url=goods_item['list_pic_url'],
retail_price=goods_item['retail_price'],
number=goods_item['number'],
goods_specifition_name_value=goods_item['goods_specifition_name_value'],
goods_specifition_ids=goods_item['goods_specifition_ids']
)
goods = WechatshopOrderGood(**goods_info)
session.add(goods)
session.flush()
session.commit()
# clear purchased goods from the cart
clear_buy_goods(session, user_id)
resp_body.orderInfo = order_info
```
#### File: server/utils/wechat_api.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
import settings
import requests
import json
from core_backend.libs.exception import Error
"""
WeChat Mini Program API helpers
"""
def auth_code2session(**kwargs):
""" 获取用户openid以及session """
base_url = "https://api.weixin.qq.com/sns/jscode2session?"
params = "&".join('{}={}'.format(k, v) for k, v in kwargs.items())
req_url = base_url + params
resp = requests.get(req_url)
return resp.json()
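# Illustrative call (parameter names follow the documented jscode2session query string;
# the values here are placeholders):
# auth_code2session(appid='APPID', secret='SECRET', js_code='JSCODE', grant_type='authorization_code')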
def get_access_token(**kwargs):
""" 获取accesstoken """
base_url = "https://api.weixin.qq.com/cgi-bin/token?"
params = "&".join('{}={}'.format(k, v) for k, v in kwargs.items())
req_url = base_url + params
resp = requests.get(req_url)
return resp.json()
def build_wxacode(**kwargs):
""" 生成小程序码 """
# 参数检测
if not kwargs['access_token'] or not kwargs['scene'] or not kwargs['page']:
raise Error(-1, '生成小程序二维码参数缺失')
url = "https://api.weixin.qq.com/wxa/getwxacodeunlimit?access_token={}".format(kwargs['access_token'])
data = {
'scene': kwargs['scene'],
'page': kwargs['page'],
'width': 200
}
resp = requests.post(url, data=json.dumps(data)).json()
if not resp:
raise Error(-1, '小程序服务器小程序码获取失败')
return resp
if __name__ == '__main__':
#print get_access_token(grant_type='client_credential', secret=settings.WECHAT_SECRET, appid=settings.WECHAT_APPID)
access_token = '<KEY>'
x = build_wxacode(access_token=access_token, scene=1235, page='pages/goods/goods')
print x
``` |
{
"source": "jhches21/mangython",
"score": 3
} |
#### File: app/scraper/kissmanga.py
```python
from app.scraper.common import MangaScraper
class KissMangaSC(MangaScraper):
_NAME = 'KissManga'
_SITE_LINK = 'http://kissmanga.com'
_SEARCH_LINK = 'http://kissmanga.com/Search/Manga'
_SEARCH_REST = 'post'
_REL_LINK = True
_ONE_PAGE = True
def _get_search_result(self, search):
values = {
'keyword': search,
'selectSearch': 'Manga'
}
return super()._get_search_result(
values,
r'<a class="bigChar" href="(?P<url>[^"]*)">(?P<title>[^<]*)</a>',
('class="listing"', '</table>'),
)
def _get_series_info(self, url):
values = {
'altname': r'<span class="info">Other name:</span>(.*?)</p>',
'img': r'<div id="rightside">.+?<img.+?src="([^"]*)"',
'genre': r'<span class="info">Genres:</span>(.*?)</p>',
'authors': r'<span class="info">Author:</span>(.*?)</p>',
# 'artists': r'<label>Artist\(s\):</label><a.*?>([^<]*)</a>',
'status': r'<span class="info">Status:</span>(.*?)<span',
'summary': r'<span class="info">Summary:.+?<p.*?>(.*?)</p>'
}
return super()._get_series_info(
url,
values,
('id="container"', 'id="footer"')
)
def _gen_chapter_list(self, link):
super()._gen_chapter_list(
link,
r'<a.+?href="([^"]*)".*?>([^<]*)</a>',
('class="listing"', '</table>')
)
# This scraper doesn't need _get_image() because
# the page list already acts as the full list of picture URLs.
def _get_page_list(self, page_source):
return super()._get_page_list(
page_source,
r'lstImages.push\("([^"]*)"\);',
('lstImages', 'lstImagesLoaded')
)
```
#### File: app/scraper/mangahere.py
```python
from app.scraper.common import MangaScraper
class MangaHereSC(MangaScraper):
_NAME = 'MangaHere'
_SITE_LINK = 'http://www.mangahere.co'
_SEARCH_LINK = 'http://www.mangahere.co/search.php'
_SEARCH_REST = 'get'
def _get_search_result(self, search):
values = {
'name': search,
}
return super()._get_search_result(
values,
r'<a href="(?P<url>[^"]*).+?class="manga_info.+?rel="(?P<title>[^"]*)".*?</a>',
('result_search', '</table>')
)
def _get_series_info(self, url):
values = {
'altname': r'<label>Alternative Name:</label>([^<]*)</li>',
'img': r'<img.*?src="([^"]*)".*?class="img"',
'genre': r'<label>Genre\(s\):</label>([^<]*)</li>',
'authors': r'<label>Author\(s\):</label>(.*?)<li>',
'artists': r'<label>Artist\(s\):</label>(.*?)<li>',
'status': r'<label>Status:</label>(.*?)</li>',
'summary': r'id="show".*?>(.*?) '
}
return super()._get_series_info(
url,
values,
('mr316', 'detail_list')
)
def _gen_chapter_list(self, link):
super()._gen_chapter_list(
link,
r'<span class="left">.*?<a.+?href="([^"]*)".*?>([^<]*)</a>',
('detail_list', 'all_commet')
)
def _get_page_list(self, page_source):
return super()._get_page_list(
page_source,
r'<option.+?value="([^"]*)".+?</option>',
('wid60', 'read_img')
)
def _get_image(self, page_source):
return super()._get_image(
page_source,
r'<img.+?src="([^"]*)".+?id="image".*?>'
)
``` |
{
"source": "jhcho99/CoFormer",
"score": 2
} |
#### File: CoFormer/datasets/__init__.py
```python
from .swig import build as build_swig
def build_dataset(image_set, args):
if args.dataset_file == 'swig':
return build_swig(image_set, args)
raise ValueError(f'dataset {args.dataset_file} not supported')
```
#### File: jhcho99/CoFormer/inference.py
```python
import argparse
import random
import numpy as np
import torch
import datasets
import util.misc as utils
import cv2
import skimage
import skimage.transform
import nltk
import re
from util import box_ops
from PIL import Image
from torch.utils.data import DataLoader
from datasets import build_dataset
from models import build_model
from pathlib import Path
from nltk.corpus import wordnet as wn
def noun2synset(noun):
return wn.synset_from_pos_and_offset(noun[0], int(noun[1:])).name() if re.match(r'n[0-9]*', noun) else "'{}'".format(noun)
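# Example (assuming WordNet 3.0 offsets): noun2synset('n02084071') returns 'dog.n.01',
# while a plain label such as 'blue' is returned quoted as "'blue'".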
def visualize_bbox(image_path=None, num_roles=None, noun_labels=None, pred_bbox=None, pred_bbox_conf=None, output_dir=None):
image = cv2.imread(image_path)
image_name = image_path.split('/')[-1].split('.')[0]
h, w = image.shape[0], image.shape[1]
red_color = (232, 126, 253)
green_color = (130, 234, 198)
blue_color = (227,188, 134)
orange_color = (98, 129, 240)
brown_color = (79, 99, 216)
purple_color = (197, 152, 173)
colors = [red_color, green_color, blue_color, orange_color, brown_color, purple_color]
white_color = (255, 255, 255)
line_width = 4
# the value of pred_bbox_conf is logit, not probability.
for i in range(num_roles):
if pred_bbox_conf[i] >= 0:
# bbox
pred_left_top = (int(pred_bbox[i][0].item()), int(pred_bbox[i][1].item()))
pred_right_bottom = (int(pred_bbox[i][2].item()), int(pred_bbox[i][3].item()))
lt_0 = max(pred_left_top[0], line_width)
lt_1 = max(pred_left_top[1], line_width)
rb_0 = min(pred_right_bottom[0], w-line_width)
rb_1 = min(pred_right_bottom[1], h-line_width)
lt = (lt_0, lt_1)
rb = (rb_0, rb_1)
cv2.rectangle(img=image, pt1=lt, pt2=rb, color=colors[i], thickness=line_width, lineType=-1)
# label
label = noun_labels[i].split('.')[0]
text_size, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
p1 = (lt[0], lt[1] - text_size[1])
cv2.rectangle(img=image, pt1=(p1[0], (p1[1]-2-baseline)), pt2=((p1[0]+text_size[0]), (p1[1]+text_size[1])), color=colors[i], thickness=-1)
cv2.putText(image, label, (p1[0], p1[1] + baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.4, white_color, 1, 8)
# save image
cv2.imwrite("{}/{}_result.jpg".format(output_dir, image_name), image)
return
def process_image(image):
mean = np.array([[[0.485, 0.456, 0.406]]])
std = np.array([[[0.229, 0.224, 0.225]]])
image = (image.astype(np.float32) - mean) / std
min_side, max_side= 512, 700
rows_orig, cols_orig, cns_orig = image.shape
smallest_side = min(rows_orig, cols_orig)
scale = min_side / smallest_side
largest_side = max(rows_orig, cols_orig)
if largest_side * scale > max_side:
scale = max_side / largest_side
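# Worked example of the scaling rule above (illustrative): for a 400x600 input,
# scale = 512/400 = 1.28, but 600*1.28 = 768 > 700, so scale becomes 700/600 ≈ 1.167.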
# resize the image with the computed scale
image = skimage.transform.resize(image, (int(round(rows_orig * scale)), int(round((cols_orig * scale)))))
rows, cols, cns = image.shape
new_image = np.zeros((rows, cols, cns)).astype(np.float32)
new_image[:rows, :cols, :] = image.astype(np.float32)
image = torch.from_numpy(new_image)
shift_1 = int((700 - cols) * 0.5)
shift_0 = int((700 - rows) * 0.5)
max_height = 700
max_width = 700
padded_imgs = torch.zeros(1, max_height, max_width, 3)
padded_imgs[0, shift_0:shift_0+image.shape[0], shift_1:shift_1+image.shape[1], :] = image
padded_imgs = padded_imgs.permute(0, 3, 1, 2)
height = torch.tensor(int(image.shape[0])).float()
width = torch.tensor(int(image.shape[1])).float()
shift_0 = torch.tensor(shift_0).float()
shift_1 = torch.tensor(shift_1).float()
scale = torch.tensor(scale).float()
mw = torch.tensor(max_width).float()
mh = torch.tensor(max_height).float()
return (utils.nested_tensor_from_tensor_list(padded_imgs),
{'width': width,
'height': height,
'shift_0': shift_0,
'shift_1': shift_1,
'scale': scale,
'max_width': mw,
'max_height': mh})
def inference(model, device, image_path=None, inference=False, idx_to_verb=None, idx_to_role=None,
vidx_ridx=None, idx_to_class=None, output_dir=None):
model.eval()
image_name = image_path.split('/')[-1].split('.')[0]
# load image & process
image = Image.open(image_path)
image = image.convert('RGB')
image = np.array(image)
image = image.astype(np.float32) / 255.0
image, info = process_image(image)
image = image.to(device)
info = {k: v.to(device) if type(v) is not str else v for k, v in info.items()}
output = model(image, inference=inference)
pred_verb = output['pred_verb'][0]
pred_noun = output['pred_noun_3'][0]
pred_bbox = output['pred_bbox'][0]
pred_bbox_conf = output['pred_bbox_conf'][0]
top1_verb = torch.topk(pred_verb, k=1, dim=0)[1].item()
roles = vidx_ridx[top1_verb]
num_roles = len(roles)
verb_label = idx_to_verb[top1_verb]
role_labels = []
noun_labels = []
for i in range(num_roles):
top1_noun = torch.topk(pred_noun[i], k=1, dim=0)[1].item()
role_labels.append(idx_to_role[roles[i]])
noun_labels.append(noun2synset(idx_to_class[top1_noun]))
# convert bbox
mw, mh = info['max_width'], info['max_height']
w, h = info['width'], info['height']
shift_0, shift_1, scale = info['shift_0'], info['shift_1'], info['scale']
pb_xyxy = box_ops.swig_box_cxcywh_to_xyxy(pred_bbox.clone(), mw, mh, device=device)
for i in range(num_roles):
pb_xyxy[i][0] = max(pb_xyxy[i][0] - shift_1, 0)
pb_xyxy[i][1] = max(pb_xyxy[i][1] - shift_0, 0)
pb_xyxy[i][2] = max(pb_xyxy[i][2] - shift_1, 0)
pb_xyxy[i][3] = max(pb_xyxy[i][3] - shift_0, 0)
# locate predicted boxes within image (processing w/ image width & height)
pb_xyxy[i][0] = min(pb_xyxy[i][0], w)
pb_xyxy[i][1] = min(pb_xyxy[i][1], h)
pb_xyxy[i][2] = min(pb_xyxy[i][2], w)
pb_xyxy[i][3] = min(pb_xyxy[i][3], h)
pb_xyxy /= scale
# outputs
with open("{}/{}_result.txt".format(output_dir, image_name), "w") as f:
text_line = "verb: {} \n".format(verb_label)
f.write(text_line)
for i in range(num_roles):
text_line = "role: {}, noun: {} \n".format(role_labels[i], noun_labels[i])
f.write(text_line)
f.close()
visualize_bbox(image_path=image_path, num_roles=num_roles, noun_labels=noun_labels, pred_bbox=pb_xyxy, pred_bbox_conf=pred_bbox_conf, output_dir=output_dir)
def get_args_parser():
parser = argparse.ArgumentParser('Set CoFormer', add_help=False)
# Backbone parameters
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--position_embedding', default='learned', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# Transformer parameters
parser.add_argument('--num_glance_enc_layers', default=3, type=int,
help="Number of encoding layers in Glance Transformer")
parser.add_argument('--num_gaze_s1_dec_layers', default=3, type=int,
help="Number of decoding layers in Gaze-Step1 Transformer")
parser.add_argument('--num_gaze_s1_enc_layers', default=3, type=int,
help="Number of encoding layers in Gaze-Step1 Transformer")
parser.add_argument('--num_gaze_s2_dec_layers', default=3, type=int,
help="Number of decoding layers in Gaze-Step2 Transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=512, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.15, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
# Dataset parameters
parser.add_argument('--dataset_file', default='swig')
parser.add_argument('--swig_path', type=str, default="SWiG")
parser.add_argument('--image_path', default='inference/image.jpg',
help='path where the test image is')
# Etc...
parser.add_argument('--inference', default=True)
parser.add_argument('--output_dir', default='CoFormer_inference',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for inference')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--num_workers', default=1, type=int)
parser.add_argument('--saved_model', default='CoFormer_checkpoint.pth',
help='path where saved model is')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print(args)
if not args.inference:
assert False, f"Please set inference to True"
# fix the seed
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# num noun classes in train dataset
dataset_train = build_dataset(image_set='train', args=args)
args.num_noun_classes = dataset_train.num_nouns()
# build model
device = torch.device(args.device)
model, _ = build_model(args)
model.to(device)
checkpoint = torch.load(args.saved_model, map_location='cpu')
model.load_state_dict(checkpoint['model'])
inference(model, device, image_path=args.image_path, inference=args.inference,
idx_to_verb=args.idx_to_verb, idx_to_role=args.idx_to_role, vidx_ridx=args.vidx_ridx,
idx_to_class=args.idx_to_class, output_dir=args.output_dir)
return
if __name__ == '__main__':
nltk.download('wordnet')
parser = argparse.ArgumentParser('CoFormer inference script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
``` |
{
"source": "jhcho99/gsrtr",
"score": 2
} |
#### File: jhcho99/gsrtr/engine.py
```python
import math
import os
import sys
import torch
import util.misc as utils
from typing import Iterable
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, max_norm: float = 0):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
# data & target
samples = samples.to(device)
targets = [{k: v.to(device) if type(v) is not str else v for k, v in t.items()} for t in targets]
# model output & calculate loss
outputs = model(samples, targets)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
# scaled with different loss coefficients
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
# stop when loss is nan or inf
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
        # loss backward & optimizer step
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate_swig(model, criterion, data_loader, device, output_dir):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
# data & target
samples = samples.to(device)
targets = [{k: v.to(device) if type(v) is not str else v for k, v in t.items()} for t in targets]
# model output & calculate loss
outputs = model(samples, targets)
loss_dict = criterion(outputs, targets, eval=True)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
# scaled with different loss coefficients
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
return stats
```
#### File: jhcho99/gsrtr/main.py
```python
import argparse
import datetime
import json
import random
import time
import numpy as np
import torch
import datasets
import util.misc as utils
from torch.utils.data import DataLoader, DistributedSampler
from datasets import build_dataset
from engine import evaluate_swig, train_one_epoch
from models import build_model
from pathlib import Path
def get_args_parser():
parser = argparse.ArgumentParser('Set grounded situation recognition transformer', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--epochs', default=40, type=int)
# Backbone parameters
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--position_embedding', default='learned', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# Transformer parameters
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=512, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.15, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
# Loss coefficients
parser.add_argument('--noun_loss_coef', default=1, type=float)
parser.add_argument('--verb_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--bbox_conf_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=5, type=float)
# Dataset parameters
parser.add_argument('--dataset_file', default='swig')
parser.add_argument('--swig_path', type=str, default="SWiG")
parser.add_argument('--dev', default=False, action='store_true')
parser.add_argument('--test', default=False, action='store_true')
# Etc...
parser.add_argument('--inference', default=False)
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--saved_model', default='gsrtr_checkpoint.pth',
help='path where saved model is')
# Distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# check dataset
if args.dataset_file == "swig":
from datasets.swig import collater
else:
assert False, f"dataset {args.dataset_file} is not supported now"
# build dataset
dataset_train = build_dataset(image_set='train', args=args)
args.num_noun_classes = dataset_train.num_nouns()
if not args.test:
dataset_val = build_dataset(image_set='val', args=args)
else:
dataset_test = build_dataset(image_set='test', args=args)
# build model
model, criterion = build_model(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
}
]
# optimizer & LR scheduler
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
# dataset sampler
if not args.test and not args.dev:
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
if args.dev:
if args.distributed:
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
elif args.test:
if args.distributed:
sampler_test = DistributedSampler(dataset_test, shuffle=False)
else:
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
output_dir = Path(args.output_dir)
# dataset loader
if not args.test and not args.dev:
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, num_workers=args.num_workers,
collate_fn=collater, batch_sampler=batch_sampler_train)
data_loader_val = DataLoader(dataset_val, num_workers=args.num_workers,
drop_last=False, collate_fn=collater, sampler=sampler_val)
else:
if args.dev:
data_loader_val = DataLoader(dataset_val, num_workers=args.num_workers,
drop_last=False, collate_fn=collater, sampler=sampler_val)
elif args.test:
data_loader_test = DataLoader(dataset_test, num_workers=args.num_workers,
drop_last=False, collate_fn=collater, sampler=sampler_test)
# use saved model for evaluation (using dev set or test set)
if args.dev or args.test:
checkpoint = torch.load(args.saved_model, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if args.dev:
data_loader = data_loader_val
elif args.test:
data_loader = data_loader_test
test_stats = evaluate_swig(model, criterion, data_loader, device, args.output_dir)
log_stats = {**{f'test_{k}': v for k, v in test_stats.items()}}
# write log
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
return None
# train model
print("Start training")
start_time = time.time()
max_test_mean_acc = 42
for epoch in range(args.start_epoch, args.epochs):
# train one epoch
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer,
device, epoch, args.clip_max_norm)
lr_scheduler.step()
# evaluate
test_stats = evaluate_swig(model, criterion, data_loader_val, device, args.output_dir)
# log & output
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
# save checkpoint for every new max accuracy
if log_stats['test_mean_acc_unscaled'] > max_test_mean_acc:
max_test_mean_acc = log_stats['test_mean_acc_unscaled']
checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args}, checkpoint_path)
# write log
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('GSRTR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
``` |
{
"source": "jhchung/summary-gwas-imputation",
"score": 2
} |
#### File: summary-gwas-imputation/src/collapse_folder_files.py
```python
__author__ = "<NAME>"
import re
import logging
import os
import shutil
from genomic_tools_lib import Logging, Utilities
def run(args):
if not args.reentrant:
if os.path.exists(args.output_folder):
logging.info("Output path exists. Nope.")
return
Utilities.maybe_create_folder(args.output_folder)
logging.info("Checking input folder")
r = re.compile(args.rule)
folders = [x for x in sorted(os.listdir(args.input_folder)) if r.search(x)]
if args.exclude:
        folders = [x for x in folders if x not in set(args.exclude)]
names = {}
for f in folders:
name = r.search(f).group(1)
if not name in names: names[name] = []
names[name].append(os.path.join(args.input_folder, f))
_f = shutil.move if args.move else shutil.copy
for name in sorted(names):
logging.info("Processing %s", name)
output_folder = os.path.join(args.output_folder, name)
Utilities.maybe_create_folder(output_folder)
for input_folder in names[name]:
logging.log(8, "Processing %s", input_folder)
files = os.listdir(input_folder)
for file in files:
i = os.path.join(input_folder, file)
o = os.path.join(output_folder, file)
_f(i, o)
logging.info("Finished collapse")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Convert model training format data to parquet format ")
parser.add_argument("-input_folder", help="Folder where sub folders can be found")
parser.add_argument("-rule", help="Regexp to group input folders")
parser.add_argument("-output_folder", help="Destination folder where contets will be saved")
parser.add_argument("--reentrant", help="Lenient, multiple-run mode", action="store_true")
parser.add_argument("--exclude", help="Skip these folders", nargs="+")
parser.add_argument("--move", help="Wether to move or copy files", action="store_true")
parser.add_argument("-parsimony", help="Log parsimony level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
args = parser.parse_args()
Logging.configure_logging(int(args.parsimony))
run(args)
```
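A minimal sketch of how the `-rule` regexp above groups sub-folders by their first capture group; the folder names and pattern below are hypothetical, not taken from the repository.

```python
import re

# Hypothetical sub-folder names and grouping rule (illustrative only).
folders = ["ENSG0001_chunk1", "ENSG0001_chunk2", "ENSG0002_chunk1", "logs"]
r = re.compile(r"^(ENSG\d+)_chunk\d+$")

names = {}
for f in [x for x in sorted(folders) if r.search(x)]:
    name = r.search(f).group(1)  # capture group 1 becomes the output sub-folder name
    names.setdefault(name, []).append(f)

print(names)
# {'ENSG0001': ['ENSG0001_chunk1', 'ENSG0001_chunk2'], 'ENSG0002': ['ENSG0002_chunk1']}
```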
#### File: summary-gwas-imputation/src/concatenate.py
```python
__author__ = "<NAME>"
from genomic_tools_lib import Utilities, Logging
import os
import re
import gzip
import logging
def _key(x, r, sort_groups):
k = ()
s = r.search(x)
for g in sort_groups:
k = k + ( int(s.group(g)),)
return k
def run(args):
r = re.compile(args.pattern)
files = [x for x in os.listdir(args.folder) if r.search(x)]
if args.sort_groups:
files = sorted(files, key=lambda x: _key(x, r, args.sort_groups))
output_firstline = True
Utilities.ensure_requisite_folders(args.output)
logging.info("Starting concatenation")
with gzip.open(args.output, "w") as o:
for file in files:
path = os.path.join(args.folder, file)
logging.log(9, "Opening %s", path)
for i, line in Utilities.iterate_file(path):
if i==0:
if output_firstline:
o.write(line.encode())
if not args.headerless:
output_firstline = False
continue
o.write(line.encode())
logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Post summary imputation results")
parser.add_argument("-folder", help="How far to extend in each direction when searching for variants")
parser.add_argument("-pattern", help="Work only with one chromosome")
parser.add_argument("-output", help="Where to save stuff")
parser.add_argument("--headerless", help="concatenate all lines", action="store_true")
parser.add_argument("--sort_groups", help="what to extract and sort for", nargs="+", type=int)
parser.add_argument("-parsimony",
help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything",
default="10")
args = parser.parse_args()
Logging.configure_logging(int(args.parsimony))
run(args)
```
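A small sketch of the numeric ordering that `--sort_groups` and `_key` provide above; the file names and pattern are made up for illustration.

```python
import re

# Illustrative file names; group 1 is the chromosome, group 2 the chunk index.
r = re.compile(r"chr(\d+)_chunk(\d+)\.txt\.gz")
files = ["chr10_chunk1.txt.gz", "chr2_chunk10.txt.gz", "chr2_chunk2.txt.gz"]

def key(x, sort_groups=(1, 2)):
    s = r.search(x)
    return tuple(int(s.group(g)) for g in sort_groups)  # numeric, not lexicographic

print(sorted(files, key=key))
# ['chr2_chunk2.txt.gz', 'chr2_chunk10.txt.gz', 'chr10_chunk1.txt.gz']
```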
#### File: summary-gwas-imputation/src/covariance_for_model.py
```python
__author__ = "<NAME>"
import logging
import os
import re
import sqlite3
import pandas
import numpy
import gzip
from pyarrow import parquet as pq
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.miscellaneous import matrices, PandasHelpers
from genomic_tools_lib.file_formats import Parquet
def get_file_map(args):
r = re.compile(args.parquet_genotype_pattern)
files = os.listdir(args.parquet_genotype_folder)
files = {int(r.search(f).groups()[0]):os.path.join(args.parquet_genotype_folder, f) for f in files if r.search(f)}
p = {}
for k,v in files.items():
g = pq.ParquetFile(v)
p[k] = g
return p
n_ = re.compile(r"^(\d+)$")
def run(args):
if os.path.exists(args.output):
logging.info("Output already exists, either delete it or move it")
return
logging.info("Getting parquet genotypes")
file_map = get_file_map(args)
logging.info("Getting genes")
with sqlite3.connect(args.model_db) as connection:
# Pay heed to the order. This avoids arbitrariness in sqlite3 loading of results.
extra = pandas.read_sql("SELECT * FROM EXTRA order by gene", connection)
extra = extra[extra["n.snps.in.model"] > 0]
individuals = TextFileTools.load_list(args.individuals) if args.individuals else None
logging.info("Processing")
Utilities.ensure_requisite_folders(args.output)
with gzip.open(args.output, "w") as f:
f.write("GENE RSID1 RSID2 VALUE\n".encode())
with sqlite3.connect(args.model_db) as connection:
for i,t in enumerate(extra.itertuples()):
g_ = t.gene
logging.log(9, "Proccessing %i/%i:%s", i+1, extra.shape[0], g_)
w = pandas.read_sql("select * from weights where gene = '{}';".format(g_), connection)
chr_ = w.varID.values[0].split("_")[0].split("chr")[1]
if not n_.search(chr_):
logging.log(9, "Unsupported chromosome: %s", chr_)
continue
dosage = file_map[int(chr_)]
if individuals:
d = Parquet._read(dosage, columns=w.varID.values, specific_individuals=individuals)
del d["individual"]
else:
d = Parquet._read(dosage, columns=w.varID.values, skip_individuals=True)
var_ids = list(d.keys())
if len(var_ids) == 0:
if len(w.varID.values) == 1:
logging.log(9, "workaround for single missing genotype at %s", g_)
d = {w.varID.values[0]:[0,1]}
                    else:
                        logging.log(9, "No genotype available for %s, skipping", g_)
                        continue
if args.output_rsids:
ids = [x for x in pandas.DataFrame({"varID": var_ids}).merge(w[["varID", "rsid"]], on="varID").rsid.values]
else:
ids = var_ids
c = numpy.cov([d[x] for x in var_ids])
c = matrices._flatten_matrix_data([(w.gene.values[0], ids, c)])
for entry in c:
l = "{} {} {} {}\n".format(entry[0], entry[1], entry[2], entry[3])
f.write(l.encode())
logging.info("Finished building covariance.")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Generate BSLMM runs on study")
parser.add_argument("-parquet_genotype_folder", help="Parquet Genotype folder")
parser.add_argument("-parquet_genotype_pattern", help="Pattern to detect parquet genotypes by chromosome")
parser.add_argument("-model_db", help="Where to save stuff")
parser.add_argument("-output", help="Where to save stuff")
parser.add_argument("--output_rsids", action="store_true")
parser.add_argument("--individuals")
parser.add_argument("-parsimony", help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
args = parser.parse_args()
Logging.configure_logging(int(args.parsimony))
run(args)
```
#### File: summary-gwas-imputation/src/dump_model_db.py
```python
import gzip
import pandas
import numpy
import logging
import re
import os
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.miscellaneous import Models
__author__ = "<NAME>"
def _read(folder, pattern):
files = sorted([x for x in os.listdir(folder) if pattern.search(x)])
files = [os.path.join(folder, x) for x in files]
r =[]
for file in files:
try:
r.append(pandas.read_table(file))
        except Exception:
logging.info("issue opening file %s", file)
return pandas.concat(r)
def _read_2(input_prefix, stem):
path_ = os.path.split(input_prefix)
r = re.compile(path_[1] + stem)
return _read(path_[0], r)
def run(args):
logging.info("Loading models")
weights, extra = Models.read_model(args.input)
Utilities.save_dataframe(weights, args.output_prefix + "_weights.txt.gz")
Utilities.save_dataframe(extra, args.output_prefix + "_extra.txt.gz")
logging.info("Done")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Dump")
parser.add_argument("-input")
parser.add_argument("-output_prefix")
parser.add_argument("-parsimony", type=int, default=logging.INFO)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
```
#### File: summary-gwas-imputation/src/gencode_conversion.py
```python
import os
import logging
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.file_formats import Gencode
__author__ = "<NAME>"
#Quick and dirty hack for format needed elsewhere.
#TODO: formalize
def _reformat(gencode):
gencode = gencode.rename(columns={"chromosome": "chr", "start_location": "start", "end_location": "end"})
gencode.chr = gencode.chr.str.split("chr").str.get(1)
return gencode[["chr", "gene_id", "gene_name", "start", "end", "gene_type"]]
def run(args):
if os.path.exists(args.output):
logging.info("Output exists. Nope.")
return
if args.output_column_map:
selected = [x[0] for x in args.output_column_map]
else:
selected = [Gencode.GFTF.K_GENE_ID, Gencode.GFTF.K_GENE_NAME, Gencode.GFTF.K_GENE_TYPE]
logging.info("Loading Gencode")
gencode = Gencode.load(args.gencode_file,
feature_type_whitelist={x for x in args.feature_type_whitelist},
gene_type_white_list={x for x in args.gene_type_whitelist},
transcript_type_whitelist={x for x in args.transcript_type_whitelist},
selected_key_value_pairs=selected)
#gencode = _reformat(gencode)
logging.info("Converting format")
if args.output_column_map:
gencode = gencode.rename(columns={x[0]:x[1] for x in args.output_column_map})
if "gene_version" in gencode and "gene_id" in gencode:
gencode["gene_id"] = gencode.gene_id+ "." + gencode.gene_version
keep = ["chromosome", "start_location", "end_location", "feature_type", "strand"]+[x[1] for x in args.output_column_map if x[1] not in {"gene_version"}]
gencode = gencode[keep]
else:
gencode = gencode[["chromosome", "start_location", "end_location", "feature_type", "strand"] + [x[1] for x in
args.output_column_map]]
logging.info("Saving")
Utilities.save_dataframe(gencode, args.output)
logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Convert gencode file into a table file")
parser.add_argument("-gencode_file", help="Where to load file from")
parser.add_argument("-output", help="Where to save")
parser.add_argument("-gene_type_whitelist", help="Which types of genes to keep", default=[], nargs="+")
parser.add_argument("-feature_type_whitelist", help="Which types of genes to keep", default=[], nargs="+")
parser.add_argument("-transcript_type_whitelist", help="Which types of transcripts to keep", default=[], nargs="+")
parser.add_argument("-output_column_map", help="Specify multiple key-value pairs to specify format conversion", nargs=2, action="append", default=[])
parser.add_argument("-verbosity", help="Logging verbosity (actually loquacity)", type=int, default=10)
args = parser.parse_args()
Logging.configure_logging(args.verbosity)
run(args)
```
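A quick sketch of how the repeated `-output_column_map` flag is collected and turned into the rename mapping used above; the key and column names are illustrative.

```python
import argparse

# Mirror of the -output_column_map plumbing above (values are illustrative).
parser = argparse.ArgumentParser()
parser.add_argument("-output_column_map", nargs=2, action="append", default=[])
args = parser.parse_args([
    "-output_column_map", "gene_id", "gene_id",
    "-output_column_map", "gene_name", "genename",
])
print(args.output_column_map)                        # [['gene_id', 'gene_id'], ['gene_name', 'genename']]
print({x[0]: x[1] for x in args.output_column_map})  # rename mapping applied to the gencode frame
```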
#### File: external_tools/dap/RunDAP.py
```python
__author__ = "<NAME>"
import os
import re
import numpy
import shutil
import logging
import traceback
from collections import namedtuple
from subprocess import call
from ...file_formats import SBAM
from ...individual_data.Utilities import _StudyBasedContext
from ...individual_data import Utilities as StudyUtilities
from ...Exceptions import ReportableException
from ... import Utilities
Stats = namedtuple("Stats", ["gene", "status"])
# TODO: abstract and refactor into utilities
class _Context(_StudyBasedContext):
def get_dap_exe(self): raise RuntimeError("Not implemented")
def get_grid_file_path(self): raise RuntimeError("Not implemented")
def get_options(self): raise RuntimeError("Not implemented")
def get_prior_file_path(self, gene): raise RuntimeError("Not implemented")
def get_intermediate_folder(self): raise RuntimeError("Not implemented")
def get_output_folder(self): raise RuntimeError("Not implemented")
def get_delete_intermediate(self): raise RuntimeError("Not implemented")
def run_dap(context, gene):
stats = _stats(gene)
try:
_run_dap(context, gene)
except ReportableException as ex:
status = Utilities.ERROR_REGEXP.sub('_', ex.msg)
stats = _stats(gene, status=status)
logging.info("Reportable exception running dap: %s", ex.msg)
except Exception as ex:
msg = '{0}'.format(type(ex)) # .replace('"', "'")
status = '"{0}"'.format(msg)
stats = _stats(gene, status=status)
logging.info("Exception running dap:\n%s", traceback.format_exc())
finally:
if context.get_delete_intermediate():
folder = _intermediate_folder(context, gene)
if os.path.exists(folder):
shutil.rmtree(folder)
return stats
def _run_dap(context, gene):
intermediate = _intermediate_folder(context, gene)
os.makedirs(intermediate)
study = StudyUtilities._get_study_for_gene(context, gene, rename_pheno=None)
SBAM.save_study(study, intermediate)
run_dap_command(context, gene)
r_ = re.compile(r"\\\n[\s]+\\\n")
def _render(s):
while r_.search(s):
s = r_.sub("\\\n", s) #substitute empty lines on missing values
return s
def dap_command(context, gene):
args = {"dap":context.get_dap_exe(),
"data":_study_path(context, gene),
"grid": context.get_grid_file_path(),
"prior": context.get_prior_file_path(gene),
"output": _output(context, gene),
"OUTPUT_DIR": context.get_output_folder(),
"INTERMEDIATE_DIR": context.get_intermediate_folder()}
options = context.get_options()
if len(options):
args["extra"] = " ".join(["{} {}".format(k, str(v)) for k,v in options.items()])
command = \
"""#!/usr/bin/env bash
[ -d {OUTPUT_DIR} ] || mkdir -p {OUTPUT_DIR}
[ -d {INTERMEDIATE_DIR} ] || mkdir -p {INTERMEDIATE_DIR}
{dap} \\
-d {data} \\
-prior {prior} \\
{extra} \\
-t 1 > {output}
""".format(**args)
    #The following is not currently supported in dap-g. Sup.
#-t 1 \\
#-it 0.05 > {output}
command = _render(command)
return command
def run_dap_command(context, gene):
command = dap_command(context, gene)
script_path = _script_path(context, gene)
with open(script_path, "w") as script:
script.write(command)
_o = os.path.join(_intermediate_folder(context, gene), "dap.o")
_e = os.path.join(_intermediate_folder(context, gene), "dap.e")
with open(_o, "w") as o:
with open(_e, "w") as e:
call(["bash", script_path], stderr=e, stdout=o)
def _stats(gene, status=numpy.nan):
return Stats(gene=gene, status=status)
########################################################################################################################
def _intermediate_folder(context, gene): return os.path.join(context.get_intermediate_folder(), gene)
def _study_path(context, gene): return os.path.join(_intermediate_folder(context, gene), gene+".txt")
def _script_path(context, gene): return os.path.join(_intermediate_folder(context, gene), gene+".sh")
def _output(context, gene): return os.path.join(context.get_output_folder(), gene+".dap.txt")
########################################################################################################################
def data_frame_from_stats(data):
return Utilities.to_dataframe(data, list(Stats._fields))
```
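A hedged illustration of what `_render` above does to the generated bash script when an optional value is left empty; the fragment is synthetic.

```python
import re

# Same pattern as RunDAP._render: collapse continuation lines left empty by missing values.
r_ = re.compile(r"\\\n[\s]+\\\n")
fragment = "dap-g \\\n    \\\n    -d study.txt \\\n"  # synthetic fragment with an empty continuation line
while r_.search(fragment):
    fragment = r_.sub("\\\n", fragment)
print(fragment)
# dap-g \
#     -d study.txt \
```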
#### File: external_tools/dap/Utilities.py
```python
__author__ = "<NAME>"
import logging
import os
from . import RunDAP
from ...individual_data.Utilities import StudyBasedContext
from ...individual_data import Utilities as StudyUtilities
from ...miscellaneous import PandasHelpers
from ...file_formats import Parquet
class Context(StudyBasedContext, RunDAP._Context):
def __init__(self, dap_exe, grid_file, prior_folder, intermediate_folder, output_folder, study, gene_annotation, window, delete_intermediate, options):
super().__init__(study, gene_annotation, window)
self.dap_exe = dap_exe
self.grid_file = grid_file
self.prior_folder = prior_folder
self.intermediate_folder = intermediate_folder
self.output_folder = output_folder
self.gene_to_prior = {os.path.splitext(x)[0]:os.path.join(prior_folder,x) for x in os.listdir(prior_folder)}
self.delete_intermediate = delete_intermediate
self.options = options
def get_dap_exe(self): return self.dap_exe
def get_grid_file_path(self): return self.grid_file
def get_prior_file_path(self, gene): return self.gene_to_prior[gene]
def get_intermediate_folder(self): return self.intermediate_folder
def get_output_folder(self): return self.output_folder
def get_delete_intermediate(self):return self.delete_intermediate
def get_options(self): return self.options
def get_available_genes(self):
if not self.available_genes_:
g = StudyUtilities.available_genes(self.study, self.gene_annotation)
self.available_genes_ = [x for x in g if x in self.gene_to_prior]
return self.available_genes_
def context_from_args(args):
logging.info("Creating context")
gene_annotation = StudyUtilities.load_gene_annotation(args.gene_annotation, args.chromosome)
if args.sub_batches and args.sub_batch is not None:
logging.log(9, "Trimming gene annotation on sub-batches")
gene_annotation = PandasHelpers.sub_batch(gene_annotation, args.sub_batches, args.sub_batch)
logging.info("Loading study")
p_ = (lambda x: StudyUtilities.trim_variant_metadata_on_gene_annotation(x, gene_annotation, args.window, _log_level_v=5)) if args.sub_batches else None
study = Parquet.study_from_parquet(args.parquet_genotype, args.parquet_genotype_metadata, args.parquet_phenotype, args.parquet_covariate, post_process_variants_metadata=p_, frequency_filter=args.frequency_filter, chromosome=args.chromosome)
options= {}
if args.options:
options = {"-"+x[0]:x[1] for x in args.options}
context = Context(args.dap_command, args.grid_file, args.priors_folder, args.intermediate_folder, args.output_folder, study, gene_annotation, args.window, (not args.keep_intermediate_folder), options)
return context
```
#### File: genomic_tools_lib/file_formats/Miscellaneous.py
```python
__author__ = "<NAME>"
import pandas
def dapg_signals(file, threshold=None, id_whitelist=None):
w = pandas.read_table(file, usecols=["gene", "variant_id", "pip", "cluster_id"])
if id_whitelist:
w = w[w.variant_id.isin(id_whitelist)]
w = w.sort_values("pip", ascending=False).groupby(["gene", "cluster_id"]).head(1)
if threshold:
w = w[w.pip >= threshold]
w = w.sort_values("gene")
return w
```
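A hedged sketch of the table `dapg_signals` above consumes and what it keeps; the rows are synthetic.

```python
import io
import pandas

# Synthetic DAP-G signal table with the columns dapg_signals reads.
raw = io.StringIO(
    "gene\tvariant_id\tpip\tcluster_id\n"
    "ENSG0001\tchr1_100_A_G_b38\t0.90\t1\n"
    "ENSG0001\tchr1_120_C_T_b38\t0.40\t1\n"
    "ENSG0001\tchr1_500_G_A_b38\t0.30\t2\n"
)
w = pandas.read_table(raw, usecols=["gene", "variant_id", "pip", "cluster_id"])
# Keep the top-PIP variant per (gene, cluster), as dapg_signals does.
w = w.sort_values("pip", ascending=False).groupby(["gene", "cluster_id"]).head(1)
print(w[w.pip >= 0.5])  # with threshold=0.5 only the cluster-1 lead variant remains
```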
#### File: genomic_tools_lib/file_formats/Parquet.py
```python
__author__ = "<NAME>"
import logging
import pandas
import pyarrow as pa
import pyarrow.parquet as pq
import numpy
from ..individual_data import Study, Genotype
from ..DataSink import DataFrameSink
from ..miscellaneous import Genomics
def _deplete_variants_to_record_batch(variants, variant_ids, individual_ids):
data = [pa.array(individual_ids)]
logging.log(8, "Consuming genotype variant data")
for i in range(0, len(variants)):
variant = variants.pop(0)
data.append(pa.array(variant))
logging.log(8, "Consumed genotype variant data")
names = ["individual"]+variant_ids
return pa.RecordBatch.from_arrays(data, names)
def _deplete_genotype_variants_to_record_batch(genotype, individual_ids):
return _deplete_variants_to_record_batch(genotype.variants, [x for x in genotype.metadata.id.values], individual_ids)
#The conversion to record batch is because of a bug in pyarrow for flavor="spark"
def _to_record_batch(df):
data =[]
names = list(df.columns.values)
for c in names:
data.append(pa.array(df[c]))
return pa.RecordBatch.from_arrays(data, names)
def save_variants(path, genotype, individual_ids):
"""Will consume the data in the study"""
record_batch = _deplete_genotype_variants_to_record_batch(genotype, individual_ids)
table = pa.Table.from_batches([record_batch])
pq.write_table(table, path, flavor="spark")
def _save_metadata(path, metadata):
table = _to_record_batch(metadata.iloc[0:2, ])
with ParquetDataFrameSink(path, table.schema) as sink:
for c_ in range(1, 23):
logging.log(8, "Saving metadata for chromosome %d", c_)
p_ = metadata.loc[metadata.chromosome == c_]
sink.sink(p_)
def save_metadata(path, genotype):
_save_metadata(path, genotype.metadata)
def save_variable(path, variables, individual_ids=None):
if individual_ids:
variables = variables.copy()
columns = list(variables.columns.values)
columns = ["individual"] + columns
variables["individual"] = individual_ids
variables = variables[columns]
batches = [_to_record_batch(variables)]
table = pa.Table.from_batches(batches)
pq.write_table(table, path, flavor="spark")
def save_study(study, path_prefix):
"""Will consume the data in the study"""
genotype = study.get_genotype()
individuals = study.get_individuals()
phenotype = study.get_phenotype()
covariates = study.get_covariates()
path_variant = path_prefix + ".variants.parquet"
save_variants(path_variant, genotype, individuals)
path_variant_metadata = path_prefix + ".variants_metadata.parquet"
save_metadata(path_variant_metadata, genotype)
path_pheno = path_prefix + ".pheno.parquet"
save_variable(path_pheno, phenotype, individuals)
if covariates is not None:
path_covariate = path_prefix + ".covariate.parquet"
save_variable(path_covariate, covariates, individuals)
########################################################################################################################
def variant_key_value_from_metadata(path):
p = pq.read_table(path, columns=["id", "rsid"]).to_pandas()
return {x.id:x.rsid for x in p.itertuples()}
def variants_from_metadata(path, frequency_threshold=None):
if not frequency_threshold:
p = pq.read_table(path, columns=["id"]).to_pandas()
else:
p = pq.read_table(path, columns=["id", "allele_1_frequency"]).to_pandas().rename(columns={"allele_1_frequency":"f"})
p = p.loc[(p.f > frequency_threshold) & (p.f < (1-frequency_threshold))]
return {x for x in p.id.values}
########################################################################################################################
class ParquetStudyBase(Study._Study):
def __init__(self, variant_metadata, phenotype_file, covariates, individuals):
super().__init__()
self.variant_metadata = variant_metadata
self.phenotype_file = phenotype_file
self.individuals = individuals
self.pheno_list_ = None
self.covariates = covariates
def get_variants_metadata(self, variants=None): return Genotype._get_variants_metadata(self.variant_metadata, variants)
def get_phenos(self, phenos=None, to_pandas=True): return _read(self.phenotype_file, phenos, to_pandas=to_pandas)
def get_individuals(self): return self.individuals
#def get_available_pheno_list(self): return Study._get_list(self.pheno)
def get_available_covariate_list(self): return Study._get_list(self.covariates)
def get_covariates(self, covariates=None, to_pandas=True): return Study._get(self.covariates, covariates, to_pandas=to_pandas)
def get_available_pheno_list(self):
if not self.pheno_list_:
self.pheno_list_ = [x for x in self.phenotype_file.schema.names if x != "individual"]
return self.pheno_list_
class ParquetStudy(ParquetStudyBase):
"""
variant_file and phenotype_file are meant to be read on the fly.
Variant data will be loaded into a dictionary to avoid pandas overhead. This is the opposite default to other sibling Study classes.
variant_metadata, we'll preload.
"""
def __init__(self, variant_file, variant_metadata, phenotype_file, covariates, individuals):
super().__init__(variant_metadata, phenotype_file, covariates, individuals)
self.variant_file = variant_file
def get_variants(self, variants=None, to_pandas=False, omit_individuals=False, specific_individuals=None): return _read(self.variant_file, variants, omit_individuals, to_pandas, specific_individuals)
########################################################################################################################
class ParquetSplitStudy(ParquetStudyBase):
"""
    variant_file_map is a map from chromosome numbers to parquet variant files
phenotype_file is meant to be read on the fly.
Variant data will be loaded into a dictionary to avoid pandas overhead. This is the opposite default to other sibling Study classes.
variant_metadata, we'll preload.
"""
def __init__(self, variant_file_map, variant_metadata, phenotype_file=None, covariates=None, individuals=None):
super().__init__(variant_metadata, phenotype_file, covariates, individuals)
self.variant_file_map = variant_file_map
def get_variants(self, variants=None, to_pandas=False, omit_individuals=False, specific_individuals=None):
"""Asssumes all requested varianst in a same chromosome"""
if variants is None:
raise RuntimeError("This implementation demands a specific list of variants")
chr = variants[0].split("_")[0]
v = self.variant_file_map[chr]
return _read(v, variants, omit_individuals, to_pandas, specific_individuals)
class ParquetSingleSplitStudy(ParquetStudyBase):
"""
    variant_paths is a map from chromosome numbers to paths. Each genotype file will be opened when a variant requirement needs it.
    Supports only one chromosome open at any given time.
phenotype_file is meant to be read on the fly.
Variant data will be loaded into a dictionary to avoid pandas overhead. This is the opposite default to other sibling Study classes.
variant_metadata, we'll preload.
"""
def __init__(self, variant_paths, variant_metadata, phenotype_file=None, covariates=None, individuals=None):
super().__init__(variant_metadata, phenotype_file, covariates, individuals)
self.variant_paths = variant_paths
self.last_chr = None
self.file = None
def get_variants(self, variants=None, to_pandas=False, omit_individuals=False, specific_individuals=None):
"""Asssumes all requested varianst in a same chromosome"""
if variants is None:
raise RuntimeError("This implementation demands a specific list of variants")
chr = variants[0].split("_")[0]
if chr != self.last_chr:
logging.log(9, "Loading new chromosome requirement: %s", chr)
self.last_chr = chr
path = self.variant_paths[chr]
self.file = pq.ParquetFile(path)
logging.log(9, "Loaded new chromosome requirement: %s", chr)
return _read(self.file, variants, omit_individuals, to_pandas, specific_individuals)
########################################################################################################################
def _individual_mask(individuals, specific_individuals):
if specific_individuals:
return [individuals.index(x) for x in specific_individuals]
else:
return None
def get_snps_data(annotation_row, window, snp_metadata, snp_file, specific_individuals, to_pandas=False):
features_in_window = Genomics.entries_for_gene_annotation(annotation_row, window, snp_metadata)
return features_in_window, _read(snp_file, [x for x in features_in_window.id.values], to_pandas=to_pandas, specific_individuals=specific_individuals)
def _read(file, columns=None, skip_individuals=False, to_pandas=False, specific_individuals=None):
if columns is None:
columns = file.schema.names
if not skip_individuals:
columns = ["individual"]+[x for x in columns]
if skip_individuals and specific_individuals is not None:
raise RuntimeError("Unsupported combination")
v = file.read(columns=columns)
if to_pandas:
v = v.to_pandas()
if specific_individuals is not None:
indexes = set(specific_individuals)
v = v.loc[v.individual.isin(indexes)]
else:
if specific_individuals:
mask = _individual_mask(v.column(0).to_pylist(), specific_individuals)
        v = {c.name:(numpy.array(c.to_pylist(), dtype=numpy.float32) if c.name != "individual" else numpy.array(c.to_pylist(), dtype=str)) for c in v}
if specific_individuals:
v = {k:d[mask] for k,d in v.items()}
return v
def study_from_parquet(variants, variants_metadata, pheno=None, covariates=None, post_process_variants_metadata=None, chromosome=None, frequency_filter=None):
logging.info("Loading variants' parquet file")
_v = pq.ParquetFile(variants)
logging.info("Loading variants metadata")
if post_process_variants_metadata or chromosome:
f = pq.ParquetFile(variants_metadata)
_vm =[]
if chromosome:
_r = (chromosome-1,)
else:
_r = f.num_row_groups if f.num_row_groups < 22 else 22
_r = range(0, _r)
for i in _r:
logging.log(9,"Loading row group %d", i)
vmi_ = f.read_row_group(i).to_pandas()
if post_process_variants_metadata:
vmi_ = post_process_variants_metadata(vmi_)
if vmi_.shape[0] > 0:
_vm.append(vmi_)
_vm = pandas.concat(_vm)
else:
_vm = pq.ParquetFile(variants_metadata).read().to_pandas()
if frequency_filter:
_vm = _vm.loc[(_vm.allele_1_frequency > frequency_filter) & (_vm.allele_1_frequency < (1 - frequency_filter))]
if pheno:
logging.info("Loading phenotype")
p_ = pq.ParquetFile(pheno)
else:
p_ = None
individuals = _v.read(["individual"]).to_pandas()["individual"]
if covariates:
logging.info("Loading covariates")
c_ = pq.ParquetFile(covariates).read().to_pandas()
else:
c_ = None
return ParquetStudy(_v, _vm, p_, c_, individuals)
########################################################################################################################
class ParquetDataFrameSink(DataFrameSink):
def __init__(self, path, schema, compression=None):
self.path = path
self.schema = schema
self.writer = None
self.compression = compression
def __enter__(self):
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.finalize()
def sink(self, d):
d = _to_record_batch(d)
d = pa.Table.from_batches([d])
self.writer.write_table(d)
def initialize(self):
logging.log(9, "Initializing parquet sink")
self.writer = pq.ParquetWriter(self.path, self.schema, flavor="spark", compression=self.compression)
def finalize(self):
logging.log(9, "Finalizing parquet sink")
self.writer.close()
```
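A minimal round-trip sketch for `save_variable` above, using synthetic data and a throwaway path; it assumes the package layout shown in the file headers is importable.

```python
import pandas
import pyarrow.parquet as pq
from genomic_tools_lib.file_formats.Parquet import save_variable  # assumes the module above is on the path

# Synthetic phenotype frame and an illustrative output path.
pheno = pandas.DataFrame({"GID1": [0.1, 0.2, 0.3]})
save_variable("pheno.parquet", pheno, individual_ids=["ID_0", "ID_1", "ID_2"])

# The file carries an "individual" column followed by the variable columns.
print(pq.read_table("pheno.parquet").to_pandas())
```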
#### File: genomic_tools_lib/individual_data/Simulate.py
```python
__author__ = "<NAME>"
import numpy
import pandas
from . import Genotype, Study
_DEFAULT_SAMPLE_SIZE=500
_DEFAULT_VARIANTS_PER_CHROM=100
def reset_seed():
numpy.random.seed(0)
def random_alleles():
_map = {
"A": ["C","G"],
"T": ["C","G"],
"C": ["A", "T"],
"G": ["A", "T"]}
first = numpy.random.choice(list(_map.keys()))
second = numpy.random.choice(_map[first])
return first, second
def random_dosage(sample_size):
a = list(numpy.random.uniform(0,2.0/3, sample_size))
b = list(numpy.random.uniform(2.0/3, 4.0/3, sample_size))
c = list(numpy.random.uniform(4.0/3, 2, sample_size))
mode_ = numpy.random.choice(3)
if mode_ == 0:
pool = a*5 + b + c
elif mode_ == 1:
pool = a + b*5 + c
else:
pool = a + b + c*5
return numpy.random.choice(pool, sample_size, False)
def simulate_genotype(variants_per_chromosome=_DEFAULT_VARIANTS_PER_CHROM, sample_size=_DEFAULT_SAMPLE_SIZE):
id_ = 0
dosages = []
metadata = []
for i in range(1,23):
chromosome = i
pos_ = 1
for j in range(0, variants_per_chromosome):
id_ += 1
rsid = "rs{}".format(id_)
pos_ += numpy.random.randint(0,100)
allele_0, allele_1 = random_alleles()
dosage = random_dosage(sample_size)
frequency = numpy.mean(dosage)/2
metadata.append((chromosome, pos_, rsid, allele_0, allele_1, frequency))
dosages.append(dosage)
metadata = Genotype._metadata_from_raw(metadata)
return Genotype.Genotype(dosages, metadata)
def simulate_individuals(sample_size=_DEFAULT_SAMPLE_SIZE):
return ["ID_{}".format(i) for i in range(0,sample_size)]
def simulate_random_phenotype(individual_ids):
p = numpy.random.uniform(size=len(individual_ids))
return pandas.DataFrame({"phenotype": p})
def simulate_bslmm_phenotype(genotype, individual_ids, selected):
sample_size = len(individual_ids)
m = genotype.metadata
d = genotype.variants
pheno_b = numpy.zeros(sample_size)
for i,row in enumerate(m.itertuples()):
if row.id in selected:
coef = numpy.random.uniform(1,2, sample_size)*numpy.random.choice([1,-1])
else:
coef = numpy.random.uniform(-0.02, 0.02, sample_size)
d_ = numpy.nan_to_num(d[i])
pheno_b += d_*coef
pheno_b += numpy.random.normal(size=sample_size)
pheno_r = numpy.random.normal(size=sample_size)
pheno = pandas.DataFrame({"GID1":pheno_b, "GID2":pheno_r})
return pheno
def simulate_bslmm_study(snps_per_chromosome=_DEFAULT_VARIANTS_PER_CHROM):
snps_per_chromosome = snps_per_chromosome if snps_per_chromosome is not None else _DEFAULT_VARIANTS_PER_CHROM
genotype = simulate_genotype(snps_per_chromosome)
individuals = simulate_individuals()
gene_annotation = simulate_gencode_parsed()
selected_snps = select_snps(gene_annotation, genotype.metadata)
phenotype = simulate_bslmm_phenotype(genotype, individuals, selected_snps)
return Study.Study(genotype, phenotype, individuals), selected_snps, gene_annotation
def simulate_gencode_parsed():
d = [[1, "GID1", "A", 100, 300, "pseudogene"],
[2, "GID2", "B", 4000, 5000, "lincRNA"]]
d = pandas.DataFrame(d, columns=["chr", "gene_id", "gene_name", "start", "end", "gene_type"])
return d
def select_snps(gene_annotation, metadata):
g_ = gene_annotation[gene_annotation.gene_id == "GID1"]
c_ = g_.chr[0]
s_ = 0
e_ = g_.end[0] + 100
v_ = metadata[(metadata.chromosome == c_) & (metadata.position >= s_) & (metadata.position <= e_)]
selected_ = numpy.random.choice(range(0, v_.shape[0]), 2)
v_ = v_.iloc[selected_]
return set(v_.id)
```
#### File: summary-gwas-imputation/src/gwas_summary_imputation_postprocess.py
```python
__author__ = "<NAME>"
import os
import re
import logging
from timeit import default_timer as timer
import pandas
import numpy
from scipy import stats
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.miscellaneous import Genomics
COLUMN_ORDER = ["variant_id", "panel_variant_id", "chromosome", "position", "effect_allele", "non_effect_allele",
# Original_build (TODO),
"current_build", "frequency", "sample_size", "zscore", "pvalue", "effect_size", "standard_error",
"imputation_status", "n_cases"]
def gwas_k(d):
r = []
for t in d.itertuples():
_r = None
try:
_r = "{}_{}".format(t.chromosome, int(t.position))
except Exception as e:
logging.log(6, "Error for {}_{}".format(t.chromosome, t.position))
r.append(_r)
return r
def process_original_gwas(args, imputed):
logging.info("Processing GWAS file %s", args.gwas_file)
g = pandas.read_table(args.gwas_file)
g = g.assign(current_build="hg38", imputation_status="original")[COLUMN_ORDER]
# Remember the palindromic snps are to be excluded from the input GWAS;
logging.info("Read %d variants", g.shape[0])
if not args.keep_all_observed:
if args.keep_criteria == "GTEX_VARIANT_ID":
g = g.loc[~ g.panel_variant_id.isin(imputed.panel_variant_id)]
elif args.keep_criteria == "CHR_POS":
g = g.assign(k = gwas_k(g))
imputed = imputed.assign(k = gwas_k(imputed))
g = g.loc[~ g.k.isin({x for x in imputed.k})]
g.drop("k", axis=1, inplace=True)
imputed.drop("k", axis=1, inplace=True)
else:
raise RuntimeError("Unsupported keep option")
logging.info("Kept %d variants as observed", g.shape[0])
g = pandas.concat([g, imputed])[COLUMN_ORDER]
logging.info("%d variants", g.shape[0])
logging.info("Filling median")
g = Genomics.fill_column_to_median(g, "sample_size", numpy.int32)
logging.info("Sorting by chromosome-position")
g = Genomics.sort(g)
logging.info("Saving")
Utilities.save_dataframe(g, args.output)
return g[["panel_variant_id"]]
def process_imputed(args):
r = re.compile(args.pattern)
files = sorted([x for x in os.listdir(args.folder) if r.search(x)])
result =[]
for i, file in enumerate(files):
logging.info("Processing imputed %s", file)
p = os.path.join(args.folder, file)
g = pandas.read_table(p)
if g.shape[0] == 0:
logging.info("Empty set of results for %s", p)
continue
g.drop(["n", "n_indep", "most_extreme_z"], axis=1, inplace=True)
g.rename(columns={"effect_allele_frequency": "frequency", "status": "imputation_status"}, inplace=True)
g = g.assign(pvalue=2 * stats.norm.sf(numpy.abs(g.zscore)), effect_size=numpy.nan, standard_error=numpy.nan,
sample_size=numpy.nan, current_build="hg38", n_cases=numpy.nan)
g = g[COLUMN_ORDER]
result.append(g)
result = pandas.concat(result)
logging.info("Processed %d imputed variants", result.shape[0])
return result
def run(args):
if os.path.exists(args.output):
logging.info("Output exists. Nope.")
return
start = timer()
logging.info("Beginning process")
imputed = process_imputed(args)
process_original_gwas(args, imputed)
end = timer()
logging.info("Finished in %s seconds", str(end - start))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Post summary imputation results")
parser.add_argument("-gwas_file",
help="GWAS file. For the moment, uniform-formatted hg38-based files are accepted.")
parser.add_argument("-folder", help="How far to extend in each direction when searching for variants")
parser.add_argument("-pattern", help="Work only with one chromosome")
parser.add_argument("-output", help="Where to save stuff")
parser.add_argument("--keep_all_observed", help="If an imputed gwas is present in the observed values",
action="store_true")
parser.add_argument("--keep_criteria",
help="Discard original entries according to match by: CHR_POS or GTEX_VARIANT_ID",
default="GTEX_VARIANT_ID", type=str)
parser.add_argument("-parsimony",
help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything",
default="10")
args = parser.parse_args()
Logging.configure_logging(int(args.parsimony))
run(args)
```
#### File: summary-gwas-imputation/src/models_from_gene_snp.py
```python
__author__ = "<NAME>"
import os
import logging
import pandas
import sqlite3
import pyarrow.parquet as pq
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.individual_data import Utilities as StudyUtilities
from genomic_tools_lib.miscellaneous import Models
from genomic_tools_lib.file_formats import Miscellaneous
def get_weights(x_weights, id_whitelist=None):
if x_weights[1] == "PIP":
w = Miscellaneous.dapg_signals(x_weights[0], float(x_weights[2]), id_whitelist)
w = w.rename(columns={"gene":"gene_id", "pip":"w", "variant_id":"id"})
else:
raise RuntimeError("unsupported weights argument")
return w
def run(args):
if os.path.exists(args.output):
logging.info("output exists already, delete it or move it")
return
logging.info("Starting")
Utilities.ensure_requisite_folders(args.output)
logging.info("Loading data annotation")
gene_annotation = StudyUtilities.load_gene_annotation(args.gene_annotation)
gene_annotation = gene_annotation.rename({"gene_name":"genename"}, axis=1)[["gene_id", "genename", "gene_type"]]
logging.info("Loading variant annotation")
features_metadata = pq.read_table(args.features_annotation).to_pandas()
logging.info("Loading spec")
weights = get_weights(args.spec)
w = weights.merge(features_metadata[["id", "allele_0", "allele_1", "rsid"]], on="id", how="left")
w = w.rename({"allele_0":"ref_allele", "allele_1":"eff_allele", "id":"varID"}, axis=1)
w["gene"] = w.gene_id.str.cat(w.cluster_id.astype(str), sep="_")
w = w.drop(["w", "cluster_id"], axis=1)
w = w.sort_values(by="gene").assign(weight = 1)
logging.info("Building models")
with sqlite3.connect(args.output) as conn:
w.drop("gene_id", axis=1).fillna("NA")[["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]].to_sql("weights", conn, index=False)
e = w[["gene_id", "gene"]].merge(gene_annotation, on="gene_id").drop("gene_id", axis=1)
e["n_snps_in_window"] = None
e["n.snps.in.model"] = 1
e["pred.perf.pval"] = None
e["pred.perf.qval"] = None
e["pred.perf.R2"] = None
e = e[["gene", "genename", "gene_type", "n_snps_in_window", "n.snps.in.model", "pred.perf.R2", "pred.perf.pval", "pred.perf.qval"]]
e.to_sql("extra", conn, index=False)
Models.model_indexes(conn)
logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Train Elastic Net prediction models from GLMNET")
parser.add_argument("-spec", nargs="+")
parser.add_argument("-gene_annotation")
parser.add_argument("-features_annotation")
parser.add_argument("-output")
parser.add_argument("-parsimony", default=10, type=int)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
```
#### File: summary-gwas-imputation/src/model_training_variable_to_parquet.py
```python
__author__ = "<NAME>"
import logging
from timeit import default_timer as timer
from genomic_tools_lib import Logging
from genomic_tools_lib import Utilities
from genomic_tools_lib.file_formats import ModelTraining, Parquet
def run(args):
start = timer()
Utilities.ensure_requisite_folders(args.parquet_output)
logging.info("Loading variable")
variables = ModelTraining.load_variable_file(args.variable_file)
logging.info("Saving")
Parquet.save_variable(args.parquet_output, variables)
end = timer()
logging.info("Finished in %s", str(end-start))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Convert model training format data to parquet format ")
parser.add_argument("-variable_file", help="Folder where genotype files are")
parser.add_argument("-parquet_output", help="Parquet file to save")
parser.add_argument("-parsimony", help="Log parsimony level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
args = parser.parse_args()
Logging.configure_logging(int(args.parsimony))
run(args)
```
#### File: summary-gwas-imputation/src/parse_dapg.py
```python
__author__ = "<NAME>"
import logging
import re
import gzip
import pandas
from genomic_tools_lib import Utilities, Logging
def get_gene_whitelist(args):
g = pandas.read_table(args.gencode)
return {x for x in g.gene_id.values}
model_re = re.compile(r"\s+(?P<rank>\d+)\s+(?P<mpp>[-+]?\d*\.\d+e[-+]?\d+)\s+(?P<n>\d+)\s+(?P<posterior_score>[-+]?\d*\.\d+|\d+)\s+\[(?P<variants>.*)\]\n$")
def parse_model_line(name, s):
return "{}\t{}\t{}\t{}\t{}\n".format(name, s.group("rank"), s.group("n"), s.group("mpp"), s.group("posterior_score"))
def parse_model_line_for_variant(name, s):
v = s.group("variants").split("] [")
if v[0] == "NULL":
return None
return ["{}\t{}\t{}\n".format(name, s.group("rank"), x) for x in v]
model_expected_size_re = re.compile(r"Posterior expected model size: (?P<p>[-+]?\d*\.\d+|\d+) \(sd = (?P<pse>[-+]?\d*\.\d+|\d+)\)\n$")
def parse_expected_size(s):
return s.group("p"), s.group("pse")
lognc_re = re.compile(r"LogNC = (?P<lognc>[-+]?\d*\.\d+|\d+) \( Log10NC = (?P<log10nc>[-+]?\d*\.\d+|\d+) \)\n$")
def parse_log_10_nc(s):
return s.group("lognc"), s.group("log10nc")
variant_re = re.compile(r"\(\((?P<variant_rank>\d+)\)\)\s+(?P<variant_id>[^\s]+)\s+(?P<variant_pip>[-+]?\d*\.\d+e[-+]?\d+)\s+(?P<variant_log10abvf>[-+]?\d*\.\d+|\d+)\s+(?P<cluster_id>\d+)\n$")
def parse_variant_line(s):
return s.group("variant_rank"), s.group("variant_id"), s.group("variant_pip"), s.group("variant_log10abvf"), s.group("cluster_id")
cluster_re = re.compile(r"\s+\{(?P<cluster_id>\d+)\}\s+(?P<n>\d+)\s+(?P<cluster_pip>[-+]?\d*\.\d+e[-+]?\d+)\s+(?P<r2>[-+]?\d*\.\d+|\d+)\s+(?P<correlation>.*)\n$")
def parse_cluster_line(s):
return s.group("cluster_id"), s.group("n"), s.group("cluster_pip"), s.group("r2")
def run(args):
logging.info("Processing...")
Utilities.ensure_requisite_folders(args.output_prefix)
spec = Utilities.file_logic(args.input_folder, args.input_pattern)
with gzip.open(args.output_prefix+ ".models.txt.gz", mode="w") as models:
models.write("gene\tmodel\tn\tpp\tps\n".encode())
with gzip.open(args.output_prefix + ".models_variants.txt.gz", mode="w") as model_variants:
model_variants.write("gene\tmodel\tvariant\n".encode())
with gzip.open(args.output_prefix+ ".model_summary.txt.gz", mode="w") as model_summary:
model_summary.write("gene\tpes\tpes_se\tlog_nc\tlog10_nc\n".encode())
with gzip.open(args.output_prefix+ ".variants_pip.txt.gz", mode="w") as variant_pip:
variant_pip.write("gene\trank\tvariant_id\tpip\tlog10_abf\tcluster_id\n".encode())
with gzip.open(args.output_prefix + ".clusters.txt.gz", mode="w") as clusters:
clusters.write("gene\tcluster\tn_snps\tpip\taverage_r2\n".encode())
with gzip.open(args.output_prefix + ".cluster_correlations.txt.gz", mode="w") as cluster_correlations:
cluster_correlations.write("gene\tid1\tid2\tvalue\n".encode())
for i,t in enumerate(spec.itertuples()):
logging.log(9, "Processing %s", t.name)
written = set()
with open(t.path) as dap:
p, pse, lognc, log10nc = None, None, None, None
for l in dap:
s = model_re.search(l)
if s:
ml = parse_model_line(t.name, s)
models.write(ml.encode())
vl = parse_model_line_for_variant(t.name, s)
if vl:
for vl_ in vl:
model_variants.write(vl_.encode())
continue
s = model_expected_size_re.search(l)
if s:
p, pse = parse_expected_size(s)
continue
s = lognc_re.search(l)
if s:
lognc, log10nc = parse_log_10_nc(s)
model_summary.write("{}\t{}\t{}\t{}\t{}\n".format(t.name, p, pse, lognc, log10nc).encode())
continue
s = variant_re.search(l)
if s:
rank, id, pip, log10_abvf, cluster_id = parse_variant_line(s)
variant_pip.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(t.name, rank, id, pip, log10_abvf, cluster_id).encode())
continue
s = cluster_re.search(l)
if s:
id, n, pip, r2 = parse_cluster_line(s)
clusters.write("{}\t{}\t{}\t{}\t{}\n".format(t.name, id, n, pip, r2).encode())
_id1 = int(id)
comps = s.group("correlation").strip().split()
for _id2 in range(1, len(comps)+1):
if (_id1,_id2) in written or (_id2,_id1) in written:
continue
comp = comps[_id2-1]
cluster_correlations.write("{}\t{}\t{}\t{}\n".format(t.name, _id1, _id2, comp).encode())
written.add((_id1,_id2))
logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Patch gene name column")
parser.add_argument("-input_folder", help="Where to load file from")
parser.add_argument("-input_pattern", help="Name pattern")
parser.add_argument("-output_prefix", help="Where to save")
parser.add_argument("-parsimony", help="Logging parsimony", type=int, default=10)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
```
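An illustration of what `variant_re` above captures, applied to a synthetic DAP-G output line.

```python
import re

# Same pattern as variant_re above; the line is synthetic, for illustration only.
variant_re = re.compile(r"\(\((?P<variant_rank>\d+)\)\)\s+(?P<variant_id>[^\s]+)\s+(?P<variant_pip>[-+]?\d*\.\d+e[-+]?\d+)\s+(?P<variant_log10abvf>[-+]?\d*\.\d+|\d+)\s+(?P<cluster_id>\d+)\n$")
line = "((1)) chr1_100_A_G_b38 9.99000e-01 5.321 1\n"
s = variant_re.search(line)
print(s.group("variant_rank"), s.group("variant_id"), s.group("variant_pip"),
      s.group("variant_log10abvf"), s.group("cluster_id"))
# 1 chr1_100_A_G_b38 9.99000e-01 5.321 1
```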
#### File: summary-gwas-imputation/src/parse_db_snp.py
```python
__author__ = "<NAME>"
import os
import logging
from timeit import default_timer as timer
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.file_formats import DBSnp
def run(args):
if (not args.output and not args.output_blacklist) or (args.output and args.output_blacklist):
logging.info("Provide only one output argument")
return
if args.output and os.path.exists(args.output):
logging.info("Output path %s exists. Nope.", args.output)
return
if args.output_blacklist and os.path.exists(args.output_blacklist):
logging.info("Output path for skipped variants %s exists. Nope.", args.output_blacklist)
return
start = timer()
logging.info("Started parsing DB SNP file")
if args.output:
Utilities.ensure_requisite_folders(args.output)
entries = Utilities.lineify(DBSnp.generate(args.input, args.fields, args.keep_zero_based, recode_observed=args.recode_observed))
Utilities.write_iterable_to_file(entries, args.output, Utilities.to_line(args.fields))
else:
Utilities.ensure_requisite_folders(args.output_blacklist)
entries = Utilities.lineify(DBSnp.generate_skips(args.input, args.fields, args.keep_zero_based, recode_observed=args.recode_observed))
Utilities.write_iterable_to_file(entries, args.output_blacklist, Utilities.to_line(args.fields+ ["reason"]))
end = timer()
    logging.info("Finished parsing in %s seconds", str(end-start))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("parse/convert dbsnp file")
parser.add_argument("-keep_zero_based", action="store_true")
parser.add_argument("-input", help="input DB SNP file")
parser.add_argument("-output", help="output DB SNP file")
parser.add_argument("-output_blacklist", help="output variants to skip from DB SNP file")
parser.add_argument("-fields", help="fields to extract", default=["chromosome", "start", "end", "name"], nargs="+")
parser.add_argument("--recode_observed", action="store_true")
parser.add_argument("-parsimony", help="log output parsimony", type=int, default=logging.INFO)
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
```
#### File: summary-gwas-imputation/src/patch_gene_name.py
```python
__author__ = "<NAME>"
import logging
import pandas
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import KeyedDataSource
def run(args):
r_ = pandas.read_csv if ".csv" in args.input else pandas.read_table
sep = "," if ".csv" in args.output else "\t"
logging.info("Loading gene table")
g = KeyedDataSource.load_data(args.gene_table, "gene_id", "gene_name")
logging.info("Loading input")
i = r_(args.input)
gene_name = []
for t in i.itertuples():
gene_name.append(g[t.gene])
i["gene_name"] = gene_name
logging.info("saving")
Utilities.save_dataframe(i, args.output, sep=sep)
logging.info("Done")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Patch gene name column")
parser.add_argument("-input", help="Where to load file from")
parser.add_argument("-output", help="Where to save")
    parser.add_argument("-gene_table", help="Gene annotation table mapping gene_id to gene_name")
parser.add_argument("-verbosity", help="Logging verbosity (actually loquacity)", type=int, default=10)
args = parser.parse_args()
Logging.configure_logging(args.verbosity)
run(args)
```
#### File: summary-gwas-imputation/src/run_coloc.py
```python
__author__ = "<NAME>"
import pandas
import os
import logging
from timeit import default_timer as timer
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.external_tools.coloc import Coloc
def run(args):
Coloc.initialize(args.coloc_script)
if os.path.exists(args.output):
logging.info("Output exists. Nope.")
return
start = timer()
logging.info("Loading gwas")
gwas = Coloc.read_gwas(args.gwas, args.gwas_sample_size, args.gwas_mode)
streamer = Coloc.eqtl_streamer(args.eqtl, gwas)
results = []
    logging.info("Beginning process")
MAX_N=args.MAX_N
for i, d in enumerate(streamer):
gene = d.gene_id.values[0]
logging.log(9, "Processing gene %s", gene)
eqtl = Coloc.get_eqtl(d, args.eqtl_sample_size, args.eqtl_mode)
r = Coloc.coloc_on_gwas_eqtl(gene, gwas, eqtl, args.gwas_mode, args.eqtl_mode, args.p1, args.p2, args.p12)
results.append(r)
if MAX_N and i > MAX_N:
logging.info("Early exit")
break
logging.info("Saving")
results = Coloc.results_to_dataframe(results)
Utilities.ensure_requisite_folders(args.output)
Utilities.save_dataframe(results, args.output)
end = timer()
logging.info("Finished COLOC in %s seconds" % (str(end - start)))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Run COLOC")
parser.add_argument("-coloc_script", help="Optional override of R's coloc infrastructure")
parser.add_argument("-gwas", help="Which gwas to run")
parser.add_argument("-gwas_mode", help="options in [pvalue, bse, zscore_1]", default="bse")
parser.add_argument("-eqtl", help="Which eQTL to run")
parser.add_argument("-eqtl_mode", help="options in [pvalue, bse, zscore_1]", default="bse")
parser.add_argument("-gwas_sample_size", help="either 'FROM_GWAS' (default) or integer sample size", default="FROM_GWAS")
parser.add_argument("-eqtl_sample_size", help="eQTL number of samples", type=int)
parser.add_argument("-p1", type=float, default=1e-4)
parser.add_argument("-p2", type=float, default=1e-4)
parser.add_argument("-p12", type=float, default=1e-5)
    parser.add_argument("-output", help="File where the COLOC results will be saved")
    parser.add_argument("-keep_intermediate_folder", help="Whether to keep torus intermediate stuff", action="store_true")
parser.add_argument("-snp_annotation_from_parquet_metadata", help="Load a genotype study metadata (parquet format) for the required information")
parser.add_argument("-parsimony", help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
parser.add_argument("-MAX_N", type=int)
args = parser.parse_args()
Logging.configure_logging(int(args.parsimony))
run(args)
```
#### File: summary-gwas-imputation/src/simulate_study.py
```python
__author__ = "<NAME>"
import os
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.individual_data import Simulate
from genomic_tools_lib.file_formats import BIMBAM, Parquet, SBAM
def save_study(study, selected_snps, simulated_gencode, prefix, _save):
Utilities.ensure_requisite_folders(prefix)
_save(study)
selected_snps_ = prefix + ".selected_snps.txt.gz"
Utilities.write_iterable_to_file(selected_snps, selected_snps_)
gencode_path = os.path.join(os.path.split(prefix)[0], "gene_annotation.txt.gz")
Utilities.save_dataframe(simulated_gencode, gencode_path)
def run(args):
if not (args.bimbam_output_prefix or args.parquet_output_prefix or args.sbam_output_folder):
raise RuntimeError("Need output argument")
#reproducibility. Add argument for different seed.
Simulate.reset_seed()
study, selected_snps, gene_annotation = Simulate.simulate_bslmm_study(args.snps_per_chromosome)
if args.bimbam_output_prefix:
_save = lambda study: BIMBAM.save_study(study, args.bimbam_output_prefix)
save_study(study, selected_snps, gene_annotation, args.bimbam_output_prefix, _save)
if args.parquet_output_prefix:
_save = lambda study: Parquet.save_study(study, args.parquet_output_prefix)
save_study(study, selected_snps, gene_annotation, args.parquet_output_prefix, _save)
if args.sbam_output_folder:
_save = lambda study: SBAM.save_study(study, args.sbam_output_folder)
save_study(study, selected_snps, gene_annotation, os.path.join(args.sbam_output_folder, "_"), _save)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Generate Simulated Study")
parser.add_argument("-bimbam_output_prefix", help="prefix for BIMBAM output")
parser.add_argument("-parquet_output_prefix", help="prefix for PARQUET output")
    parser.add_argument("-sbam_output_folder", help="folder for SBAM output")
parser.add_argument("-snps_per_chromosome", help="How many snps simulate per chromosome", type=int)
parser.add_argument("-verbosity", help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
args = parser.parse_args()
Logging.configure_logging(int(args.verbosity))
run(args)
```
#### File: summary-gwas-imputation/src/slice_gwas_by_region.py
```python
__author__ = "<NAME>"
import os
import logging
import numpy
import pandas
from genomic_tools_lib import Utilities, Logging
def run(args):
if os.path.exists(args.output):
logging.info("%s exists. Nope.", args.output)
return
logging.info("Loading regions")
regions = pandas.read_table(args.region_file).rename(columns={"chr":"chromosome"})
regions.dropna(inplace=True)
regions.start = regions.start.astype(int)
regions.stop = regions.stop.astype(int)
logging.info("Loading gwas")
gwas = pandas.read_table(args.gwas_file, usecols=["panel_variant_id", "chromosome", "position", "zscore"])
gwas.dropna(inplace=True)
logging.info("Processing")
sliced = []
for i,region in enumerate(regions.itertuples()):
logging.log(8, "Processing region %d", i+1)
if numpy.isnan(region.start) or numpy.isnan(region.stop) or \
(type(region.chromosome) != str and numpy.isnan(region.chromosome)):
logging.log(8, "skipping incomplete region")
continue
slice = gwas[(gwas.chromosome == region.chromosome) & (gwas.position >= region.start) & (gwas.position < region.stop)]
slice = slice.sort_values(by = "position")
if slice.shape[0] == 0:
continue
slice = slice.assign(region = "region-{}-{}-{}".format(region.chromosome, region.start, region.stop), r=i)
slice = slice[["panel_variant_id", "region", "r", "zscore"]]
sliced.append(slice)
sliced = pandas.concat(sliced).sort_values(by="r")
if args.output_format == "dapg":
sliced.region = sliced.r.apply(lambda x: "region{}".format(x))
sliced = sliced.drop(["r"], axis=1)
Utilities.save_dataframe(sliced, args.output, header=False)
elif args.output_format == "gtex_eqtl":
sliced = sliced.assign(gene_id = sliced.region, variant_id=sliced.panel_variant_id, tss_distance = numpy.nan, ma_samples = numpy.nan, ma_count= numpy.nan, maf = numpy.nan, pval_nominal = numpy.nan, slope= sliced.zscore, slope_se=1)
sliced = sliced[["gene_id", "variant_id", "tss_distance", "ma_samples", "ma_count", "maf", "pval_nominal", "slope", "slope_se"]]
Utilities.save_dataframe(sliced, args.output, header=True)
logging.info("Finished slicing gwas")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Group gwas results by region")
parser.add_argument("-region_file", help="Non-overlapping regions")
parser.add_argument("-gwas_file", help="GWAS file, in fixed format (imputed) for now")
parser.add_argument("-output", help="Where to save the result")
parser.add_argument("-parsimony", help="How much logging to output", type=int, default=10)
parser.add_argument("--output_format", default="dapg")
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
``` |
{
"source": "jhcknzzm/SSFL-Benchmarking-Semi-supervised-Federated-Learning",
"score": 3
} |
#### File: SSFL-Benchmarking-Semi-supervised-Federated-Learning/models/EMNIST_test.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from EMNIST_model import *
# Training settings
batch_size = 64
train_dataset = datasets.EMNIST(root='./data/',
train=True,
transform=transforms.ToTensor(),split='byclass',
download=True)
test_dataset = datasets.EMNIST(root='./data/',
train=False,split='byclass',
transform=transforms.ToTensor())
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
for batch_idx, (data, target) in enumerate(train_loader):
output = model(data)
#output:64*10
loss = F.nll_loss(output, target)
if batch_idx % 200 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
``` |
{
"source": "jhclark/groupify",
"score": 4
} |
#### File: jhclark/groupify/groupify.py
```python
from __future__ import print_function
from __future__ import division
import sys
import random
WHO_FILE = "who.txt"
def usage():
print("Usage: groupify <max_per_group>")
print()
print("First, place the members of your group who will be attending in who.txt")
def load_who():
with open(WHO_FILE) as f:
who = f.readlines()
who = [ x.strip() for x in who ]
return who
def groupify(who, num_groups):
random.seed()
random.shuffle(who)
groups = [ [] for _ in range(num_groups) ]
for i in range(len(who)):
groups[i%num_groups].append(who[i])
return groups
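# A quick illustration of the round-robin split above (hypothetical names, not from who.txt):
#   groupify(["Ann", "Bo", "Cy", "Dee", "Ed"], 2)
#   -> e.g. [["Cy", "Ann", "Ed"], ["Dee", "Bo"]], depending on the shuffle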
def main(*args):
if len(args) != 1:
usage()
sys.exit(1)
who = load_who()
max_per_group = int(args[0])
num_groups = 1
while len(who) / num_groups > max_per_group:
num_groups += 1
groups = groupify(who, num_groups)
for i in range(len(groups)):
print("Group {}: {}".format(i+1, ", ".join(groups[i])))
if __name__ == "__main__":
main(*sys.argv[1:])
``` |
{
"source": "jhcode14/Costco_Recorder",
"score": 3
} |
#### File: jhcode14/Costco_Recorder/Data_Analysis.py
```python
import re
from ItemData import ItemData
import requests
#setup values
Data = [ItemData]
def loadAndAnalysis(link):
HEADERS = {'user-agent': ('Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0')}
# Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0
http = requests.get(link,timeout=5, headers=HEADERS)
if http.status_code == 200:
print('Success')
elif http.status_code == 404:
print('fail')
else:
        print("Request failed with status code", http.status_code)
filename = "html_temp.md"
file_object = open(filename,"w+")
file_object.write(http.text)
    file_object.close()
php1_lines = []
with open (filename, "rt") as myfile:
for line in myfile:
php1_lines.append(line)
    # categorize things in order
line_number = 6000
temp_list = []
lnlist = 0
arOrder = 0
title = ""
for line in php1_lines:
if line.find("<title>") != -1:
            title = line.rstrip().replace("<title>", "").replace("| Costco</title>", "").strip()
    # store type (0), line number (1), and link of product (2) in a temporary list due to complications with sorting everything at once
countIndex = 0
for line in php1_lines[6000:]:
line_number +=1
if line.find('<div class="product-tile-set" data-set-same-height data-pdp-url=') != -1:
temp_list.append([title,line_number,line.rstrip().replace('<div class="product-tile-set" data-set-same-height data-pdp-url=','').strip().strip(">").strip('"').strip('" item-index="'+str(countIndex))])
countIndex += 1
if lnlist == 0:
lnlist = line_number
#add name and price of the product to temp array
#would be great to add spec later
for line in php1_lines[lnlist:]:
#add price (3)
if line.find('<div class="price" id="price') != -1:
temp_list[arOrder].extend([re.sub(r'^.+?"DEFAULT">',"", line).replace("</div>","")])
#add name of the product (4)
if line.find('<a href="https://www.costco.com/') != -1:
temp_list[arOrder].extend([re.sub(r'^.+?html">',"", line).replace("</a>","")])
arOrder +=1
if len(temp_list) == arOrder:
break
#print('testing', list_of_results[1][2])
for elem in temp_list:
#print('type = ', elem[0], 'line numb = ', elem[1], 'link = ', elem[2], 'price= ', elem[3], 'product name= ', elem[4])
#print("--------------------------------------------------------------------------------")
Data.append(ItemData(elem[0], elem[4], elem[3], elem[2]))
def getTitle():
return Data[1].getType
def getData():
return Data
```
#### File: jhcode14/Costco_Recorder/GUI1.py
```python
import tkinter as tk
"""
from tkinter import *
from tkinter.filedialog import askopenfilename
from tkinter import ttk
from tkinter.ttk import *
"""
#gui 1 when requesting for new link or view old recordings
class Application1(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack()
self.create_widgets()
def create_widgets(self):
self.label = tk.Label(self, text="""
        Welcome to Costco_Recorder made by <NAME>
        This program will record and analyze the Costco
        item category you entered! Here are 3 ways to
        use this program:
        1. To create a fresh Costco recording:
            Enter a Costco category link and press "Analyze"
        2. To look at an old, analyzed costco_recorder file:
            Press "Analyze" directly
        3. To combine a new link with old analyzed files:
            Enter the link, press "Analyze" and in the next
            page please go to the menu on top left and select
            an old file by pressing "Open" """)
self.label.pack()
"""
self.load = tk.Button(self)
self.load["text"] = "Enter"
self.load["command"] = self.pressed
self.load.pack(side="right")
"""
self.contents = tk.StringVar()
self.entry = tk.Entry(self)
self.entry.pack(side="left")
self.entry["textvariable"] = self.contents
self.quit = tk.Button(self, text="Analyze", fg="red",
command=self.master.destroy)
self.quit.pack(side="bottom")
def pressed(self):
if self.contents.get() != "":
print ("you have entered:", self.contents.get(), "\nPress Analyze to continue")
else:
            print("Please enter a Costco website category")
def getLink(self):
if self.contents.get() != "":
return self.contents.get()
else:
return "readfile"
class runtk1:
def callGUI():
root = tk.Tk()
app = Application1(master=root)
app.master.title("Costco Recorder Start-up")
root.geometry("350x260")
app.mainloop()
return app.getLink()
#for testing
#runtk1.callGUI()
```
#### File: jhcode14/Costco_Recorder/ItemData.py
```python
class ItemData:
def __init__(self, type1, title, price, link):
self.type1 = type1
self.title = title
#self.spec = spec
self.price = price
self.link = link
@property
def getType(self):
return self.type1
@property
def getTitle(self):
return self.title
@property
def getSpec(self):
return self.spec
@property
def getPrice(self):
return self.price
@property
def getLink(self):
return self.link
@property
def setType(self, text):
self.type1 = text
@property
def setTitle(self, text):
self.title = text
@property
def setSpec(self, text):
self.spec = text
@property
def setPrice(self, text):
self.price = text
@property
def setLink(self, text):
self.link = text
``` |
{
"source": "jhcode14/Task_Manager",
"score": 4
} |
#### File: Task_Manager/my_module/functions.py
```python
from datetime import datetime as dt
# For my_script.py, MUST comment/hide when running test_function.py
from my_module.classes import Manager
# For test_function.py, MUST comment/hide when running my_script.py
#from classes import Manager
import os
def create_task():
"""
Function create_task():
Description: Prompt the user simple print and input questions,
containing task name, detail, due date(optional), user (optional)
, and catagory (optional). The info recorded will be processed to
form a Manager type data.
In-take Variables: n/a
Returned Variable: new_task (in the format of Manager class)
"""
print("Please enter the task info")
input_name = input('|ENTER Task Name|: ')
while not input_name:
input_name = input('|ENTER Task Name|: ')
input_detail = input('|ENTER Detail of Task|: ')
while not input_detail:
input_detail = input('|ENTER Detail of Task|: ')
input_due = input('|(Optional)ENTER Due Date or press ENTER to skip|: ')
if not input_due:
input_due = 'n/a'
input_user = input('|(Optional)ENTER User Name or press ENTER to skip|: ')
if not input_user:
input_user= 'n/a'
    input_catagory = input('|(Optional)ENTER Category or press ENTER to skip|: ')
if not input_catagory:
input_catagory = 'n/a'
new_task = Manager(input_name, input_detail, dt.now(), input_due, input_user, input_catagory)
return new_task
def delete_num(order, tasks, num_to_delete):
"""
Function delete_num(order, tasks, num_to_delete):
Description: Delete the given number in order list, and remove the key
and value in dictionary tasks, where the key = number given. Since the
order list is in order (i.e. 1,2,3,4,5), whichever number removed will
cause the number list and dictionary to shift and adjust so the new
edited list will remain in order without skipping.
In-take Variable: order (list), tasks (dictionary), num_to_delete (int)
Returned Variable: order, tasks
"""
order = order[:-1]
del tasks[num_to_delete]
    # proceeds to shift all remaining tasks' keys down by one if num_to_delete is not at the end
    shift_num = num_to_delete
    while shift_num + 1 in tasks.keys():
        tasks[shift_num] = tasks[shift_num + 1]
        shift_num += 1
    # drop the now-stale highest key left behind by the shift
    if shift_num != num_to_delete:
        del tasks[shift_num]
    return order, tasks
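# A small illustration with hypothetical values:
#   delete_num([1, 2, 3], {1: "a", 2: "b", 3: "c"}, 2) -> ([1, 2], {1: "a", 2: "c"})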
# Print all tasks as a list (without detail)
def print_tasks_as_list(order,tasks):
"""
Function print_tasks_as_list(order, tasks):
Description: Takes in order and tasks, date created and task name will be
printed out as a list.
In-take Variables: order (list), tasks (dictionary)
Returned Variable: n/a
"""
for num in order:
print(str(num) + tasks[num].return_min_info())
# Print all the tasks info provided
def print_tasks(order, tasks):
"""
Function print_tasks(order, tasks):
Description: Takes in order and tasks, all tasks will be printed
out in the order of the order list.
In-take Variables: order (list), tasks (dictionary)
Returned Variable: n/a
"""
for num in order:
info_list = tasks[num].return_info()
print("-----[Task No. " + str(num) + "]-----")
for content in info_list:
print(content)
# Used when program starts to re-open files saved from previous session
def read_file(file_name):
"""
Function read_file(file_name):
Description: Process the file of given file_name to read the saved data
, the data will be processed and form "order" list and "tasks" dictionary
these two variables will be returned.
In-take Variable: file_name (str)
Returned Variable: order (list), tasks (dictionary)
"""
order = []
tasks = {}
# If no previous data found, return a empity list and dictionary
if not os.path.exists("../"+file_name):
return order, tasks
# For counting where the start of each dataset is located
order_increment = lambda num: num+8 # The start of each dataset is 8 lines apart
order_num = 2 # The start of first dataset is at line 2
process_task = False # True when the start of a new dataset is found
# Start of processing data file
with open("../"+file_name, "rt") as file:
for num, line in enumerate(file,1):
line = line.strip("\n")
# After the line locating # for "order" list is found,
# This will be used to create a new task for "tasks" dict.
if process_task:
if line == "---":
new_task = Manager(temp[0], temp[1], temp[2],
temp[3],temp[4], temp[5])
tasks.setdefault(order[-1], new_task)
process_task = False
else:
temp.append(line)
# Test 1: make sure first line is in right format
elif num == 1 and not "FILE DATA" in line:
print("Inocorrect Data Format")
break
# Append data for "order" list
elif num == order_num:
order.append(int(line))
order_num = order_increment(num)
process_task = True
temp = []
# For Manual Testing:
#print(order)
#print(tasks)
print("*Previous saved data found and opened* \n")
return order, tasks
# Used before program closes to keep the tasks created
def write_file(file_name, order, tasks):
"""
Function write_file(file_name, order, tasks):
Description: Record the given order/tasks data to a .md file with given name.
If old file exists, and order is now empty, old file will be removed, if old
file does not exist, and old file is empty, nothing will be saved.
In-take Variable: file (str), order (list), tasks (dictionary)
Returned Variable: n/a
"""
# If there aren't things to be saved:
if not order:
if not os.path.exists("../"+file_name):
return
os.remove("../"+file_name) # Old file removed if order list is empty
return
# If there are things to be saved:
file = open("../"+file_name, "w")
file.write("FILE DATA | Warning: manually editing may result in file error\n")
for num in order:
file.write(str(num)+"\n")
info_list = tasks[num].return_raw_info()
for content in info_list:
file.write(content+"\n")
file.write("---\n")
    file.close()
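# The saved layout, which read_file() above expects, is one header line followed by an
# 8-line block per task: the order number, then the raw task fields from return_raw_info()
# (six of them, given the 8-line spacing assumed in read_file), then a "---" terminator.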
``` |
{
"source": "jhconning/DevII",
"score": 3
} |
#### File: DevII/notebooks/coase.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
Pc = 4
Pw = 8
c = 1/2
d = 1/2
def F(x,P=Pc,c=c):
'''Cattle Profit Function'''
return P*x - c*x**2
def AG(x, P=Pw):
'''Wheat farm profit before crop damage'''
return P*(x**0) # to return an array of len(x)
def AGD(x,P=Pw,d=d):
'''Wheat farm profit after damage'''
return AG(x,P) - d*x**2
def copt(P=Pc,c=c):
'''rancher private optimum'''
return P/(2*c)
def topt(P=Pc,c=c, d=d):
'''Social effient optimum'''
return P/(2*(c+d))
CE, TE = copt(),topt()
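# With the default parameters above, the rancher's private optimum is
# x = Pc/(2c) = 4/(2*0.5) = 4 head of cattle, while the efficient herd size that also
# accounts for crop damage is x = Pc/(2(c+d)) = 4/(2*(0.5+0.5)) = 2.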
def coaseplot1():
xx = np.linspace(0,6,100)
fig = plt.subplots(figsize=(12,8))
plt.plot(xx, F(xx), label = 'Rancher Profit' )
plt.plot(xx, AG(xx), '--', label = 'Farm w/ no cattle damage' )
plt.plot(xx, AGD(xx), label = 'Farm w/ cattle damage')
plt.plot(xx, F(xx) + AGD(xx),label='Sum of both activities')
plt.scatter(copt(),F(copt()))
plt.scatter(copt(), 0)
plt.scatter(topt(),F(topt()) + AGD(topt()))
plt.grid()
plt.xlim(0,6)
plt.ylim(0,14)
plt.xlabel('x -- head of cattle', fontsize=18)
plt.ylabel('Benefits/Profit', fontsize=18)
plt.legend(fontsize=14);
def MC(x,c=1/2):
'''Cattle MC'''
return 2*c*x
def excost(x,d=1/2):
return 2*d*x
def coaseplot2(Pw=Pw, Pc=Pc):
xx = np.linspace(0,6,100)
fig = plt.subplots(figsize=(12,9))
plt.axhline(Pc);
plt.plot(xx, MC(xx), label = 'Rancher PMC' )
plt.plot(xx, MC(xx)+excost(xx), label = 'SMC')
plt.fill_between(xx, MC(xx)+excost(xx),Pc*xx**0, where=((MC(xx)<=Pc*xx**0) & (xx>2)),
facecolor='green', alpha=0.2, label='DWL')
plt.text(3,5,'DWL' , fontsize=15)
plt.text(5,3.5,r'$SMB = P_C$', fontsize=15)
plt.text(5,5.5, r'$PMC$', fontsize=15)
plt.text(4.5,8.5, r'$SMC$', fontsize=15)
#plt.scatter(topt(),G(topt()) + AGD(topt()))
plt.grid()
plt.xlim(0,6)
plt.ylim(0,10)
plt.yticks(np.arange(0, 10, 1))
plt.xlabel('x -- head of cattle')
plt.ylabel('Benefits/Profit')
plt.legend();
# #### Code for land example
A=1
def FT(T, A=A):
return A*np.sqrt(T)
def MVPT(P,T,A=A):
return A*P/T**(1/2)
def LD(P,r,A=A):
return (P*A/r)**2
A=1
Tbar = 10 # Total land endowment
P = 5.5 # Price of output
cl = 3 # cost of clearing land
def req(P, cl, Tb=Tbar, N=2, A=A):
'''equilibrium rental rate'''
def landemand(r):
return N*(A*P/r)**2 - Tb
return fsolve(landemand, 1)[0]
def mopt(P,cl,A=A):
'''Optimum land use for each i at the P*MPT = max(cl,r)'''
r = req(P,cl)
ru = max(cl, r)
return (A*P/ru)**2
mopt(P,cl), MVPT(P, mopt(P,cl) )
def landmarket(P, cl, title, A=A):
t = np.linspace(0.1,Tbar-0.1, 2*Tbar)
fig = plt.subplots(figsize=(12,8))
x0 = mopt(P,cl,A=A)
plt.ylim(0,5)
#plt.axhline(cl,linestyle=':')
plt.axhline(max(cl,req(P,cl,A=A)),linestyle='--')
plt.axhline(cl,linestyle=':')
plt.plot(t,MVPT(P,t))
plt.text(8, MVPT(P,8),r'$P \cdot F_T(T)$', fontsize=18)
plt.text(1, MVPT(P,Tbar-1),r'$P \cdot F_T(\bar T - T)$', fontsize=18)
plt.xlabel('T -- land use', fontsize=18)
plt.ylabel('MVPT', fontsize=18)
plt.scatter(x0, MVPT(P,x0))
plt.scatter(Tbar-mopt(P,cl),MVPT(P,x0))
plt.plot([x0,x0],[0,MVPT(P,x0)],':')
plt.plot([Tbar-x0,Tbar-x0],[0,MVPT(P,x0)],':')
plt.plot(t,MVPT(P,Tbar - t))
plt.plot(t,MVPT(P,Tbar-t))
plt.title(title, fontsize=18)
plt.xlim(0,Tbar);
```
#### File: DevII/notebooks/farmhh.py
```python
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed
import numpy as np
#plt.style.use('seaborn-whitegrid')
#from mpl_toolkits.mplot3d import *
#from matplotlib import cm
#from scipy.optimize import minimize
## Default parameters
ALPHA = 0.5
BETA = 0.7
TBAR = 100
LBAR = 100
## Technology and Preferences preferences
def F(T,L,alpha=ALPHA):
return (T**alpha)*(L**(1-alpha))
def FL(T,L,alpha=ALPHA):
    """Shadow price of labor"""
    return (1-alpha)*F(T, L, alpha)/L
def FT(T,L,alpha=ALPHA):
    """Shadow price of land"""
    return alpha*F(T, L, alpha)/T
def U(c, l, beta=BETA):
return (c**beta)*(l**(1-beta))
def indif(l, ubar, beta=BETA):
return ( ubar/(l**(1-beta)) )**(1/beta)
def leisure(Lbar,alpha=ALPHA, beta=BETA):
a = (1-alpha)*beta/(1-beta)
return Lbar/(1+a)
def HH(Tbar,Lbar,alpha=ALPHA, beta=BETA):
"""Household optimum leisure, consumption and utility"""
a = (1-alpha)*beta/(1-beta)
leisure = Lbar/(1+a)
output = F(Tbar,Lbar-leisure, alpha)
utility = U(output, leisure, beta)
return leisure, output, utility
## Farm optima (analytic solutions)
def farm_optimum(Tbar, w, alpha=ALPHA):
"""returns optimal labor demand and profits"""
LD = Tbar * ((1-alpha)/w)**(1/alpha)
profit = F(Tbar, LD) - w*LD
return LD, profit
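# The labor demand above follows from the profit-maximization first-order condition
# w = F_L(Tbar, L) = (1-alpha) * (Tbar/L)**alpha, which rearranges to
# LD = Tbar * ((1-alpha)/w)**(1/alpha).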
def HH_optimum(Tbar, Lbar, w, alpha=ALPHA, beta=BETA):
"""returns optimal consumption, leisure and utility.
Simple Cobb-Douglas choices from calculated income """
_, profits = farm_optimum(Tbar, w, alpha)
income = profits + w*Lbar
print(profits, income)
consumption = beta * income
leisure = (1-beta) * income/w
utility = U(consumption, leisure, beta)
return consumption, leisure, utility
## plots
def chayanov(Tbar,Lbar,alpha=ALPHA, beta=BETA):
leis = np.linspace(0.1,Lbar,num=100)
q = F(Tbar,Lbar-leis,alpha)
l_opt, Q, U = HH(Tbar, Lbar, alpha, beta)
print("Leisure, Consumption, Utility =({:5.2f},{:5.2f},{:5.2f})"
.format(l_opt, Q, U))
    print("shadow price labor:{:5.2f}".format(FL(Tbar, Lbar-l_opt, alpha)))
c = indif(leis,U,beta)
fig, ax = plt.subplots(figsize=(8,8))
ax.plot(leis, q, lw=2.5)
ax.plot(leis, c, lw=2.5)
ax.plot(l_opt,Q,'ob')
ax.vlines(l_opt,0,Q, linestyles="dashed")
ax.hlines(Q,0,l_opt, linestyles="dashed")
ax.set_xlim(0, 110)
ax.set_ylim(0, 150)
ax.set_xlabel(r'$l - leisure$', fontsize=16)
ax.set_ylabel('$c - consumption$', fontsize=16)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.grid()
ax.set_title("Chayanovian Household Optimum")
plt.show()
def plot_production(Tbar,Lbar, w, ax=None):
if ax is None:
ax = plt.gca()
lei = np.linspace(1, Lbar)
q = F(Tbar, Lbar-lei)
ax.plot(lei, q, lw=2.5)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
def plot_farmconsumption(Tbar, Lbar, w, alpha=ALPHA, beta=BETA, ax=None):
if ax is None:
ax = plt.gca()
lei = np.linspace(1, Lbar)
LD, profits = farm_optimum(Tbar, w)
q_opt = F(Tbar,LD)
yline = profits + w*Lbar - w*lei
c_opt, l_opt, u_opt = HH_optimum(Tbar, Lbar, w)
ax.plot(Lbar-LD,q_opt,'ob')
ax.plot(lei, yline)
ax.plot(lei, indif(lei,u_opt, beta),'k')
ax.plot(l_opt, c_opt,'ob')
ax.vlines(l_opt,0,c_opt, linestyles="dashed")
ax.hlines(c_opt,0,l_opt, linestyles="dashed")
ax.vlines(Lbar - LD,0,q_opt, linestyles="dashed")
ax.hlines(profits,0,Lbar, linestyles="dashed")
ax.vlines(Lbar,0,F(Tbar,Lbar))
ax.hlines(q_opt,0,Lbar, linestyles="dashed")
ax.text(Lbar+1,profits,r'$\Pi ^*$',fontsize=16)
ax.text(Lbar+1,q_opt,r'$F(\bar T, L^{*})$',fontsize=16)
ax.text(-6,c_opt,r'$c^*$',fontsize=16)
ax.annotate('',(Lbar-LD,2),(Lbar,2),arrowprops={'arrowstyle':'->'})
ax.text((2*Lbar-LD)/2,3,r'$L^{*}$',fontsize=16)
ax.text(l_opt/2,8,'$l^*$',fontsize=16)
ax.annotate('',(0,7),(l_opt,7),arrowprops={'arrowstyle':'<-'})
def farmHHplot(W, TBAR):
fig, ax = plt.subplots(figsize=(10,8))
plot_production(TBAR,LBAR,W)
plot_farmconsumption(TBAR, LBAR, W)
ax.set_title("The Separable Household Model")
ax.set_xlim(0,LBAR+20)
ax.set_ylim(0,F(TBAR,LBAR)+20)
plt.show()
``` |
{
"source": "jhconning/teaching",
"score": 3
} |
#### File: notebooks/trade/hos.py
```python
import numpy as np
from scipy.optimize import fsolve, minimize
np.seterr(divide='ignore', invalid='ignore')
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed
import seaborn
plt.rcParams["figure.figsize"] = (9,9)
plt.style.use('seaborn-colorblind')
plt.rcParams["axes.spines.right"] = True
plt.rcParams["axes.spines.top"] = False
plt.rcParams["font.size"] = 18
plt.rcParams['figure.figsize'] = (10, 6)
plt.rcParams['axes.grid']=True
Kbar = 100 # economywide capital endowment
Lbar = 100 # economywide labor endowment
alpha = 0.6 # A capital share
beta = 0.4 # M capital share
theta = 0.5 # A consumption share
def F(K,L, alpha=alpha):
return K**alpha * L**(1-alpha)
def G(K,L, beta=beta):
return K**beta * L**(1-beta)
def U(Ca,Cm, theta=theta):
return Ca**theta * Cm**(1-theta)
def obj(X, alpha=alpha, beta=beta):
return - U( F(X[0], X[1], alpha), G(Kbar-X[0], Lbar-X[1], beta) )
def kl(wr, kshare):
return (kshare/(1-kshare))* wr
def isoq(L, kshare, qbar):
return ( qbar/(L**(1-kshare)) )**(1/kshare)
def klplot(KL):
wr = np.linspace(0,10,100)
f, ax = plt.subplots(1)
ax.set_xlabel(r'$\frac{K}{L}$')
ax.set_xlim(0,10)
ax.set_ylim(0,10)
ax.set_ylabel(r'$\frac{w}{r}$ -- wage-rental')
ax.plot(kl(wr, alpha), wr, linewidth=4)
ax.plot(kl(wr, beta), wr, linewidth=4)
ax.axvline(KL)
ax.text(KL+0.5, 9, r'$\frac{\bar K}{\bar L}$ ')
ax.set_aspect('equal');
ll = np.linspace(0.1,Lbar,100)
ll_ = np.linspace(0.05*Lbar,0.80*Lbar,100)
def SS(p):
Za = alpha**alpha * (1-alpha)**((1-alpha))
Zm = beta**beta * (1-beta)**((1-beta))
return (p*(Za/Zm))**(1/(alpha-beta))
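# Note: with the default shares alpha = 0.6 and beta = 0.4, Za and Zm coincide
# (both equal 0.6**0.6 * 0.4**0.4), so SS(1.0) = 1 and the wage-rental ratio moves
# with p at elasticity 1/(alpha - beta) = 5.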
def lerner(p):
wr = SS(p=p)
Kas = kl(wr, alpha)
Kms = kl(wr, beta)
QQ=30
Lmqq = QQ/Kms**beta
Kmqq = Kms*Lmqq
Laqq = p*QQ/(Kas**alpha)
Kaqq = Kas*Laqq
I = Kmqq + wr*Lmqq
f, ax = plt.subplots(1)
plt.scatter(Laqq,Kaqq)
plt.scatter(Lmqq,Kmqq)
print(f'w/r = {wr:2.2f}, KLa = {Kas:2.2f}, KLm = {Kms:2.2f}')
plt.xlim(0,100)
plt.ylim(0,100)
plt.plot(ll, Kas*ll, ':')
plt.plot(ll, Kms*ll, ':')
plt.plot(ll_, isoq(ll_,beta, QQ),'b')
plt.plot(ll_, isoq(ll_,alpha, p*QQ))
plt.text(ll_[-1],isoq(ll_[-1],beta,QQ),f"Qm={QQ}", fontsize=14)
plt.text(ll_[-1],isoq(ll_[-1],alpha,p*QQ),f"Qa=p*{QQ}={p*QQ:0.1f}", fontsize=14)
plt.plot(ll, I - wr*ll,'b:')
plt.xlabel('L - labor')
plt.ylabel('K - capital')
plt.suptitle('Lerner Diagram', y=0.98)
plt.title(f'w/r = {wr:2.2f}, Ka/La = {Kas:2.2f}, Km/Lm = {Kms:2.2f}', fontsize = 12)
plt.gca().set_aspect('equal');
def ssplot(p):
wr = SS(p=p)
print(p,wr)
pp = np.linspace(0.2,2,100)
plt.plot(pp, SS(pp),'b')
plt.ylabel(r'$\frac{w}{r}$')
plt.xlabel(r'$p = \frac{p_m}{p_a}$')
plt.axhline(y = wr, xmin = 0, xmax = p)
plt.axvline(x = p, ymin=0, ymax =wr, linestyle=':')
plt.ylim(0,6);
def num_opt(alpha=alpha, beta=beta):
'''Numerically solve closed economy util max subject to PPF '''
x0 = [50,50] # -- guess
sol = minimize(obj, x0,args=(alpha,beta))
Kae, Lae = sol.x
Qae, Qme = F(Kae,Lae, alpha), G(Kbar-Kae, Lbar-Lae, beta)
return Qae, Qme
def indif(Cm, theta , ubar):
return (ubar/(Cm**(1-theta)))**(1/theta)
def closed_plot(alpha=alpha, beta=beta):
La = np.arange(0,Lbar)
Ka = edgeworth(La, Kbar, Lbar,alpha, beta)
Qa = F(Ka,La,alpha)
Qm = G(Kbar-Ka,Lbar-La,beta)
Qae, Qme = num_opt(alpha, beta)
print(f'(Qa, Qm) = ({Qae:0.1f}, {Qme:0.1f})')
fig, ax = plt.subplots()
ax.plot(Qm, Qa)
ax.plot(ll, indif(ll, theta, U(Qae,Qme)) )
ax.set_ylim(0,110)
ax.set_xlim(0,110)
ax.scatter(Qme, Qae)
ax.set_aspect('equal')
def rybplot(p, Kbar=Kbar, Lbar=Lbar, alpha=alpha, beta=beta):
""" Trade-style Edgeworth KL line intersections
"""
ll = np.linspace(0.1,Lbar,100)
wr = SS(p)
ka = kl(wr, alpha)
km = kl(wr, beta)
LA = (Kbar-km*Lbar)/(ka-km)
KA = ka*LA
LM, KM = Lbar - LA, Kbar - KA
fig, ax = plt.subplots()
ax.set_xlim(0,Lbar)
ax.set_ylim(0,Kbar)
ax.set_xlabel('L - Labor')
ax.set_ylabel('K - Capital')
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.set_aspect('equal')
ax.plot(ll,ka*ll, '--')
ax.plot(ll,km*ll, '--')
ax.scatter(LA,KA)
ax.scatter(LM,KM)
ax.scatter(0.5,0.5)
Im = (KA-km*LA)+km*ll
Ia = (KM-ka*LM)
llm_up = np.linspace(LA,Lbar,100)
lla_lo = np.linspace(LM,Lbar,100)
ax.plot(llm_up,(KA-km*LA)+km*llm_up,'--')
ax.plot(lla_lo,(KM-ka*LM)+ka*lla_lo,'--')
ax.text(1.05*Lbar, 0.95*Kbar,r' $p$'+f' = {p:3.1f}', fontsize=14)
ax.text(1.05*Lbar, 0.95*Kbar-6,r'$(L_A,K_A)$'+f' = ({LA:2.0f}, {KA:2.0f})', fontsize=14)
ax.text(1.05*Lbar, 0.95*Kbar-12,r'$(L_M,K_M)$'+f' = ({LM:2.0f}, {KM:2.0f})', fontsize=14)
ax.text(1.05*Lbar, 0.95*Kbar-18,r'$(Q_A, Q_M)$'+f' = ({F(KA,LA):2.0f},{G(KM,LM):2.0f} )', fontsize=14)
def hos_eq(p, Kbar=Kbar, Lbar=Lbar):
wr = SS(p)
ka = kl(wr, alpha)
km = kl(wr, beta)
LA = (Kbar-km*Lbar)/(ka-km)
KA = ka*LA
return LA, KA
def edgeworth(L, Kbar=Kbar, Lbar=Lbar, alpha=alpha, beta=beta):
"""efficiency locus: """
A = (beta*(1-alpha)) / (alpha*(1-beta) )
#return b*L*Kbar/(a*(Lbar-L)+b*L)
return (L*Kbar)/(A*(Lbar-L)+L)
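# The efficiency locus comes from equating the marginal rates of technical substitution
# in the two sectors, ((1-alpha)/alpha)*(K/L) = ((1-beta)/beta)*((Kbar-K)/(Lbar-L)),
# which solves to K = L*Kbar / (A*(Lbar-L) + L) with A as defined above.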
def edgeplot(LA, Kbar=Kbar, Lbar=Lbar, alpha=alpha, beta=beta):
"""Draw an edgeworth box
arguments:
LA -- labor allocated to ag, from which calculate QA(Ka(La),La)
"""
KA = edgeworth(LA, Kbar, Lbar, alpha, beta)
RTS = (alpha/(1-alpha))*(KA/LA)
QA = F(KA, LA, alpha)
QM = G(Kbar-KA, Lbar-LA, beta)
print("(LA,KA)=({:4.1f}, {:4.1f}) (QA, QM)=({:4.1f}, {:4.1f}) RTS={:4.1f}"
.format(LA,KA,QA,QM,RTS))
La = np.arange(1,Lbar)
fig, ax = plt.subplots(figsize=(7,6))
ax.set_xlim(0, Lbar)
ax.set_ylim(0, Kbar)
ax.plot(La, edgeworth(La, Kbar, Lbar, alpha, beta),'k-')
#ax.plot(La, La,'k--')
ax.plot(La, isoq(La, alpha, QA))
ax.plot(La, Kbar - isoq(Lbar-La, beta, QM),'g-')
ax.plot(LA, KA, 'ob')
ax.vlines(LA, 0, KA, linestyles="dashed")
ax.hlines(KA, 0, LA, linestyles="dashed")
ax.text(-6,-6,r'$O_A$',fontsize=16)
ax.text(Lbar,Kbar,r'$O_M$',fontsize=16)
ax.set_xlabel(r'$L_A - Labor$', fontsize=16)
ax.set_ylabel(r'$K_A - Capital$', fontsize=16)
#plt.show()
def HOS(p, Kbar=Kbar, Lbar=Lbar, alpha=alpha, beta=beta):
LA, KA = hos_eq(p, Kbar=Kbar, Lbar=Lbar)
edgeplot(LA, Kbar=Kbar, Lbar=Lbar, alpha=alpha, beta=beta)
def ppf(LA, Kbar=Kbar, Lbar=Lbar,alpha=alpha,beta=beta):
"""Draw a production possibility frontier
arguments:
LA -- labor allocated to ag, from which calculate QA(Ka(La),La)
"""
fig, ax = plt.subplots(figsize=(6,5))
KA = edgeworth(LA, Kbar, Lbar,alpha, beta)
RTS = (alpha/(1-alpha))*(KA/LA)
QA = F( KA,LA,alpha)
QM = G(Kbar-KA,Lbar-LA,beta)
ax.scatter(QA,QM)
La = np.arange(0,Lbar)
Ka = edgeworth(La, Kbar, Lbar,alpha, beta)
Qa = F(Ka,La,alpha)
Qm = G(Kbar-Ka,Lbar-La,beta)
ax.set_xlim(0, Lbar)
ax.set_ylim(0, Kbar)
ax.plot(Qa, Qm,'k-')
ax.set_xlabel(r'$Q_A$',fontsize=18)
ax.set_ylabel(r'$Q_B$',fontsize=18)
def wreq(p,a=alpha, b=beta):
B = ((1-a)/(1-b))*(a/(1-a))**a * ((1-b)/b)**b
return B*p**(1/(b-a))
def ssline(a=alpha, b=beta):
p = np.linspace(0.5,1.5,500)
plt.title('The Stolper-Samuelson line')
plt.xlabel(r'$p = \frac{P_a}{P_m}$', fontsize=18)
plt.ylabel(r'$ \frac{w}{r}$', fontsize=18)
plt.plot(p, wreq(p, a, b));
``` |
{
"source": "jhcross/sacrebleu",
"score": 3
} |
#### File: sacrebleu/tokenizers/tokenizer_base.py
```python
class BaseTokenizer:
"""A base dummy tokenizer to derive from."""
def signature(self):
"""
Returns a signature for the tokenizer.
:return: signature string
"""
return 'none'
def __call__(self, line):
"""
Tokenizes an input line with the tokenizer.
:param line: a segment to tokenize
:return: the tokenized line
"""
return line
```
#### File: sacrebleu/sacrebleu/utils.py
```python
import os
import re
import sys
import gzip
import math
import hashlib
import logging
import portalocker
from collections import defaultdict
from typing import List, Optional, Sequence, Dict
from argparse import Namespace
from tabulate import tabulate
import colorama
from .dataset import DATASETS, SUBSETS, DOMAINS, COUNTRIES
# Where to store downloaded test sets.
# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.
#
# Querying for a HOME environment variable can result in None (e.g., on Windows)
# in which case the os.path.join() throws a TypeError. Using expanduser() is
# a safe way to get the user's home folder.
USERHOME = os.path.expanduser("~")
SACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))
sacrelogger = logging.getLogger('sacrebleu')
class Color:
ENABLE_COLORS = True
@staticmethod
def format(msg: str, color: str) -> str:
"""Returns a colored version of the given message string.
:param msg: The string to Color.format.
:param color: The color specifier i.e. 'red', 'blue', 'green', etc.
:return: A colored version of the string if the output is a terminal.
"""
if not Color.ENABLE_COLORS:
return msg
_ansi_str = getattr(colorama.Fore, color.upper(), None)
if _ansi_str:
return f'{_ansi_str}{msg}{colorama.Style.RESET_ALL}'
return msg
def _format_score_lines(scores: dict,
width: int = 2,
multiline: bool = True) -> Dict[str, List[str]]:
"""Formats the scores prior to tabulating them."""
new_scores = {'System': scores.pop('System')}
p_val_break_char = '\n' if multiline else ' '
is_bootstrap = False
def _color_p_value(p: float):
msg = f'(p = {p:.4f})'
if p > 0.05:
return Color.format(msg, 'red')
return msg + '*'
for metric, vals in scores.items():
new_vals = []
for result in vals:
if not isinstance(result, str):
# Format result instances
_str = f'{result.score:.{width}f}'
if result.mean is not None:
is_bootstrap = True
_str += f' ({result.mean:.{width}f} ± {result.ci:.{width}f})'
if result.p_value is not None:
_str += p_val_break_char + _color_p_value(result.p_value)
else:
# Already formatted in non paired-test mode
_str = result
new_vals.append(_str)
if is_bootstrap:
# Change titles
metric += ' (μ ± 95% CI)'
new_scores[metric] = new_vals
return new_scores
def print_results_table(results: dict, signatures: dict, args: Namespace):
"""Prints out a nicely formatted table for multi-system evaluation mode."""
tablefmt = args.format
if tablefmt in ('text', 'json'):
# Fallback to simple table if json is given
tablefmt = 'fancy_grid'
elif tablefmt == 'latex':
# Use booktabs
tablefmt = 'latex_booktabs'
# If paired testing has been given, this'll format the score lines
results = _format_score_lines(
results, args.width, multiline=tablefmt == 'fancy_grid')
new_dict = {}
# Color the column names and the baseline system name and scores
has_baseline = False
baseline_name = ''
for name in results.keys():
val = results[name]
if val[0].startswith('Baseline:') or has_baseline:
if val[0].startswith('Baseline:'):
baseline_name = val[0]
has_baseline = True
val[0] = Color.format(val[0], 'yellow')
new_dict[Color.format(name, 'cyan')] = results[name]
# Finally tabulate
table = tabulate(
new_dict, headers='keys', tablefmt=tablefmt,
colalign=('right', ),
stralign='center',
numalign='center',
floatfmt=f'.{args.width}f')
print(table)
print()
is_paired = args.paired_bs or args.paired_ar
if is_paired:
test_type = 'bootstrap resampling' if args.paired_bs else 'approximate randomization'
n_samples_or_trials = args.paired_bs_n if args.paired_bs else args.paired_ar_n
test_sample_type = 'resampling trials' if args.paired_bs else 'trials'
msg = f'Paired {test_type} test with {n_samples_or_trials} {test_sample_type}'
bline = Color.format('baseline', 'yellow')
bline_name = Color.format(baseline_name, 'yellow')
null_hyp = Color.format('Null hypothesis', 'green')
pval_color = Color.format('highlighted in red', 'red')
# Print fancy header
print('-' * len(msg) + '\n' + msg + '\n' + '-' * len(msg))
print(f' - Each system is pairwise compared to {bline_name}.')
if args.paired_bs:
print(' Actual system score / bootstrap estimated true mean / 95% CI are provided for each metric.')
else:
print(' Actual system score is provided for each metric.')
print()
print(f' - {null_hyp}: the system and the {bline} translations are essentially')
print(f' generated by the same underlying process. For a given system and the {bline},')
print(' the p-value is roughly the probability of the absolute score difference (delta)')
print(f' or higher occurring due to chance, under the assumption that the {null_hyp.lower()} is correct.')
print()
print(f' - Assuming a significance threshold of 0.05, the {null_hyp.lower()} can be rejected')
print(' for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed')
print(f' to chance, hence the system is significantly "different" than the {bline}.')
print(f' Otherwise, the p-values are {pval_color}.')
print()
print(f' - NOTE: Significance does not tell whether a system is "better" than the {bline} but rather')
print(' emphasizes the "difference" of the systems in terms of the replicability of the delta.')
print()
print('-----------------')
print('Metric signatures')
print('-----------------')
for name, sig in signatures.items():
print(f' - {name:<10} {sig}')
def print_single_results(results: List[str], args: Namespace):
"""Re-process metric strings to align them nicely."""
if args.format == 'json':
if len(results) > 1:
proper_json = '[\n' + ',\n'.join(results) + '\n]'
print(proper_json)
else:
print(results[0])
return
# Color confidence strings for emphasis
if 'μ' in results[0]:
color_re = re.compile(r'(\(μ = [0-9\.]+ ± [0-9\.]+\))')
for idx in range(len(results)):
results[idx] = color_re.sub(
lambda m: Color.format(m.group(), 'cyan'), results[idx])
if len(results) == 1:
# Just one system, nothing to align.
print(results[0])
return
# Align by '=' character
lens = []
for line in results:
# If not score_only, split lines from '=' for re-alignment
try:
lens.append(line.index('=') - 1)
except ValueError:
print(line)
if len(lens) > 0:
w = max(lens)
for (_len, line) in zip(lens, results):
left, right = line[:_len], line[_len:]
print(f'{left:>{w}}{right}')
def sanity_check_lengths(system: Sequence[str],
refs: Sequence[Sequence[str]],
test_set: Optional[str] = None):
n_hyps = len(system)
if any(len(ref_stream) != n_hyps for ref_stream in refs):
sacrelogger.error("System and reference streams have different lengths.")
if test_set:
sacrelogger.error("This could be an issue with your system output "
"or with sacreBLEU's reference database if -t is given.")
sacrelogger.error("For the latter, try cleaning out the cache by typing:\n")
sacrelogger.error(f" rm -r {SACREBLEU_DIR}/{test_set}\n")
sacrelogger.error("The test sets will be re-downloaded the next time you run sacreBLEU.")
sys.exit(1)
def smart_open(file, mode='rt', encoding='utf-8'):
"""Convenience function for reading compressed or plain text files.
:param file: The file to read.
:param mode: The file mode (read, write).
:param encoding: The file encoding.
"""
if file.endswith('.gz'):
return gzip.open(file, mode=mode, encoding=encoding, newline="\n")
return open(file, mode=mode, encoding=encoding, newline="\n")
def my_log(num: float) -> float:
"""
Floors the log function
:param num: the number
:return: log(num) floored to a very low number
"""
if num == 0.0:
return -9999999999
return math.log(num)
def sum_of_lists(lists):
"""Aggregates list of numeric lists by summing."""
if len(lists) == 1:
return lists[0]
# Preserve datatype
size = len(lists[0])
init_val = type(lists[0][0])(0.0)
total = [init_val] * size
for ll in lists:
for i in range(size):
total[i] += ll[i]
return total
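# E.g. (a toy illustration): sum_of_lists([[1, 2], [3, 4]]) -> [4, 6], with the element
# type of the first list preserved for the running totals.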
def args_to_dict(args, prefix: str, strip_prefix: bool = False):
"""Filters argparse's `Namespace` into dictionary with arguments
beginning with the given prefix."""
prefix += '_'
d = {}
for k, v in args.__dict__.items():
if k.startswith(prefix):
k = k.replace(prefix, '') if strip_prefix else k
d[k] = v
return d
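# For example (a sketch with a made-up Namespace rather than sacreBLEU's real CLI args):
#   args_to_dict(Namespace(paired_bs=True, paired_bs_n=1000, width=1), 'paired', strip_prefix=True)
#   -> {'bs': True, 'bs_n': 1000}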
def process_to_text(rawfile, txtfile, field: int = None):
"""Processes raw files to plain text files. Can handle SGML, XML, TSV files, and plain text.
Called after downloading datasets.
:param rawfile: the input file (possibly SGML)
:param txtfile: the plaintext file
:param field: For TSV files, which field to extract.
"""
def _clean(s):
"""
Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
:param s: The string.
:return: A cleaned-up string.
"""
return re.sub(r'\s+', ' ', s.strip())
if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:
sacrelogger.info(f"Processing {rawfile} to {txtfile}")
if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line)), file=fout)
# IWSLT
elif rawfile.endswith('.xml'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line)), file=fout)
# MTNT
elif rawfile.endswith('.tsv'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip().split('\t')[field], file=fout)
# PLAIN TEXT
else:
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip(), file=fout)
def print_test_set(test_set, langpair, side, origlang=None, subset=None):
"""Prints to STDOUT the specified side of the specified test set.
:param test_set: the test set to print
:param langpair: the language pair
:param side: 'src' for source, 'ref' for reference
:param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
:param subset: print only sentences whose document annotation matches a given regex
"""
if side == 'src':
files = [get_source_file(test_set, langpair)]
elif side == 'ref':
files = get_reference_files(test_set, langpair)
elif side == "both":
files = [get_source_file(test_set, langpair)] + get_reference_files(test_set, langpair)
streams = [smart_open(file) for file in files]
streams = filter_subset(streams, test_set, langpair, origlang, subset)
for lines in zip(*streams):
print('\t'.join(map(lambda x: x.rstrip(), lines)))
def get_source_file(test_set: str, langpair: str) -> str:
"""
Returns the source file for a given testset/langpair.
Downloads it first if it is not already local.
:param test_set: The test set (e.g., "wmt19")
:param langpair: The language pair (e.g., "de-en")
:return: the path to the requested source file
"""
return get_files(test_set, langpair)[0]
def get_reference_files(test_set: str, langpair: str) -> List[str]:
"""
Returns a list of one or more reference file paths for the given testset/langpair.
Downloads the references first if they are not already local.
:param test_set: The test set (e.g., "wmt19")
:param langpair: The language pair (e.g., "de-en")
:return: a list of one or more reference file paths
"""
return get_files(test_set, langpair)[1:]
def get_files(test_set, langpair) -> List[str]:
"""
Returns the path of the source file and all reference files for
the provided test set / language pair.
Downloads the references first if they are not already local.
:param test_set: The test set (e.g., "wmt19")
:param langpair: The language pair (e.g., "de-en")
:return: a list of the source file and all reference files
"""
if test_set not in DATASETS:
raise Exception(f"No such test set {test_set}")
if langpair not in DATASETS[test_set]:
raise Exception(f"No such language pair {test_set}/{langpair}")
cachedir = os.path.join(SACREBLEU_DIR, test_set)
source, target = langpair.split("-")
source_path = os.path.join(cachedir, f"{langpair}.{source}")
num_refs = len(DATASETS[test_set][langpair]) - 1
if num_refs == 1:
reference_paths = [os.path.join(cachedir, f"{langpair}.{target}")]
else:
reference_paths = [os.path.join(cachedir, f"{langpair}.{target}.{num}") for num in range(num_refs)]
all_files = [source_path] + reference_paths
for fname in all_files:
if not os.path.exists(fname):
download_test_set(test_set, langpair)
break
return all_files
def extract_tarball(filepath, destdir):
sacrelogger.info(f'Extracting {filepath} to {destdir}')
if filepath.endswith('.tar.gz') or filepath.endswith('.tgz'):
import tarfile
with tarfile.open(filepath) as tar:
tar.extractall(path=destdir)
elif filepath.endswith('.zip'):
import zipfile
with zipfile.ZipFile(filepath, 'r') as zipfile:
zipfile.extractall(path=destdir)
def check_md5sum(dest_path, expected_md5):
# Check md5sum
if expected_md5 is not None:
md5 = hashlib.md5()
with open(dest_path, 'rb') as infile:
for line in infile:
md5.update(line)
cur_md5 = md5.hexdigest()
if cur_md5 != expected_md5:
sacrelogger.error(f'Fatal: MD5 sum of downloaded file was incorrect (got {cur_md5}, expected {expected_md5}).')
            sacrelogger.error(f'Please manually delete {dest_path!r} and rerun the command.')
sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')
sys.exit(1)
else:
sacrelogger.info(f'Checksum passed: {cur_md5}')
def download_file(source_path, dest_path, extract_to=None, expected_md5=None):
"""Downloading utility.
Downloads the specified test to the system location specified by the SACREBLEU environment variable.
:param source_path: the remote uri to download
:param dest_path: where to save the file
:param extract_to: for tarballs, where to extract to
:param expected_md5: the MD5 sum
:return: the set of processed file names
"""
import urllib.request
import ssl
outdir = os.path.dirname(dest_path)
os.makedirs(outdir, exist_ok=True)
lockfile = os.path.join(outdir, f'{os.path.basename(dest_path)}.lock')
with portalocker.Lock(lockfile, 'w', timeout=60):
if os.path.exists(dest_path):
check_md5sum(dest_path, expected_md5)
else:
sacrelogger.info(f"Downloading {source_path} to {dest_path}")
try:
with urllib.request.urlopen(source_path) as f, open(dest_path, 'wb') as out:
out.write(f.read())
except ssl.SSLError:
sacrelogger.warning('An SSL error was encountered in downloading the files. If you\'re on a Mac, '
'you may need to run the "Install Certificates.command" file located in the '
'"Python 3" folder, often found under /Applications')
sys.exit(1)
check_md5sum(dest_path, expected_md5)
# Extract the tarball
if extract_to is not None:
extract_tarball(dest_path, extract_to)
def download_test_set(test_set, langpair=None):
"""Downloads the specified test to the system location specified by the SACREBLEU environment variable.
:param test_set: the test set to download
:param langpair: the language pair (needed for some datasets)
:return: the set of processed file names
"""
if test_set not in DATASETS:
raise Exception(f"No such test set {test_set}")
outdir = os.path.join(SACREBLEU_DIR, test_set)
os.makedirs(outdir, exist_ok=True)
expected_checksums = DATASETS[test_set].get('md5', [None] * len(DATASETS[test_set]))
for dataset, expected_md5 in zip(DATASETS[test_set]['data'], expected_checksums):
tarball = os.path.join(outdir, os.path.basename(dataset))
rawdir = os.path.join(outdir, 'raw')
download_file(dataset, tarball, extract_to=rawdir, expected_md5=expected_md5)
file_paths = []
# Process the files into plain text
languages = get_langpairs_for_testset(test_set) if langpair is None else [langpair]
for pair in languages:
src, tgt = pair.split('-')
rawfile = DATASETS[test_set][pair][0]
field = None # used for TSV files
if rawfile.endswith('.tsv'):
field, rawfile = rawfile.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, rawfile)
outpath = os.path.join(outdir, f'{pair}.{src}')
process_to_text(rawpath, outpath, field=field)
file_paths.append(outpath)
refs = DATASETS[test_set][pair][1:]
for i, ref in enumerate(refs):
field = None
if ref.endswith('.tsv'):
field, ref = ref.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, ref)
if len(refs) >= 2:
outpath = os.path.join(outdir, f'{pair}.{tgt}.{i}')
else:
outpath = os.path.join(outdir, f'{pair}.{tgt}')
process_to_text(rawpath, outpath, field=field)
file_paths.append(outpath)
return file_paths
def get_langpairs_for_testset(testset: str) -> List[str]:
"""Return a list of language pairs for a given test set."""
return list(filter(lambda x: re.match(r'\w\w\-\w\w', x), DATASETS.get(testset, {}).keys()))
def get_available_testsets() -> List[str]:
"""Return a list of available test sets."""
return sorted(DATASETS.keys(), reverse=True)
def get_available_origlangs(test_sets, langpair) -> List[str]:
"""Return a list of origlang values in according to the raw SGM files."""
if test_sets is None:
return []
origlangs = set()
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
if rawfile.endswith('.sgm'):
with smart_open(rawfile) as fin:
for line in fin:
if line.startswith('<doc '):
doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
origlangs.add(doc_origlang)
return sorted(list(origlangs))
def filter_subset(systems, test_sets, langpair, origlang, subset=None):
"""Filter sentences with a given origlang (or subset) according to the raw SGM files."""
if origlang is None and subset is None:
return systems
if test_sets is None or langpair is None:
raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')
re_origlang = re.compile(r'.* origlang="([^"]+)".*\n')
re_id = re.compile(r'.* docid="([^"]+)".*\n')
indices_to_keep = []
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
if not rawfile.endswith('.sgm'):
raise Exception(f'--origlang and --subset supports only *.sgm files, not {rawfile!r}')
if subset is not None:
if test_set not in SUBSETS:
raise Exception('No subset annotation available for test set ' + test_set)
doc_to_tags = SUBSETS[test_set]
number_sentences_included = 0
with smart_open(rawfile) as fin:
include_doc = False
for line in fin:
if line.startswith('<doc '):
if origlang is None:
include_doc = True
else:
doc_origlang = re_origlang.sub(r'\1', line)
if origlang.startswith('non-'):
include_doc = doc_origlang != origlang[4:]
else:
include_doc = doc_origlang == origlang
if subset is not None:
doc_id = re_id.sub(r'\1', line)
if not re.search(subset, doc_to_tags.get(doc_id, '')):
include_doc = False
if line.startswith('<seg '):
indices_to_keep.append(include_doc)
number_sentences_included += 1 if include_doc else 0
return [[sentence for sentence, keep in zip(sys, indices_to_keep) if keep] for sys in systems]
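# Hedged usage sketch for filter_subset: keep only sentences whose source
# document was originally written in German. The hypothesis/reference lists
# are illustrative, and the call assumes the raw *.sgm files for wmt19 de-en
# have already been downloaded.
#
#   system_lines = ["hypothesis 1", "hypothesis 2"]
#   ref_lines = ["reference 1", "reference 2"]
#   filtered_sys, filtered_ref = filter_subset(
#       [system_lines, ref_lines], "wmt19", "de-en", origlang="de")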
def print_subset_results(metrics, full_system, full_refs, args):
w = args.width
origlangs = args.origlang if args.origlang else \
get_available_origlangs(args.test_set, args.langpair)
if len(origlangs) == 0:
print('No subset information found. Consider using --origlang argument.')
return
results = defaultdict(list)
for origlang in origlangs:
subsets = [None]
if args.subset is not None:
subsets += [args.subset]
elif all(t in SUBSETS for t in args.test_set.split(',')):
subsets += COUNTRIES + DOMAINS
for subset in subsets:
system, *refs = filter_subset(
[full_system, *full_refs], args.test_set, args.langpair, origlang, subset)
if len(system) == 0:
continue
key = f'origlang={origlang}'
if subset in COUNTRIES:
key += f' country={subset}'
elif subset in DOMAINS:
key += f' domain={subset}'
for metric in metrics.values():
score = metric.corpus_score(system, refs)
results[key].append((len(system), score))
max_left_width = max([len(k) for k in results.keys()]) + 1
max_metric_width = max([len(val[1].name) for val in list(results.values())[0]])
for key, scores in results.items():
key = Color.format(f'{key:<{max_left_width}}', 'yellow')
for n_system, score in scores:
print(f'{key}: sentences={n_system:<6} {score.name:<{max_metric_width}} = {score.score:.{w}f}')
```
#### File: sacrebleu/test/test_api.py
```python
import pytest
from sacrebleu.utils import get_available_testsets, get_langpairs_for_testset
from sacrebleu.utils import get_source_file, get_reference_files
from sacrebleu.dataset import DATASETS
test_api_get_data = [
("wmt19", "de-en", 1, "Schöne Münchnerin 2018: Schöne Münchnerin 2018 in Hvar: Neun Dates", "The Beauty of Munich 2018: the Beauty of Munich 2018 in Hvar: Nine dates"),
("mtnt1.1/train", "ja-en", 10, "0歳から100歳の女性が登場する海外のスキンケアCM", "The overseas skin care commercial in which 0 to 100 year old females appear."),
("wmt19/google/ar", "en-de", 1, "Welsh AMs worried about 'looking like muppets'", "Walisische Abgeordnete befürchten als ,Idioten’ dazustehen."),
]
@pytest.mark.parametrize("testset, langpair, sentno, source, reference", test_api_get_data)
def test_api_get_source(testset, langpair, sentno, source, reference):
with open(get_source_file(testset, langpair)) as fh:
line = fh.readlines()[sentno - 1].strip()
assert line == source
@pytest.mark.parametrize("testset, langpair, sentno, source, reference", test_api_get_data)
def test_api_get_reference(testset, langpair, sentno, source, reference):
with open(get_reference_files(testset, langpair)[0]) as fh:
line = fh.readlines()[sentno - 1].strip()
assert line == reference
def test_api_get_available_testsets():
"""
Loop over the datasets directly, and ensure the API function returns
the test sets found.
"""
available = get_available_testsets()
assert type(available) is list
assert "wmt19" in available
assert "wmt05" not in available
for testset in DATASETS.keys():
assert testset in available
assert "slashdot_" + testset not in available
def test_api_get_langpairs_for_testset():
"""
Loop over the datasets directly, and ensure the API function
returns each language pair in each test set.
"""
for testset in DATASETS.keys():
available = get_langpairs_for_testset(testset)
assert type(available) is list
for langpair in DATASETS[testset].keys():
# skip non-language keys
if "-" not in langpair:
assert langpair not in available
else:
assert langpair in available
assert "slashdot_" + langpair not in available
``` |
{
"source": "jhculb/base_python_project",
"score": 2
} |
#### File: utils/logger/handlers.py
```python
import logging
import sys
from logging.handlers import TimedRotatingFileHandler, SocketHandler
from pathlib import Path
from application.main.config import settings
from application.initializer import ConfigReaderInstance  # module path is an assumption; adjust to the project's actual config-reader location
logging_config = ConfigReaderInstance.yaml.read_config_from_file(
settings.LOG_CONFIG_FILENAME)
class Handlers:
def __init__(self):
self.formatter = logging.Formatter(logging_config.FORMATTER)
self.log_filename = Path().joinpath(
settings.APP_CONFIG.LOGS_DIR, logging_config.FILENAME)
self.rotation = logging_config.ROTATION
    def get_console_handler(self):
        """Return a handler that streams log records to stdout."""
        console_handler = logging.StreamHandler(sys.stdout)  # pass the stream itself; flush() would return None
        console_handler.setFormatter(self.formatter)
        return console_handler
    def get_file_handler(self):
        """Return a timed rotating file handler using the configured path and rotation."""
        file_handler = TimedRotatingFileHandler(
            self.log_filename, when=self.rotation)
        file_handler.setFormatter(self.formatter)
        return file_handler
def get_socket_handler(self):
socket_handler = SocketHandler('127.0.0.1', 19996) # default listening address
return socket_handler
def get_handlers(self):
return [self.get_console_handler(), self.get_file_handler(), self.get_socket_handler()]
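# Hedged usage sketch: attaching every configured handler to a module logger.
#
#   logger = logging.getLogger(__name__)
#   for handler in Handlers().get_handlers():
#       logger.addHandler(handler)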
```
#### File: utils/routers/hello_world.py
```python
from fastapi.responses import JSONResponse
from fastapi.routing import APIRouter
from app.utils.logger.instantiate import logger_instance
from app.utils.logger.instantiate import db_instance
_db = db_instance
router = APIRouter(prefix='/health-check')
logger = logger_instance.get_logger(__name__)
@router.get('/')
async def health_check():
logger.info('Health Check⛑')
await _db.insert_single_db_record({"Status": "OK"})
return JSONResponse(content='OK⛑', status_code=200)
``` |
{
"source": "jhd2best/harmony-ops",
"score": 2
} |
#### File: devops/create_https_wss_endpoints_aws/helpers.py
```python
import argparse
import os
import subprocess
import sys
import boto3
import json
ap = argparse.ArgumentParser(description='parse the network type')
# param to define network, required
ap.add_argument("-n", action="store", dest='network_value', required=True, help="define network type")
# param to check if update endpoints needed, optional
ap.add_argument('-u', '--update', action="store_true", help="update targets for the endpoints only", default=False)
args = ap.parse_args()
current_work_path = os.path.dirname(os.path.realpath(__file__))
if args.network_value:
network_config = current_work_path + '/' + args.network_value + '.json'
def parse_network_config(param):
""" load the network configuration file, retrieve the value by its key """
with open(network_config, 'r') as f:
network_config_dict = json.load(f)
return network_config_dict[param]
def shcmd2(cmd, ignore_error=False):
""" customized version of shcmd created by aw """
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output = proc.stdout.read()
output_string = output.decode("utf-8")
return output_string
def retrieve_instance_region(ip):
""" deduce instance region from its ipv4 """
cmd = "host {ip}".format(ip=ip)
resp = shcmd2(cmd)
info = resp.split('.')
if info[-4] == 'compute':
region = info[-5]
elif info[-4] == 'compute-1':
region = 'us-east-1'
else:
raise ValueError("cannot deduce region from name {}".format(info))
return region
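# Illustrative note (hypothetical values): a reverse lookup such as
#   "4.3.2.1.in-addr.arpa domain name pointer ec2-1-2-3-4.us-west-2.compute.amazonaws.com."
# splits on '.' so that info[-4] == 'compute' and info[-5] == 'us-west-2',
# while legacy hosts ending in .compute-1.amazonaws.com map to 'us-east-1'.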
def verify_nodes_same_region(region, array_ip):
for ip in array_ip:
        if retrieve_instance_region(ip) != region:
            sys.exit(
                "All nodes registered for the same endpoint must be located in the same region; exiting.")
def create_name_target_group(shard, id_domain):
ret = []
tg_prefix = 'tg-s' + str(shard) + '-api-' + id_domain + '-'
ret.append(tg_prefix + 'https')
ret.append(tg_prefix + 'wss')
return ret
def retrieve_instance_id(array_instance_ip):
""" mapping from instance-ip -> instance-id """
array_instance_id = []
for ip in array_instance_ip:
region = retrieve_instance_region(ip)
ec2_client = boto3.client('ec2', region_name=region)
response = ec2_client.describe_instances(Filters=[{'Name': 'ip-address', 'Values': [ip]}])
instance_id = response["Reservations"][0]["Instances"][0]["InstanceId"]
array_instance_id.append(instance_id)
return array_instance_id
def create_name_record_set(shard, id_domain):
ret = [
'api.s' + str(shard) + '.' + id_domain,
'ws.s' + str(shard) + '.' + id_domain
]
return ret
```
#### File: devops/get_blockchain/get_blockchain.py
```python
import requests
import json
import argparse
import sys
num_blockchain = []
num_bad_get_blocks = []
hash_blockchain = []
hash_bad_get_blocks = []
def parse_args():
parser = argparse.ArgumentParser(description='Simple script to get all blocks of a blockchain')
parser.add_argument("endpoint", help="endpoint of blockchain to fetch")
parser.add_argument("--max-height", dest="max_height", default=None, type=int, help="set the max block height, "
"default is None.")
parser.add_argument("--min-height", dest="min_height", default=None, type=int, help="set the min block height, "
"default is None.")
parser.add_argument("--by-hash", dest="by_hash", action="store_true", help="get blockchain by hashes "
"instead of number (not implemented)")
parser.add_argument("--no-txs", dest="get_txs", action="store_false", help="do NOT get full tx data")
parser.add_argument("--stats", dest="stats", action="store_true", help="get stats after processing blockchain")
parser.add_argument("--print", dest="print", action="store_true", help="print blockchain data once done")
return parser.parse_args()
def get_block_number(block_num, endpoint, get_tx_info=False):
url = endpoint
payload = json.dumps({
"jsonrpc": "2.0",
"method": "hmy_getBlockByNumber",
"params": [
str(hex(block_num)),
get_tx_info
],
"id": 1
})
headers = {
'Content-Type': 'application/json'
}
response = requests.request('POST', url, headers=headers, data=payload, allow_redirects=False, timeout=30)
try:
returned = json.loads(response.content)["result"]
return returned
except Exception: # Catch all to not halt
num_bad_get_blocks.append({
'block-num': block_num,
'reason': f"Failed to json load block {block_num}. Content: {response.content}"
})
print(f"\n[!!!] Failed to json load block {block_num}. Content: {response.content}\n")
def get_block_hash(block_hash, endpoint, get_tx_info=False):
url = endpoint
payload = json.dumps({
"jsonrpc": "2.0",
"method": "hmy_getBlockByNumber",
"params": [
block_hash if block_hash.startswith('0x') else '0x' + block_hash,
get_tx_info
],
"id": 1
})
headers = {
'Content-Type': 'application/json'
}
response = requests.request('POST', url, headers=headers, data=payload, allow_redirects=False, timeout=30)
try:
returned = json.loads(response.content)["result"]
return returned
except Exception: # Catch all to not halt
hash_bad_get_blocks.append({
'block-hash': block_hash,
'reason': f"Failed to json load block {block_hash}. Content: {response.content}"
})
print(f"\n[!!!] Failed to json load block {block_hash}. Content: {response.content}\n")
def get_latest_header(endpoint):
url = endpoint
payload = json.dumps({
"jsonrpc": "2.0",
"method": "hmy_latestHeader",
"params": [
],
"id": 1
})
headers = {
'Content-Type': 'application/json'
}
response = requests.request('POST', url, headers=headers, data=payload, allow_redirects=False, timeout=30)
return json.loads(response.content)["result"]
def get_curr_block_height(endpoint):
header = get_latest_header(endpoint)
return int(header["blockNumber"])
def stats(data):
print("\n=== Stats for fetched blocks ===\n")
type_count = {}
total_tx_count = 0
staking_tx_count = 0
plain_tx_count = 0
plain_tx_amt_count = 0
print(f"Total Blocks Fetched: {len(data)}")
for blk in data:
if 'stakingTransactions' in blk.keys():
total_tx_count += len(blk['stakingTransactions'])
for stx in blk['stakingTransactions']:
staking_tx_count += 1
if 'type' in stx:
if stx['type'] not in type_count:
type_count[stx['type']] = 0
type_count[stx['type']] += 1
if 'transactions' in blk.keys():
total_tx_count += len(blk['transactions'])
for tx in blk['transactions']:
plain_tx_count += 1
if 'value' in tx:
atto_amt = int(tx['value'], 16)
plain_tx_amt_count += atto_amt * 1e-18
print(f"Total tx count: {total_tx_count}")
print(f"Plain tx count: {plain_tx_count}")
print(f"Total amount sent via plain tx: {plain_tx_amt_count}")
print(f"Staking tx count: {staking_tx_count}")
print(f"Staking tx type count breakdown: {json.dumps(type_count, indent=4)}")
if __name__ == "__main__":
args = parse_args()
max_height = get_curr_block_height(args.endpoint) if args.max_height is None else args.max_height
min_height = 0 if args.min_height is None else args.min_height
assert max_height > min_height
total_blocks_count = max_height - min_height
for k, i in enumerate(reversed(range(min_height, max_height))):
if not args.print:
sys.stdout.write(f"\rFetched {k}/{total_blocks_count} blocks")
sys.stdout.flush()
if not args.by_hash:
block = get_block_number(i, args.endpoint, args.get_txs)
if block is None:
num_bad_get_blocks.append({
'block-num': i,
'reason': f"Block {i} had a null response: {block}"
})
print(f"\n[!] WARNING block {i} had a null response: {block}\n")
elif block['stakingTransactions'] is None or type(block['stakingTransactions']) != list:
num_bad_get_blocks.append({
'block-num': i,
'reason': f"Block {i} had a null response for staking transactions: {block['stakingTransactions']}"
})
print(f"\n[!] WARNING Block {i} had a null response for staking transactions: "
f"{block['stakingTransactions']}\n")
elif block['transactions'] is None or type(block['transactions']) != list:
num_bad_get_blocks.append({
'block-num': i,
'reason': f"Block {i} had a null response for plain transactions: {block['transactions']}"
})
print(f"\n[!] WARNING block {i} had a null response for plain transactions: "
f"{block['transactions']}\n")
num_blockchain.append(block if block else {})
else:
raise Exception("Not implemented")
if not args.print:
sys.stdout.write(f"\r")
sys.stdout.flush()
if not args.by_hash:
if not args.print:
print(f"\nTotal bad loads with number: {len(num_bad_get_blocks)}")
with open(f'blockchain.json', 'w') as f:
json.dump(num_blockchain, f, indent=4)
with open(f'blockchain-bad-load.json', 'w') as f:
json.dump(num_bad_get_blocks, f, indent=4)
if args.stats:
stats(num_blockchain)
if args.print:
print(json.dumps(num_blockchain))
else:
if not args.print:
print(f"\nTotal bad loads with hash: {len(hash_bad_get_blocks)}")
with open(f'blockchain.json', 'w') as f:
json.dump(hash_blockchain, f, indent=4)
with open(f'blockchain-bad-load.json', 'w') as f:
json.dump(hash_bad_get_blocks, f, indent=4)
if args.stats:
stats(num_blockchain)
if args.print:
print(json.dumps(hash_blockchain))
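# Example invocation (endpoint URL and heights are illustrative):
#   python get_blockchain.py https://api.s0.t.hmny.io --min-height 100 --max-height 200 --stats
# which writes blockchain.json and blockchain-bad-load.json and prints tx statistics.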
```
#### File: monitoring/balance-monitorV2/collect_data.py
```python
import argparse
import os, sys, re
import json
import time
import common
import pandas as pd
import decimal as dec
import datetime as dt
def collect_data(address_list, round, output_dir = None):
results = []
for a in address_list:
req = json.dumps(common.current_balance_request(a.address))
result = common.request(common.api_base % int(a.shard), req)
new_entry = {"address": a.address, "shard": a.shard, "index": round}
        if result is None:
new_entry["balance"] = dec.Decimal('NAN')
else:
new_entry["balance"] = common.format_balance(result["result"])
results.append(new_entry)
    if output_dir:
        timestamp = dt.datetime.now()  # used to name the output file
        output_path = os.path.join(output_dir, timestamp.strftime("%b%d%Y_%H%M"))
        write_output(pd.DataFrame(results), output_path)
else:
return pd.DataFrame(results)
def write_output(collected_data, output_path) -> None:
v_print("Writing output to %s" % output_path)
with open(output_path, 'w') as f:
collected_data.to_csv(f, index = False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-address-list', dest = 'address_list', required = True, help = 'List of ONE addresses to track')
parser.add_argument('-output-dir', dest = 'output_dir', default = os.getcwd(), help = 'Directory for data dump')
parser.add_argument('--verbose', action = 'store_true', help = 'Verbose for debug')
args = parser.parse_args()
if args.verbose:
def v_print(s):
print(s)
else:
def v_print(s):
return
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
address_list = common.read_addresses(args.address_list)
    collect_data(address_list, 0, output_dir=args.output_dir)  # round index 0 assumed for a single collection pass
``` |
{
"source": "jhdarcy/needful",
"score": 3
} |
#### File: needful/_slide_mixins/_bokeh_figure.py
```python
from json import dumps
from secrets import token_hex
from typing import Union, Optional, Type
from bokeh.plotting.figure import Figure as _Figure
from bokeh.themes.theme import Theme
from bokeh.embed import json_item
from bokeh.embed.util import FromCurdoc
from .grid_object import GridObject
from .._utils import check_type
class BokehFigure(GridObject):
"""Represents a Bokeh figure, with an additional function to return the Bokeh JS code.
Parameters
----------
fig: bokeh.plotting.figure.Figure
A Bokeh Figure object.
row: int
The grid row in which to place this plot.
column: int
The grid column in which to place this plot.
row_span: int, default=1
The number of rows for this plot to span.
col_span: int, default=1
The number of columns for this plot to span.
css_class: str, optional
The name of the CSS class (or classes) to apply to this object.
bokeh_theme: bokeh.themes.theme.Theme, optional
A Bokeh theme object to apply to the plot.
"""
def __init__(self,
fig: _Figure,
row: int,
column: int,
row_span: int = 1,
col_span: int = 1,
css_class: Optional[str] = None,
bokeh_theme: Optional[Union[Theme, Type[FromCurdoc]]] = None
):
# Note: not as strict on the Bokeh object typechecking here, as the caller should have already done that.
# Will this stuff us up at some point? Who knows!
self.fig = fig
self.theme = bokeh_theme
self._check_and_set(row, column, row_span, col_span, css_class)
# Generate an ID for this figure - we'll use this later.
self.fig_id = token_hex(5)
def get_plot_js(self) -> str:
"""Create the Bokeh.embed.embed_item(...) JS function for this plot.
Note: the required <div> tags for the plot are generated by the `get_div()` function.
Returns
-------
str
The Bokeh.embed.embed_item(...) function for this plot.
"""
bokeh_dict = json_item(self.fig, target=self.fig_id, theme=self.theme)
bokeh_js_str = f"Bokeh.embed.embed_item({dumps(bokeh_dict)})"
return bokeh_js_str
def get_div(self) -> str:
"""Get the required <div></div> HTML tags to display this plot.
Returns
-------
str
"""
div_str = f'<div {self._style_str}><div id="{self.fig_id}"></div></div>'
return div_str
@property
def _style_str(self):
css_str = f"grid-column : {self.column} / {self.column + self.col_span}; grid-row : {self.row} / {self.row + self.row_span};"
css_str += f" width: {self.fig.width}px; height: {self.fig.height}px;"
if self.css_class:
class_str = f' class="{self.css_class}"'
else:
class_str = ""
style_str = f'style="{css_str}"{class_str}'
return style_str
def __str__(self):
return self.get_div()
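# Hedged usage sketch: embedding a Bokeh plot in a slide grid. The figure and
# grid coordinates below are illustrative; any standard Bokeh Figure works.
#
#   from bokeh.plotting import figure
#
#   fig = figure(width=400, height=300)
#   fig.line([1, 2, 3], [4, 6, 5])
#   bokeh_fig = BokehFigure(fig, row=1, column=1)
#   div_html = bokeh_fig.get_div()      # placeholder <div> for the slide grid
#   embed_js = bokeh_fig.get_plot_js()  # Bokeh.embed.embed_item(...) call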
```
#### File: needful/_slide_mixins/images.py
```python
from base64 import b64encode
from pathlib import Path
from typing import Optional, Union
from PIL import Image as PILImage
from .grid_object import GridObject
from .mixin import SlideMixin
from .._utils import check_exists, check_type, check_sanity_int
class Image(GridObject):
"""Represents an image on the slide.
Parameters
----------
    image_path: str or pathlib.Path
        A string or pathlib.Path object representing the path to the image.
row: int
The grid row in which to place this image.
column: int
The grid column in which to place this image.
row_span: optional, int
The number of rows for this image to span (defaults to `1`).
col_span: optional, int
The number of columns for this image to span (defaults to `1`).
width_pct: optional, int
The percentage of the original image width to scale by. Defaults to 100 (no resizing).
css_class: str, optional
The name of the CSS class (or classes) to apply to this object.
"""
def __init__(self,
image_path: Union[str, Path],
row: int,
column: int,
row_span: int = 1,
col_span: int = 1,
width_pct: int = 100,
css_class: Optional[str] = None
):
# Check provided image_path is either string or Path object, then check that it exists.
check_type("image_path", image_path, Union[str, Path])
check_exists(image_path, "Image")
self.image_file = image_path
self._check_and_set(row, column, row_span, col_span, css_class)
check_sanity_int("width_pct", width_pct)
self.width_pct = width_pct
def get_div(self) -> str:
"""Get the required <div></div> HTML tags to display this image.
Returns
-------
str
"""
        # Read in the image, convert to string with Base64.
        with open(self.image_file, 'rb') as f:
            img = b64encode(f.read()).decode()
            # Use Pillow to also open the image and get its size - we'll use this to scale the image if we need to.
            f.seek(0)  # rewind the file so Pillow can read the image data from the start
            img_size = PILImage.open(f).size
img_width = int(self.width_pct / 100 * img_size[0])
img_style_str = f'style="justify-self:center"'
html_str = f'<div {self._style_str}><center><img src="data:image;base64, {img}" {img_style_str} width={img_width}px></center></div>'
return html_str
class ImageMixin(SlideMixin):
"""Adds Image functionality to the Slide class."""
def add_image(self,
image_path: Union[str, Path],
row: int,
column: int,
row_span: int = 1,
col_span: int = 1,
width_pct: int = 100,
css_class: Optional[str] = None,
):
"""Add an image to this slide, in the specified row and column.
Parameters
----------
image_path: str or pathlib.Path
A string or pathlib.Path object representing the path to the image.
row: int
The grid row in which to place this image.
column: int
The grid column in which to place this image.
row_span: int, default=1
The number of rows for this image to span (defaults to `1`).
col_span: int, default=1
The number of columns for this image to span (defaults to `1`).
width_pct: int, default=100
The percentage of the original image width to scale by. Defaults to 100 (no resizing).
css_class : str, optional
The CSS class (or classes) to apply to this image. Multiple CSS classes are applied in a single string,
separated by a space. I.e. `css_class = "class1 class2"`.
"""
image = Image(image_path, row, column, row_span, col_span, width_pct, css_class)
self._check_grid_pos(row, column)
self._elements.append(image)
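# Hedged usage sketch (hypothetical slide object and file name): place an
# image in the first grid cell at half its native width.
#
#   slide.add_image("figures/overview.png", row=1, column=1, width_pct=50)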
``` |
{
"source": "jhdark/divHretention",
"score": 2
} |
#### File: divHretention/divHretention/inventory_T_c.py
```python
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
from .process_T_c_data import points, data
from inference.gp_tools import GpRegressor
from inference.gp_tools import RationalQuadratic
import divHretention
def inv(points, time=1e7):
''' returns a 1D array'''
values = []
for p in points:
for d in data:
if d["T"] == p[0] and d["c"] == p[1]:
values.append(interp1d(d["t"], d["inventory"])(time))
break
return np.asarray(values)
def estimate_inventory_with_gp_regression(time=1e7):
"""Estimate the monoblock inventory in H/m based on FESTIM results at a
given time.
The regression is made on T, log(c_surface), log(inventory)
Args:
time (float, optional): Exposure time in seconds. Defaults to 1e7.
Returns:
GpRegressor: callable, usage GP(600, np.log10(1e20)) see
https://inference-tools.readthedocs.io/en/stable/GpRegressor.html
"""
# with inference-tools
sim_points = []
z = []
for p in points:
if 320 <= p[0] <= 1100 and 1e20 <= p[1] <= 1e23:
sim_points.append([p[0], np.log10(p[1])])
z.append(np.log10(inv([p], time=time)))
sim_points = np.array(sim_points)
# Train the GP on the data
GP = GpRegressor(
sim_points[:: divHretention.step_mb],
z[:: divHretention.step_mb],
kernel=RationalQuadratic)
# evaluate the estimate
Nx, Ny = 50, 10
gp_x = np.linspace(320, 1100, Nx)
gp_y = np.log10(np.logspace(20, 23, Ny))
gp_coords = [(i, j) for i in gp_x for j in gp_y]
mu, sig = GP(gp_coords)
return GP
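# Hedged usage sketch: evaluating the returned regressor. The query point
# (600 K, a surface concentration of 1e20 m^-3) is illustrative; the call
# yields the log10 inventory estimate and its standard deviation.
#
#   GP = estimate_inventory_with_gp_regression(time=1e7)
#   log_inv, log_sig = GP(600, np.log10(1e20))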
if __name__ == "__main__":
pass
```
#### File: divHretention/plotting/plot_along_divertor.py
```python
import matplotlib.pyplot as plt
from matplotlib import ticker
import numpy as np
from divHretention import Exposition
correspondance_dict = {
"arc_length": {
"label": "Distance along divertor (m)"
},
"ion_energy": {
"yscale": "log",
"label": "Incident energy (eV)",
"linestyle": "solid"
},
"atom_energy": {
"yscale": "log",
"label": "Incident energy (eV)",
"linestyle": "dashed"
},
"ion_flux": {
"yscale": "log",
"label": "Incident flux (m$^{-2}$ s$^{-1}$)",
"linestyle": "solid"
},
"atom_flux": {
"yscale": "log",
"label": "Incident flux (m$^{-2}$ s$^{-1}$)",
"linestyle": "dashed"
},
"heat_flux": {
"yscale": "log",
"label": "Heat flux (W m$^{-2}$)"
},
"ion_angle": {
"label": "Angle of incidence (°)",
"linestyle": "solid"
},
"atom_angle": {
"label": "Angle of incidence (°)",
"linestyle": "dashed"
},
"T_surf": {
"label": r"$T_\mathrm{surface}$ (K)"
},
"c_surf": {
"yscale": "log",
"label": r"$c_\mathrm{surface}$ (m$^{-3}$)"
},
"inventory": {
"yscale": "log",
"label": "Inventory per \n unit thickness (H/m)"
},
"stdev_inv": {
},
}
class plot_along_divertor():
"""Plots some quantities along the divertor arc length. This class works
with csv files (see :func:`Exposition()
<divHretention.extract_data.Exposition>`).
Args:
filenames (list, optional): The CSV file names. Defaults to [].
filetypes (list or str, optional): The CSV file types
("WEST" or "ITER"). Defaults to [].
quantities (list, optional): The quantities to be plotted
(see :py:meth:`correspondance_dict` in
:py:mod:`plot_along_divertor`). Defaults to ["stdev_inv"].
figsize (tuple, optional): The size of the figure. Defaults to (8, 8).
plot_sigma (bool, optional): If true, the 95% confidence interval will
be plotted. Defaults to True.
overlap_ions_atoms (bool, optional): If True, energies, fluxes and
angles of ions and atoms will be plotted on the same plot.
Defaults to True.
colors (list, optional): List of matplotlib colors. The length of
`colors` must be the same as `filetypes`. Defaults to None.
Raises:
ValueError: if missing the filetypes argument
"""
def __init__(
self, filenames=[], filetypes=[], quantities=["stdev_inv"],
figsize=(8, 8), plot_sigma=True, overlap_ions_atoms=True,
colors=None, **kwargs):
self.count = 0
self.filenames = []
if len(filenames) > 0 and len(filetypes) == 0:
raise ValueError("Missing filetypes argument")
if type(filetypes) is str:
self.filetypes = [filetypes for _ in filenames]
else:
self.filetypes = filetypes[:]
self.quantities = quantities
self.plot_sigma = plot_sigma
self.overlap_ions_atoms = overlap_ions_atoms
self.nrows, self.axs_ids = self.compute_nrows()
self.fig, self.axs = \
plt.subplots(
figsize=figsize, nrows=self.nrows,
ncols=1, sharex="col", **kwargs)
self.axs[-1].set_xlabel("Distance along divertor (m)")
if colors is None:
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for filename, filetype, color in \
zip(filenames, self.filetypes, colors):
self.add_case(filename, filetype, color)
def compute_nrows(self):
"""Compute the number of rows needed
Returns:
int, list: number of rows, list of axes
"""
if not self.overlap_ions_atoms:
N = len(self.quantities)
axs_ids = [i for i in range(N)]
else:
N = 0
axs_ids = []
all_suffixes = ["_energy", "_angle", "_flux"]
suffixes = {}
for quantity in self.quantities:
has_suffix = False
for suffix in all_suffixes:
if quantity.endswith(suffix) and quantity != "heat_flux":
has_suffix = True
if suffix not in suffixes:
suffixes[suffix] = N
axs_ids.append(N)
N += 1
else:
axs_ids.append(suffixes[suffix])
N += 0
if not has_suffix:
axs_ids.append(N)
N += 1
return N, axs_ids
def add_case(self, filename, filetype, color):
"""Adds a new exposure case to the plot.
Args:
filename (str): The CSV file name.
filetype (str): The CSV file type
("WEST" or "ITER").
color (str): the color of the line.
Raises:
ValueError: If a quantity is unknown.
"""
self.count += 1
self.filenames.append(filename)
label = "Case {}".format(self.count)
correspondance_dict = create_correspondance_dict(filename, filetype)
arc_length = correspondance_dict["arc_length"]["var"]
if self.nrows == 1:
axs = [self.axs]
else:
axs = self.axs
for quantity, ax_id in zip(self.quantities, self.axs_ids):
if quantity not in correspondance_dict:
raise ValueError(quantity + " is unknown")
plt.sca(axs[ax_id])
if "yscale" in correspondance_dict[quantity]:
plt.yscale(correspondance_dict[quantity]["yscale"])
if "label" in correspondance_dict[quantity]:
plt.ylabel(correspondance_dict[quantity]["label"])
line, = plt.plot(
arc_length, correspondance_dict[quantity]["var"],
color=color)
# use different linestyles if ions/atoms overlap
if self.axs_ids.count(ax_id) > 1:
line.set_linestyle(correspondance_dict[quantity]["linestyle"])
# plot confidence interval
if quantity == "inventory" and self.plot_sigma:
sigma = correspondance_dict["stdev_inv"]["var"]
inventory = correspondance_dict[quantity]["var"]
plt.fill_between(
arc_length,
10**(2*sigma + np.log10(inventory)),
10**(-2*sigma + np.log10(inventory)),
facecolor=line.get_color(), alpha=0.3)
plt.legend()
def show(self):
"""Similar to matplotlib.pyplot.show()
"""
plt.show()
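# Hedged usage sketch (hypothetical CSV paths): overlay two WEST exposure
# cases and plot surface temperature plus inventory along the divertor.
#
#   p = plot_along_divertor(
#       filenames=["case1.csv", "case2.csv"],
#       filetypes="WEST",
#       quantities=["T_surf", "inventory"])
#   p.show()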
def create_correspondance_dict(filename, filetype):
"""Creates a correspondance dictionary link for the case
Args:
filename (str): The CSV file name.
filetype (str): The CSV file type
("WEST" or "ITER").
Returns:
dict: correspondance dictionary with a "var" key for each quantity
"""
my_exposition = Exposition(filename, filetype)
correspondance_dict["arc_length"]["var"] = my_exposition.arc_length
correspondance_dict["ion_energy"]["var"] = my_exposition.E_ion
correspondance_dict["atom_energy"]["var"] = my_exposition.E_atom
correspondance_dict["ion_flux"]["var"] = my_exposition.ion_flux
correspondance_dict["atom_flux"]["var"] = my_exposition.atom_flux
correspondance_dict["heat_flux"]["var"] = my_exposition.net_heat_flux
correspondance_dict["ion_angle"]["var"] = my_exposition.angles_ions
correspondance_dict["atom_angle"]["var"] = my_exposition.angles_atoms
correspondance_dict["T_surf"]["var"] = my_exposition.temperature
correspondance_dict["c_surf"]["var"] = my_exposition.concentration
correspondance_dict["inventory"]["var"] = my_exposition.inventory
correspondance_dict["stdev_inv"]["var"] = my_exposition.stdev_inv
return correspondance_dict
class plot_T_c_inv_along_divertor(plot_along_divertor):
"""Plots the temperature, concentration and inventory distribution along
the divertor
"""
def __init__(self, **kwargs):
super().__init__(
quantities=["T_surf", "c_surf", "inventory"],
**kwargs)
class plot_particle_exposure_along_divertor(plot_along_divertor):
"""Plots the exposure condition (particle fluxes, energies and angles)
along the divertor
"""
def __init__(self, **kwargs):
quantities = [
"atom_flux", "ion_flux",
"ion_energy", "atom_energy",
"ion_angle", "atom_angle",
]
super().__init__(
quantities=quantities,
**kwargs)
def plot_inv_with_uncertainty(x, y, stdev, zscore=2, alpha_fill=0.3, **kwargs):
"""Plots the inventory along the divertor with the associated uncertainity
filled
Args:
x (numpy.array): Arc length (m) along the divertor
y (numpy.array): Inventory per unit thickness (H/m)
stdev (numpy.array): standard deviation
        zscore (float): user-defined z-score corresponding to a confidence interval. Defaults to 2.
alpha_fill (float, optional): Opacity of the filled region between 0 and 1. Defaults to 0.3.
"""
    # check that zscore is numeric (int or float)
    if type(zscore) not in [float, int]:
        raise TypeError("zscore should be a float or an int")
line, = plt.plot(x, y, **kwargs)
plt.fill_between(
x,
10**(zscore*stdev + np.log10(y)),
10**(-zscore*stdev + np.log10(y)),
facecolor=line.get_color(), alpha=alpha_fill)
```
#### File: divHretention/divHretention/process_T_c_data.py
```python
import re
import os
from os import path
import numpy as np
import csv
from scipy.stats import linregress
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from divHretention import list_of_low_temp_files, list_of_high_temp_files
from .data import mb_high_temp
from .data import mb_low_temp
def fit_powerlaw(x, y):
slope, intercept, r_value, p_value, std_err = \
linregress(np.log10(x), np.log10(y))
a = 10**intercept
b = slope
return a, b
points = []
data = []
# extract high temp data
strings = list_of_high_temp_files
count = 0
for s in strings:
match_number = re.compile(r'-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?')
e = re.findall(match_number, s)
points.append([float(e[i])*10**float(e[i+1]) for i in [0, 2]])
data.append({})
data[-1]["T"] = points[-1][0]
data[-1]["c"] = points[-1][1]
t = []
inventory = []
with pkg_resources.path(mb_high_temp, s) as file_path:
with open(file_path, 'r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
next(plots)
for row in plots:
t.append(float(row[0]))
inventory.append(
2*(float(row[-1]) +
float(row[-2]) +
float(row[-3])))
# extrapolate to small times
a, b = fit_powerlaw(t, inventory)
t_ = np.logspace(2, 4, num=100)
inventory_ = a*t_**b
data[-1]["t"] = t_.tolist() + t
data[-1]["inventory"] = inventory_.tolist() + inventory
# extract low temp data
L = 30e-3
strings = list_of_low_temp_files
for s in strings:
match_number = re.compile(r'-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?')
e = re.findall(match_number, s)
a = [float(e[i])*10**float(e[i+1]) for i in [0, 2]]
points.append(a)
data.append({})
data[-1]["T"] = points[-1][0]
data[-1]["c"] = points[-1][1]
t = []
inventory = []
with pkg_resources.path(mb_low_temp, s) as file_path:
with open(file_path, 'r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
next(plots)
for row in plots:
t.append(float(row[0]))
inventory.append(
L*(float(row[-1]) +
float(row[-2]) +
float(row[-3])))
a, b = fit_powerlaw(t, inventory)
t_ = np.logspace(5, 7, num=100)
inventory_ = a*t_**b
data[-1]["t"] = t + t_.tolist()
data[-1]["inventory"] = inventory + inventory_.tolist()
T_ = 320
for c in [*np.logspace(22 + np.log10(2), 23, num=7), *np.logspace(21 + np.log10(2), 22, num=7), *np.logspace(20 + np.log10(2), 21, num=7)]:
points.append([T_, c])
data.append({})
data[-1]["T"] = T_
data[-1]["c"] = c
t = np.logspace(2, 7, num=100)
D = 1.326e-10
n = 6.93e25
e = (t*2*D*c/n)**0.5
inv = n*e*L
data[-1]["t"] = t
data[-1]["inventory"] = inv
points = np.asarray(points)
# print(len(points), len(data))
``` |
{
"source": "jhdavislab/ms_datasheet",
"score": 3
} |
#### File: jhdavislab/ms_datasheet/ms_datasheet_gui.py
```python
from matplotlib import cm
from gooey import GooeyParser
from gooey import Gooey
from ms_datasheet import plot_datapage
__VERSION__="0.1.1"
@Gooey(default_size=(600, 650), program_name='Davis Lab mass spec datasheet')
def main():
parser = GooeyParser(description='Plot a single datasheet for a given therom RAW file.\nRequires "pyextractMS", "MSFileReader", numpy, matplotlib, gooey')
parser.add_argument('file', type=str,
help='path to the file to analyze', widget='FileChooser')
parser.add_argument('--display', default=True, action='store_false',
help='just display the plot, but do not save it')
parser.add_argument('--extension', default='.pdf', type=str,
help='string for figure filetype (e.g. .pdf, .png)')
parser.add_argument('--spectra_rt_range', default='40,80', type=str,
help='RT range over which to calculate spectra histograms; format is start,end. e.g. --spectra_rt_range 1,120')
parser.add_argument('--tic_rt_range', default='1,120', type=str,
help='RT range over which to plot tics; format is start,end. e.g. --tic_rt_range 1,120')
parser.add_argument('--exclude_iRTs', default=False, action='store_true',
help='exclude XICs, MS1, MS2 spectra for standard iRT peptides at expected retention times. Default is to include this information.\
                        Only use this option if you have not added Pierce iRT peptides to your sample.')
args = parser.parse_args()
file_name= vars(args)['file']
savefig= vars(args)['display']
fig_extension = vars(args)['extension']
spectra_rt_range = vars(args)['spectra_rt_range']
tic_rt_range = vars(args)['tic_rt_range']
exclude_iRTs = vars(args)['exclude_iRTs']
print('****generating datasheet for file: ' +file_name+'....****\n')
print('****to do the same from the commandline, run the following:')
command = 'python ms_datasheet.py '+file_name+' --spectra_rt_range ' + spectra_rt_range + ' --tic_rt_range ' + tic_rt_range + ' --extension ' + fig_extension
if not savefig:
command+=' --display'
if exclude_iRTs:
command+=' --exclude_iRTs'
print(command)
plot_datapage(file_name, savefig=savefig, fig_extension=fig_extension, colors=cm.get_cmap(name='plasma'),
spectra_rt_range=spectra_rt_range, tic_rt_range=tic_rt_range, exclude_iRTs=exclude_iRTs)
print('****plotting complete')
if __name__ =='__main__':
main()
```
#### File: jhdavislab/ms_datasheet/ms_datasheet.py
```python
import pyextractMS
from matplotlib import pylab as plt
from matplotlib import rcParams
import MSFileReader
from matplotlib import cm
from os import path
import time
import argparse
__VERSION__="0.1.2"
def set_style_single_inj(exclude_iRTs):
rcParams['axes.labelsize'] = 9
rcParams['xtick.labelsize'] = 8
rcParams['ytick.labelsize'] = 8
rcParams['legend.fontsize'] = 8
rcParams['grid.alpha'] = 0.5
rcParams['grid.color'] = "grey"
rcParams['axes.formatter.limits'] = [-4,4]
figsize = (8,10)
fig = plt.figure(constrained_layout=True, figsize=figsize)
if exclude_iRTs is False:
gs = fig.add_gridspec(ncols=2, nrows=5)
else:
gs = fig.add_gridspec(ncols=2, nrows=3)
ax1 = fig.add_subplot(gs[0, 0]) #bpc
ax2 = fig.add_subplot(gs[0, 1]) #tic
ax3 = fig.add_subplot(gs[1, 0]) #ms1 injec times
ax4 = fig.add_subplot(gs[1, 1]) #ms2 inject times
ax5 = fig.add_subplot(gs[2, :]) #pressure trace
if exclude_iRTs is False:
ax6 = fig.add_subplot(gs[3, :]) #iRT XICs
ax7 = fig.add_subplot(gs[4, 0]) #iRT MS1
ax8 = fig.add_subplot(gs[4, 1]) #iRT MS2
axes = [ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8]
else:
axes = [ax1,ax2,ax3,ax4,ax5]
return [fig, axes]
def plot_datapage(file_name, savefig=True, fig_extension='.pdf', colors=cm.get_cmap(name='plasma'), spectra_rt_range='1,120', tic_rt_range='1,120', exclude_iRTs=False):
fig,axes = set_style_single_inj(exclude_iRTs)
rawfile = MSFileReader.ThermoRawfile(file_name)
title = file_name + ' - ' + time.ctime(path.getmtime(file_name)) + '\n' + rawfile.GetSeqRowInstrumentMethod().split('\\')[-1]
fig.suptitle(title)
print('plotting bpc...')
pyextractMS.plot_bpc(rawfile, fig_axis=axes[0], rt_range=[float(i) for i in tic_rt_range.split(',')])
print('plotting tic...')
pyextractMS.plot_tic(rawfile, fig_axis=axes[1], rt_range=[float(i) for i in tic_rt_range.split(',')])
print('plotting MS1 inj. times...')
pyextractMS.plot_inj_times(rawfile, 1, fig_axis=axes[2], rt_range=[float(i) for i in spectra_rt_range.split(',')])
print('plotting MS2 inj. times...')
pyextractMS.plot_inj_times(rawfile, 2, fig_axis=axes[3], rt_range=[float(i) for i in spectra_rt_range.split(',')])
print('plotting pressure traces...')
pyextractMS.plot_pressure_traces(rawfile, fig_axis=axes[4])
if exclude_iRTs is False:
##### iRT peptide of interest 3###
#'TASEFDSAIAQDK':[695.8324,46.75]#
##################################
ms1_peptide = 'TASEFDSAIAQDK'
ms1_charge = 2
ms1_mz = 695.8324
ms1_rt = 53.5
ms1_mass_accuracy = 0.01
ms1_rt_window = 7.5
rt_range = [ms1_rt-ms1_rt_window, ms1_rt+ms1_rt_window]
print('plotting iRT XICs...')
pyextractMS.plot_xics(rawfile, fig_axis=axes[5])
print('plotting MS1 spectra...')
full_xic = pyextractMS.get_xic(rawfile, rt_range=rt_range,
mz_range=[ms1_mz-ms1_mass_accuracy,ms1_mz+ms1_mass_accuracy])
ms1_peak_max_rt, ms1_peak_max_intensity = pyextractMS.get_max_rt(full_xic)
ms1_peak_max_scan = rawfile.ScanNumFromRT(ms1_peak_max_rt)
ms1_peak_max_rt = round(ms1_peak_max_rt, 1)
ms1_mz_range = pyextractMS.calc_mz_range(ms1_peptide, ms1_mz, labeling='None', charge=ms1_charge, offset=2)
ms1 = pyextractMS.extract_ms_spectra(rawfile, ms1_peak_max_scan, ms1_mz_range)
pyextractMS.plot_ms_spectra(ms1, title=ms1_peptide+'|'+str(ms1_peak_max_rt)+'min.|mz='+str(ms1_mz), fig_axis=axes[6], y_label='intensity (MS1)', x_label='mass/charge')
axes[6].axvline(ms1_mz, linestyle='--', color='red', alpha=0.75, linewidth=1)
print('plotting MS2 spectra...')
#plot iRT MS2 @max
relevant_ms2_scans = pyextractMS.get_ms2_scans(rawfile, ms1_mz, rt_range = rt_range)
try:
nearest_ms2_scan = pyextractMS.find_nearest(relevant_ms2_scans, ms1_peak_max_scan)
ms2 = pyextractMS.extract_ms_spectra(rawfile, nearest_ms2_scan)
ms2_rt = str(round(rawfile.RTFromScanNum(nearest_ms2_scan), 1))
ms2_scan = rawfile.GetTrailerExtraForScanNum(nearest_ms2_scan)
ms2_iso = str(round(rawfile.GetPrecursorMassForScanNum(nearest_ms2_scan, 2), 1))
ms2_width = str(ms2_scan['MS2 Isolation Width'])
pyextractMS.plot_ms_spectra(ms2, title=ms2_iso +'+/-'+ms2_width + ' | ' + ms2_rt,
fig_axis=axes[7], circle_size=0, x_label='mass/charge', y_label='intensity (MS2)')
except ValueError:
print("Could not find a MS2 scan covering the precursor "+ str(ms1_mz) + " in the RT range +" + str(rt_range))
if savefig:
fig.savefig(file_name+fig_extension)
else:
plt.show()
def main():
parser = argparse.ArgumentParser(description='Plot a single datasheet for a given file')
parser.add_argument('file', type=str,
help='path to the file to analyze')
parser.add_argument('--display', default=True, action='store_false',
help='just display the plot, but do not save it')
parser.add_argument('--extension', default='.pdf', type=str,
help='string for figure filetype (e.g. .pdf, .png)')
parser.add_argument('--spectra_rt_range', default='40,80', type=str,
help='RT range over which to calculate spectra histograms; format is start,end. e.g. --spectra_rt_range 1,120')
parser.add_argument('--tic_rt_range', default='1,120', type=str,
help='RT range over which to plot tics; format is start,end. e.g. --tic_rt_range 1,120')
parser.add_argument('--exclude_iRTs', default=False, action='store_true',
help='exclude XICs, MS1, MS2 spectra for standard iRT peptides at expected retention times. Default is to include this information.\
                        Only use this option if you have not added Pierce iRT peptides to your sample.')
args = parser.parse_args()
file_name= vars(args)['file']
savefig= vars(args)['display']
fig_extension = vars(args)['extension']
spectra_rt_range = vars(args)['spectra_rt_range']
tic_rt_range = vars(args)['tic_rt_range']
exclude_iRTs = vars(args)['exclude_iRTs']
print('\n****generating datasheet for file: ' +file_name+'....****')
plot_datapage(file_name, savefig=savefig, fig_extension=fig_extension, colors=cm.get_cmap(name='plasma'),
spectra_rt_range=spectra_rt_range, tic_rt_range=tic_rt_range, exclude_iRTs=exclude_iRTs)
print('****plotting complete')
if __name__ =='__main__':
main()
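# Example invocation (RAW file name is illustrative):
#   python ms_datasheet.py 20210101_qc_run.raw --spectra_rt_range 40,80 --tic_rt_range 1,120 --extension .png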
``` |
{
"source": "jhdcs/colcon-ros",
"score": 2
} |
#### File: task/catkin/__init__.py
```python
from colcon_cmake.task.cmake import get_variable_from_cmake_cache
from colcon_core.logging import colcon_logger
from colcon_core.shell import create_environment_hook
logger = colcon_logger.getChild(__name__)
def create_pythonpath_environment_hook(build_base, install_base, pkg_name):
"""
Create a hook script for each primary shell to prepend to the PYTHONPATH.
:param str build_base: The path of the build directory
:param Path install_base: The path of the install prefix
:param str pkg_name: The package name
:returns: The relative paths to the created hook scripts
:rtype: list
"""
hooks = []
# catkin packages might use `--install-layout deb`
# therefore getting the used install directory from the CMake cache
# since it might not match distutils.sysconfig.get_python_lib()
rel_python_path = get_variable_from_cmake_cache(
build_base, 'PYTHON_INSTALL_DIR')
# prepend Python specific path to PYTHONPATH if it exists
if rel_python_path:
abs_python_path = install_base / rel_python_path
logger.log(1, "checking '%s'" % abs_python_path)
if abs_python_path.exists():
hooks += create_environment_hook(
'catkin_pythonpath', install_base, pkg_name,
'PYTHONPATH', rel_python_path, mode='prepend')
return hooks
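# Hedged usage sketch (hypothetical paths and package name): hooks are only
# created when the CMake cache defines PYTHON_INSTALL_DIR and that directory
# exists under the install prefix.
#
#   from pathlib import Path
#
#   hooks = create_pythonpath_environment_hook(
#       'build/my_pkg', Path('install/my_pkg'), 'my_pkg')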
``` |
{
"source": "JH-debug/Series_Develop",
"score": 3
} |
#### File: hw04/my_favorite_movie_star/app.py
```python
from pymongo import MongoClient
from flask import Flask, render_template, jsonify, request
app = Flask(__name__)
client = MongoClient('localhost', 27017)
db = client.get_database('sparta')
# HTML 화면 보여주기
@app.route('/')
def home():
return render_template('index.html')
# API endpoints
@app.route('/api/list', methods=['GET'])
def show_stars():
    # 1. Fetch the entire mystar collection from the DB, excluding the _id field and sorting by likes in descending order.
    # Note: find({}, {'_id': False}) combined with sort() does the trick!
stars = list(db.mystar.find({}, {'_id': False}).sort('like', -1))
    # 2. On success, return a success message along with the stars_list to the client.
return jsonify({'result': 'success', 'stars_list': stars})
@app.route('/api/like', methods=['POST'])
def like_star():
    # 1. Store the name_give value sent by the client in the name_receive variable.
name_receive = request.form['name_give']
    # 2. Use find_one on the mystar collection to find the star whose name matches name_receive.
star = db.mystar.find_one({'name': name_receive})
    # 3. Create a new_like variable equal to the star's like count plus 1.
new_like = star['like'] + 1
    # 4. Update the like field to new_like for the mystar document whose name is name_receive.
    # Note: use the '$set' operator!
db.mystar.update_one({'name': name_receive}, {'$set': {'like': new_like}})
    # 5. On success, return a success message.
return jsonify({'result': 'success'})
@app.route('/api/delete', methods=['POST'])
def delete_star():
    # 1. Store the name_give value sent by the client in the name_receive variable.
name_receive = request.form['name_give']
    # 2. Use delete_one on the mystar collection to remove the star whose name matches name_receive.
db.mystar.delete_one({'name': name_receive})
    # 3. On success, return a success message.
return jsonify({'result': 'success'})
if __name__ == '__main__':
app.run('0.0.0.0', port=5000, debug=True)
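# Hedged usage sketch (assumes the app is running locally; the star name is
# illustrative):
#   curl http://localhost:5000/api/list
#   curl -X POST -d "name_give=SomeStar" http://localhost:5000/api/like
#   curl -X POST -d "name_give=SomeStar" http://localhost:5000/api/delete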
```
#### File: hw04/my_favorite_movie_star/init_db.py
```python
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.sparta
# Collect the source URLs of the movie stars to store in the DB.
def get_urls():
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://movie.naver.com/movie/sdb/rank/rpeople.nhn', headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
trs = soup.select('#old_content > table > tbody > tr')
urls = []
for tr in trs:
a = tr.select_one('td.title > a')
if a is not None:
base_url = 'https://movie.naver.com/'
url = base_url + a['href']
urls.append(url)
return urls
# Fetch each star's photo, name, and most recent work from its source URL and store it in the mystar collection.
def insert_star(url):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get(url, headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
name = soup.select_one('#content > div.article > div.mv_info_area > div.mv_info.character > h3 > a').text
img_url = soup.select_one('#content > div.article > div.mv_info_area > div.poster > img')['src']
recent_work = soup.select_one(
'#content > div.article > div.mv_info_area > div.mv_info.character > dl > dd > a:nth-child(1)').text
doc = {
'name': name,
'img_url': img_url,
'recent': recent_work,
'url': url,
'like': 0
}
db.mystar.insert_one(doc)
    print('Done!', name)
# Drop the existing mystar collection, collect the source URLs, then crawl each one and save the results to the DB.
def insert_all():
    db.mystar.drop()  # clear the entire mystar collection
urls = get_urls()
for url in urls:
insert_star(url)
### Run the crawl
insert_all()
``` |
{
"source": "jhdewitt/sltk",
"score": 2
} |
#### File: jhdewitt/sltk/slpy.py
```python
import signal, os
import cv2
import sys
import urllib
import optparse
import numpy as np
import time
from Queue import Queue
from threading import Thread
from datetime import datetime, timedelta
from OSC import OSCServer, OSCClient, OSCMessage, OSCStreamingClient
from subprocess import call
from subprocess import Popen
from sys import platform as _platform
from time import sleep
import copy
# track frame timing
class TimeState:
start = -1
frame_n = 0
cur_framet = -1
last_framet = -1
cur_ts = -1
last_ts = -1
ts_late = -1
ts_lateavg = -1
ts_latesm = 0.9
dt = 0.1
dt_avg = 0.1
dt_sm = 0.9
show_dt = 0
show_dtavg = 0
show_dtsm = 0.9
lag_checknow = True
lag_iterations = 5
lag_avgdark = 0
lag_avglight = 0
lag_integrationtime = 10
img_size_last = 1650000
img_size_avg = 1650000
img_size_avg_sm = 0.1
    def __init__(self,now):
        self.start = now
def new_frame(self,now,timestamp,size):
self.frame_n = self.frame_n + 1
self.last_framet = self.cur_framet
self.cur_framet = now
self.last_ts = self.cur_ts
self.cur_ts = timestamp
self.ts_late = self.cur_framet-self.cur_ts
self.ts_lateavg = self.ts_lateavg*self.ts_latesm + self.ts_late*(1-self.ts_latesm)
self.dt = self.cur_ts-self.last_ts
self.dt_avg = self.dt_avg*self.dt_sm + self.dt*(1-self.dt_sm)
self.img_size_last = size
diff = abs(self.img_size_avg-self.img_size_last)
if diff > 1000:
self.img_size_avg_sm = 0.85
if diff > 5000:
self.img_size_avg_sm = 0.1
if diff > 10000:
self.img_size_avg_sm = 0.01
self.img_size_avg = self.img_size_avg*self.img_size_avg_sm + (1-self.img_size_avg_sm)*self.img_size_last
##################
# PATTERN TYPES
# generic pattern specification type
class StructuredLightPattern(object):
kind = 'pat'
name = ''
inv = 0
r = 1
g = 1
b = 1
go = False
shown = False
saved = False
go_t = -1
show_t = -1
save_t = -1
show_idx = -1
dt = -1
def __init__(self):
pass
def setcolor(self,r,g,b):
self.r = r
self.g = g
self.b = b
def setname(name):
self.name = name
# pattern = x or y axis gray code binary single bitfield mapped to luminance with rgb weights
class GrayCodeBinaryPattern(StructuredLightPattern):
axis = 0
bit = 0
def __init__(self,inv,axis,bit):
self.kind = 'gcb'
self.inv = inv
self.axis = axis
self.bit = bit
def tostr(self):
ret = ""
ret += self.kind + ", "
ret += self.inv + ", "
ret += self.r + ", "
ret += self.g + ", "
ret += self.b + ", "
ret += self.axis + ", "
ret += self.bit
return ret
# pattern = x or y axis sine function mapped to luminance with rgb weights
class SinusoidPattern(StructuredLightPattern):
axis = 0
freq = 1
phase = 0
def __init__(self,inv,axis,freq,phase):
self.kind = 'sin'
self.inv = inv
self.axis = axis
self.freq = freq
self.phase = phase
def tostr(self):
ret = ""
ret += self.kind + ", "
ret += self.inv + ", "
ret += self.r + ", "
ret += self.g + ", "
ret += self.b + ", "
ret += self.axis + ", "
ret += self.freq + ", "
ret += self.phase
return ret
# pattern = rgb triplet applied to every pixel
class MonochromePattern(StructuredLightPattern):
def __init__(self,inv,r,g,b):
self.kind = 'rgb'
self.inv = inv
self.r = r
self.g = g
self.b = b
def tostr(self):
ret = ""
ret += self.kind + ", "
ret += self.inv + ", "
ret += self.r + ", "
ret += self.g + ", "
ret += self.b
return ret
#########################
# SEQUENCE BUFFER TYPE
# collection of sequences to be batched together
# sequence of sequences of patterns
class SequenceBuffer(object):
kind = 'seqbuf'
name = ''
go = False
completed = False
start_t = -1
stop_t = -1
runs_total = 1
runs = 1
steps = 50
idx = 0
seq = []
def __init__(self,outname):
self.name = outname
    def is_empty(self):
        return len(self.seq) == 0
    def should_go(self):
        if self.go and len(self.seq) > 0 and self.idx < len(self.seq):
            return self.seq[self.idx].idx < len(self.seq[self.idx].pat)
            #return self.seq[self.idx].go
        else:
            return False
def num_pats(self):
count = 0
for sub_seq in self.seq:
count += len(sub_seq.pat)
return count
# wipe structure
def reset(self):
del self.seq[:]
self.seq = []
go = False
completed = False
name = ''
start_t = -1
stop_t = -1
runs_total = 1
runs = 1
steps = 0
def add_sequence(self,newseq):
#self.seq.append(copy.deepcopy(newseq))
self.seq.append(newseq)
def write_list(self,outpath):
f = open('%s/sequence.yaml'%outpath,'w')
f.write("%YAML:1.0\n")
for sub_seq in self.seq:
f.write( sub_seq.generate_raw_yaml() )
def log(self):
ret = ''
i = 0
#ret += ("len(seq) = %d" % len(self.seq))
for sub_seq in self.seq:
#print(i,sub_seq.generate_raw_yaml())
ret += "[ %s with %d %s patterns ]\n" % (sub_seq.kind, len(sub_seq.pat), sub_seq.pat[0].kind)
i += 1
return ret
#########################
# SEQUENCE TYPES
# container to hold sequence of patterns
class PatternSequence(object):
kind = 'seq'
name = ''
showinv = False
go = False
completed = False
start_t = -1
stop_t = -1
r = 1.0
g = 1.0
b = 1.0
idx = 0
pat = []
pat_path = []
def __init__(self):
self.kind = 'seq'
pass
def gen():
pass
def is_primed(self,delay):
if len(self.pat) > 0:
if self.idx == 0 or (self.idx > 0 and self.pat[max(0,self.idx-1)].save_t + delay < time.time()):
return True
else:
return False
#return self.pat[self.idx].go
def commit_image(self,path,newidx,newframetime):
self.pat_path.append(path)
self.pat[self.idx].saved = True
self.pat[self.idx].save_t = time.time()
self.pat[self.idx].show_t = newframetime
#self.pat[self.idx].show_idx = newidx
self.idx += 1
def clear_list(self):
del self.pat[:]
del self.pat_path[:]
self.pat = []
self.pat_path = []
def reset(self):
self.go = False
self.completed = False
self.start_t = -1
self.stop_t = -1
self.idx = 0
self.gen()
class GrayCodeBinaryPatternSequence(PatternSequence):
bits = 0
showx = True
showy = False
frame_mult = 1
def __init__(self,bits,showx,showy,showinv,rv,gv,bv):
self.kind = 'gcb_seq'
self.bits = bits
self.showx = showx
self.showy = showy
self.showinv = showinv
self.r = rv
self.g = gv
self.b = bv
self.gen()
        self.frame_mult = 2 if (showx and showy) else 1
        self.frame_mult *= 2 if showinv else 1
def print_debug(self):
for idx in range(len(self.pat)):
pat = self.pat[idx]
print('%3d %s %3d %3d %3d : (%0.3f %0.3f %0.3f)' %(idx,pat.kind,pat.axis,pat.bit,pat.inv,pat.r,pat.g,pat.b))
def gen(self):
self.clear_list()
if self.showx:
for x in range(self.bits):
idx = self.bits-x-1
pat = GrayCodeBinaryPattern(0,0,idx)
pat.setcolor(self.r,self.g,self.b)
self.pat.append(pat)
if self.showinv:
patinv = GrayCodeBinaryPattern(1,0,idx)
patinv.setcolor(self.r,self.g,self.b)
self.pat.append(patinv)
if self.showy:
for y in range(self.bits):
idx = self.bits-y-1
pat = GrayCodeBinaryPattern(0,1,idx)
pat.setcolor(self.r,self.g,self.b)
self.pat.append(pat)
if self.showinv:
patinv = GrayCodeBinaryPattern(1,1,idx)
patinv.setcolor(self.r,self.g,self.b)
self.pat.append(patinv)
def generate_raw_yaml(self):
ret = ""
ret += "gcb_images:\n"
for idx in range(len(self.pat_path)):
ret += str(" - \"%s\"\n" % self.pat_path[idx])
ret += "gcb_param:\n"
ret += str(" - \"%s\"\n" % self.bits)
ret += str(" - \"%s\"\n" % self.showx)
ret += str(" - \"%s\"\n" % self.showy)
ret += str(" - \"%s\"\n" % self.showinv)
ret += str(" - \"%s\"\n" % self.r)
ret += str(" - \"%s\"\n" % self.g)
ret += str(" - \"%s\"\n" % self.b)
#ret += "gcb_patterns:\n"
#for pat in self.pat:
# ret += str(" - \"%s\"n" % pat.axis)
#for idx in range(len(self.pat)):
# ret += str(" - \"%s\"\n" % self.pat[idx].axis)
#if len(self.pat) != len(self.pat_path):
# ret += str("gcb_images_N:%d\n"%len(self.pat))
# ret += str("gcb_images_N_path:%d\n"%len(self.pat_path))
return ret
def write_list(self,outpath):
f = open('%s/list.yaml'%outpath,'w')
f.write( "%YAML:1.0\n" )
f.write( self.generate_raw_yaml() )
def setname(name):
self.name = name
@classmethod
def fromcolor(self,bits,showx,showy,showinv,r,g,b):
self.bits = bits
self.showx = showx
self.showy = showy
self.showinv = showinv
self.r = r
self.g = g
self.b = b
class SinusoidPatternSequence(PatternSequence):
frames = 3
showx = True
showy = False
def __init__(self,frames,showx,showy,showinv,r,g,b):
self.kind = 'sin_seq'
self.frames = frames
self.showx = showx
self.showy = showy
self.showinv = showinv
self.r = r
self.g = g
self.b = b
self.reset()
        self.frame_mult = 2 if (showx and showy) else 1
        self.frame_mult *= 2 if showinv else 1
def print_debug(self):
for idx in range(len(self.pat)):
pat = self.pat[idx]
print('%3d %s %3d %3d %3d : (%0.3f %0.3f %0.3f)' %(idx,pat.kind,pat.axis,pat.freq,pat.phase,pat.r,pat.g,pat.b))
    def gen(self):
        self.clear_list()
        # SinusoidPattern expects (inv, axis, freq, phase); colors are set separately.
        axis = 0 if self.showx else 1
        pat = SinusoidPattern(1 if self.showinv else 0, axis, 1, 0)
        pat.setcolor(self.r, self.g, self.b)
        self.pat.append(pat)
def generate_raw_yaml(self):
ret = "sin_images:\n"
for idx in range(len(self.pat_path)):
ret += str(" - \"%s\"\n" % self.pat_path[idx])
if len(self.pat) != len(self.pat_path):
ret += str("sin_images_N:%d\n"%len(self.pat))
ret += str("sin_images_N_path:%d\n"%len(self.pat_path))
return ret
def write_list(self,outpath):
f = open('%s/sin_list.yaml'%outpath,'w')
f.write( "%YAML:1.0\n" )
f.write( self.generate_raw_yaml() )
class MonochromePatternSequence(PatternSequence):
def addColor(self,inv,r,g,b):
pat = MonochromePattern(inv,r,g,b)
self.pat.append(pat)
def addColor(self,r,g,b):
pat = MonochromePattern(False,r,g,b)
self.pat.append(pat)
def __init__(self):
self.showinv = False
def __init__(self,inv,r,g,b):
self.kind = 'rgb_seq'
self.showinv = inv
self.r = r
self.g = g
self.b = b
self.reset()
def print_debug(self):
for idx in range(len(self.pat)):
pat = self.pat[idx]
print('%3d %s %3d : (%0.3f %0.3f %0.3f)' %(idx,pat.kind,pat.inv,pat.r,pat.g,pat.b))
def gen(self):
self.clear_list()
pat = 0
if self.showinv:
pat = MonochromePattern(False,1-self.r,1-self.g,1-self.b)
else:
pat = MonochromePattern(False,self.r,self.g,self.b)
self.pat.append(pat)
def generate_raw_yaml(self):
patN = len(self.pat)
ret = "rgb_images:\n"
for idx in range(len(self.pat_path)):
ret += str(" - \"%s\"\n" % self.pat_path[idx])
if patN != len(self.pat_path):
ret += str("rgb_images_N:%d\n" % patN)
ret += str("rgb_images_N_path:%d\n"%len(self.pat_path))
ret += "rgb_values: !!opencv-matrix\n"
ret += " rows: %d\n" % patN
ret += " cols: 1\n"
ret += " dt: \"3f\"\n"
ret += " data: [ "
for idx in range(patN):
ret += "%0.6f, %0.6f, %0.6f" % ( self.pat[idx].r, self.pat[idx].g, self.pat[idx].b )
if idx < patN-1:
ret += ",\n "
ret += " ]"
return ret
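    # Illustrative sketch (not from the original source) of the YAML this method
    # emits for a hypothetical two-pattern sequence; file names and values are
    # placeholders:
    #   rgb_images:
    #    - "rgb_0.png"
    #    - "rgb_1.png"
    #   rgb_values: !!opencv-matrix
    #    rows: 2
    #    cols: 1
    #    dt: "3f"
    #    data: [ 1.000000, 0.000000, 0.000000,
    #          0.500000, 0.500000, 0.500000 ]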
    def write_list(self, outpath):
        print(self.generate_raw_yaml())
        with open('%s/rgb_list.yaml' % outpath, 'w') as f:
            f.write("%YAML:1.0\n")
            f.write(self.generate_raw_yaml())
``` |
{
"source": "jhdulaney/oh-dataselfie-source",
"score": 2
} |
#### File: main/tests/tests_management.py
```python
from django.test import TestCase, RequestFactory
import vcr
from django.conf import settings
from django.core.management import call_command
from open_humans.models import OpenHumansMember
import requests_mock
class ManagementTestCase(TestCase):
"""
test that files are parsed correctly
"""
def setUp(self):
"""
Set up the app for following tests
"""
settings.DEBUG = True
call_command('init_proj_config')
self.factory = RequestFactory()
data = {"access_token": '<PASSWORD>',
"refresh_token": '<PASSWORD>',
"expires_in": 36000}
self.oh_member = OpenHumansMember.create(oh_id='1234',
data=data)
self.oh_member.save()
self.user = self.oh_member.user
self.user.save()
@vcr.use_cassette('main/tests/fixtures/import_test_file.yaml',
record_mode='none')
def test_management_import_user(self):
self.assertEqual(len(OpenHumansMember.objects.all()),
1)
call_command('import_users',
infile='main/tests/fixtures/test_import.csv',
delimiter=',')
old_oh_member = OpenHumansMember.objects.get(oh_id='1234')
self.assertEqual(old_oh_member.refresh_token,
'bar')
new_oh_member = OpenHumansMember.objects.get(oh_id='2345')
self.assertEqual(new_oh_member.refresh_token,
'new_refresh')
``` |
{
"source": "jhdulaney/oh-loggather",
"score": 2
} |
#### File: oh-loggather/loggather/tasks.py
```python
from datetime import datetime, timezone
import io
from django.conf import settings
from celery import shared_task
from celery.utils.log import get_task_logger
from ohapi.api import get_all_results, upload_stream
from openhumans.models import OpenHumansMember
logger = get_task_logger(__name__)
def serialize_accesslogs(api_endpoint, oh_member, access_token, start_date, end_date):
"""
Groups logs by project, then converts from dict to csv, and finally uploads the
resultant csv files to aws.
"""
accesslog_api_url = "{0}/data-management/{1}/?access_token={2}".format(
settings.OPENHUMANS_OH_BASE_URL, api_endpoint, oh_member.get_access_token()
)
if start_date:
accesslog_api_url = "{0}&start_date={1}".format(accesslog_api_url, start_date)
if end_date:
accesslog_api_url = "{0}&end_date={1}".format(accesslog_api_url, end_date)
if api_endpoint == "newdatafileaccesslog":
access_point = "open-humans"
headers = [
"date",
"ip_address",
"user",
"datafile_id",
"datafile_source",
"datafile_created",
"datafile_user_id",
"datafile_basename",
"datafile_download_url",
"key_id",
"key_key",
"key_created",
"key_project_id",
"key_datafile_id",
"key_access_token",
"key_key_creation_ip_address",
]
else:
access_point = "aws"
headers = [
"time",
"remote_ip",
"request_id",
"operation",
"bucket_key",
"request_uri",
"status",
"bytes_sent",
"object_size",
"total_time",
"turn_around_time",
"referrer",
"user_agent",
"cipher_suite",
"host_header",
"datafile_id",
"datafile_source",
"datafile_created",
"datafile_user_id",
"datafile_basename",
"datafile_download_url",
]
timestamp = str(datetime.now(timezone.utc).isoformat())
accesslogs = get_all_results(accesslog_api_url)
# Group log events by project and serialize to lists
log_events = {}
for access_log in accesslogs:
try:
if access_log["datafile"]:
project = access_log["datafile"]["source"]
else:
continue
except KeyError:
# Sometimes, a log file gets deleted between an access event and log retrieval
# In these instances, skip the log
continue
        row = []
        for header in headers:
            # Default placeholder so headers missing from this log entry still emit a value.
            field = "-"
if header in access_log:
field = access_log[header]
if access_log[header] is None:
field = "-"
else:
field = str(access_log[header])
elif "datafile_" in header:
key = header[9:]
if key in access_log["datafile"]:
if access_log["datafile"][key] is None:
field = "-"
else:
field = str(access_log["datafile"][key])
elif "key_" in header:
key = header[4:]
if key in access_log["key"]:
if access_log["key"][key] is None:
field = "-"
else:
field = str(access_log["key"][key])
else:
field = "-"
row.append(field.strip(","))
if project in log_events.keys():
log_events[project].append(row)
else:
log_events[project] = [row]
# Combine lists for each project as csv files and upload
for project, items in log_events.items():
filename = "datalogs_{0}_{1}_{2}_{3}_{4}.csv".format(
access_point, project, start_date, end_date, timestamp
)
csv = ""
for row in items:
if csv:
csv = "{0}\n{1}".format(csv, ",".join(row))
else:
csv = ",".join(row)
csv = "{0}\n{1}".format(",".join(headers), csv) # Prepend the headers
f = io.StringIO(csv)
logger.info("Writing {0}".format(filename))
upload_stream(
f,
filename,
metadata={
"description": "Open Humans access logs: AWS side",
"tags": ["logs", "access logs", "AWS access logs"],
},
access_token=access_token,
)
@shared_task
def get_logs(oh_member_pk, start_date=None, end_date=None):
"""
Celery task to retrieve the specified set of logs and save them as files
"""
oh_member = OpenHumansMember.objects.get(pk=oh_member_pk)
access_token = oh_member.get_access_token()
serialize_accesslogs(
"newdatafileaccesslog", oh_member, access_token, start_date, end_date
)
serialize_accesslogs(
"awsdatafileaccesslog", oh_member, access_token, start_date, end_date
)
return
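# Illustrative usage sketch (not part of the original file), mirroring how the
# dashboard view queues this task; the primary key and dates are placeholders:
#   get_logs.delay(oh_member.pk, start_date="2021-01-01", end_date="2021-01-31")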
```
#### File: oh-loggather/loggather/views.py
```python
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from ohapi.api import get_all_results
from openhumans.models import OpenHumansMember
from .tasks import get_logs
from .forms import RetrieveLogsForm
OLDEST_LOGS_DATE = (datetime.now() - timedelta(days=settings.LOG_RETENTION_DAYS)).date()
TODAY = datetime.now().date()
class IndexView(TemplateView):
template_name = "main/index.html"
def dispatch(self, request, *args, **kwargs):
"""
Override dispatch to provide redirect to dashboard if user is logged in.
"""
if request.user.is_authenticated:
return redirect("dashboard")
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
"""
Add auth url to context
"""
context = super().get_context_data(*args, **kwargs)
auth_url = OpenHumansMember.get_auth_url()
context.update({"auth_url": auth_url})
return context
class DashboardView(LoginRequiredMixin, FormView):
form_class = RetrieveLogsForm
success_url = reverse_lazy("dashboard")
template_name = "main/dashboard.html"
def get_context_data(self, *args, **kwargs):
"""
Get available files
"""
context = super().get_context_data(*args, **kwargs)
context.update(
{
"data_files": self.request.user.openhumansmember.list_files(),
"log_retention_days": settings.LOG_RETENTION_DAYS,
"oldest_date": OLDEST_LOGS_DATE,
"newest_date": TODAY,
}
)
return context
def form_valid(self, form):
"""
On clicking the 'retrieve logs' button, grabs the logs from the selected date range.
"""
start_date = self.request.POST.get("start_date")
end_date = self.request.POST.get("end_date")
get_logs.delay(
self.request.user.openhumansmember.pk,
start_date=start_date,
end_date=end_date,
)
messages.success(self.request, "Log retrieval initiated")
return HttpResponseRedirect(self.get_success_url())
class LogoutUserView(LoginRequiredMixin, TemplateView):
template_name = "main/index.html"
def post(self, request, **kwargs):
"""
Logout user.
"""
logout(request)
redirect_url = settings.LOGOUT_REDIRECT_URL
return redirect(redirect_url)
class AboutView(TemplateView):
template_name = "main/about.html"
``` |
{
"source": "jhdulaney/open-humans",
"score": 3
} |
#### File: open-humans/data_import/forms.py
```python
from django import forms
from .models import DataType
class DataTypeForm(forms.ModelForm):
"""
A form for creating and editing DataTypes.
"""
class Meta: # noqa: D101
model = DataType
fields = ["name", "parent", "description"]
def __init__(self, *args, **kwargs):
self.editor = kwargs.pop("editor")
return super().__init__(*args, **kwargs)
def clean_parent(self):
"""
Verify that the parent is not the object itself nor a descendent.
"""
parent = self.cleaned_data.get("parent")
if not parent:
return parent
if self.instance.id == parent.id:
raise forms.ValidationError(
"A DataType cannot be assigned to be its own parent."
)
elif self.instance in parent.all_parents:
raise forms.ValidationError(
"{0} is not an allowed parent, as it is a descendent of {1}.".format(
parent.name, self.instance.name
)
)
return parent
def clean_name(self):
"""
Verify that the name is case insensitive unique.
"""
name = self.cleaned_data.get("name")
try:
dt = DataType.objects.get(name__iexact=name)
except DataType.DoesNotExist:
dt = self.instance
if not dt == self.instance:
raise forms.ValidationError(
"Please provide a unique name for this datatype"
)
return name
def clean(self, *args, **kwargs):
if self.instance:
if not self.instance.editable:
raise forms.ValidationError(
"Not editable: in use by one or more approved projects."
)
self.instance.editor = self.editor
return super().clean(*args, **kwargs)
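# Illustrative usage sketch (not part of the original file): the form expects an
# "editor" keyword argument, e.g. something like
#   form = DataTypeForm(request.POST, editor=request.user.member)
# where request.user.member is assumed to be the Member doing the editing.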
```
#### File: open-humans/data_import/models.py
```python
from collections import OrderedDict
import datetime
import logging
import os
import uuid
import arrow
from botocore.exceptions import ClientError
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.validators import RegexValidator
from django.urls import reverse
from django.db import models
from django.utils import timezone
from ipware.ip import get_ip
from common import fields
from common.utils import full_url
from open_humans.models import Member
from .utils import get_upload_path
logger = logging.getLogger(__name__)
charvalidator = RegexValidator(
r"^[\w\-\s]+$",
"Only alphanumeric characters, space, dash, and underscore are allowed.",
)
def delete_file(instance, **kwargs): # pylint: disable=unused-argument
"""
Delete the DataFile's file from S3 when the model itself is deleted.
"""
instance.file.delete(save=False)
class DataFileKey(models.Model):
"""
Temporary key for accessing private files.
"""
created = models.DateTimeField(auto_now_add=True)
key = models.CharField(max_length=36, blank=False, unique=True, default=uuid.uuid4)
datafile_id = models.IntegerField()
ip_address = models.GenericIPAddressField(null=True)
access_token = models.CharField(max_length=64, null=True)
project_id = models.IntegerField(null=True)
# ^^ Not a foreign key due to circular deps, also when we serialize this
# model to json for storing in the log, we'd lose all the fancy, anyway
@property
def expired(self):
"""
Returns True if key is expired, False if not expired
Expiration set at one hour
"""
expiration = self.created + datetime.timedelta(hours=1)
if expiration > datetime.datetime.now(tz=expiration.tzinfo):
return False
return True
class DataFileManager(models.Manager):
"""
We use a manager so that subclasses of DataFile also get their
pre_delete signal connected correctly.
"""
def contribute_to_class(self, model, name):
super(DataFileManager, self).contribute_to_class(model, name)
models.signals.pre_delete.connect(delete_file, model)
class DataFile(models.Model):
"""
Represents a data file from a study or activity.
"""
objects = DataFileManager()
file = models.FileField(upload_to=get_upload_path, max_length=1024, unique=True)
metadata = JSONField(default=dict)
created = models.DateTimeField(auto_now_add=True)
source = models.CharField(max_length=32)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name="datafiles", on_delete=models.CASCADE
)
def __str__(self):
return str("{0}:{1}:{2}").format(self.user, self.source, self.file)
def download_url(self, request):
key = self.generate_key(request)
url = full_url(reverse("data-management:datafile-download", args=(self.id,)))
return "{0}?key={1}".format(url, key)
@property
def file_url_as_attachment(self):
"""
        Get an S3 pre-signed URL specifying content disposition as attachment.
"""
return self.file.storage.url(self.file.name)
def generate_key(self, request):
"""
Generate new link expiration key
"""
new_key = DataFileKey(datafile_id=self.id)
if request:
# Log the entity that is requesting the key be generated
new_key.ip_address = get_ip(request)
try:
new_key.access_token = request.query_params.get("access_token", None)
except (AttributeError, KeyError):
new_key.access_token = None
try:
new_key.project_id = request.auth.id
except AttributeError:
# We do not have an accessing project
new_key.project_id = None
new_key.save()
return new_key.key
@property
def is_public(self):
return self.parent_project_data_file.is_public
def has_access(self, user=None):
return self.is_public or self.user == user
@property
def basename(self):
return os.path.basename(self.file.name)
@property
def description(self):
"""
Filled in by the data-processing server.
"""
return self.metadata.get("description", "")
@property
def tags(self):
"""
Filled in by the data-processing server.
"""
return self.metadata.get("tags", [])
@property
def size(self):
"""
Return file size, or empty string if the file key can't be loaded.
Keys should always load, but this is a more graceful failure mode.
"""
try:
return self.file.size
except (AttributeError, ClientError):
return ""
class NewDataFileAccessLog(models.Model):
"""
Represents a download of a datafile.
"""
date = models.DateTimeField(auto_now_add=True)
ip_address = models.GenericIPAddressField(null=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True
)
data_file = models.ForeignKey(
DataFile, related_name="access_logs", on_delete=models.SET_NULL, null=True
)
serialized_data_file = JSONField(default=dict, null=True)
data_file_key = JSONField(default=dict, null=True)
aws_url = models.CharField(max_length=400, null=True)
def __str__(self):
return str("{0} {1} {2} {3}").format(
self.date, self.ip_address, self.user, self.aws_url
)
class AWSDataFileAccessLog(models.Model):
"""
Logs every time a file is accessed on the Amazon side.
"""
created = models.DateTimeField(auto_now_add=True)
serialized_data_file = JSONField(default=dict, null=True)
oh_data_file_access_log = models.ManyToManyField(NewDataFileAccessLog)
# The following fields are populated from the AWS data
bucket_owner = models.CharField(max_length=100)
bucket = models.CharField(max_length=64)
time = models.DateTimeField()
remote_ip = models.GenericIPAddressField(null=True)
requester = models.CharField(max_length=64, null=True)
request_id = models.CharField(max_length=32, null=True)
operation = models.CharField(max_length=32, null=True)
bucket_key = models.CharField(max_length=500, null=True)
request_uri = models.CharField(max_length=500, null=True)
status = models.IntegerField(null=True)
error_code = models.CharField(max_length=64, null=True)
bytes_sent = models.BigIntegerField(null=True)
object_size = models.BigIntegerField(null=True)
total_time = models.IntegerField(null=True)
turn_around_time = models.IntegerField(null=True)
referrer = models.CharField(max_length=500, null=True)
user_agent = models.CharField(max_length=254, null=True)
version_id = models.CharField(max_length=128, null=True)
host_id = models.CharField(max_length=128, null=True)
signature_version = models.CharField(max_length=32, null=True)
cipher_suite = models.CharField(max_length=128, null=True)
auth_type = models.CharField(max_length=32, null=True)
host_header = models.CharField(max_length=64, null=True)
@property
def datafile(self):
"""
        Helper that returns the DataFile if it still exists, or None if not.
"""
datafile_id = self.serialized_data_file.get("id", None)
df = DataFile.objects.filter(id=datafile_id)
if df.count() == 1:
return df.get()
return None
class TestUserData(models.Model):
"""
Used for unit tests in public_data.tests; there's not currently a way to
make test-specific model definitions in Django (a bug open since 2009,
#7835)
"""
user = fields.AutoOneToOneField(
settings.AUTH_USER_MODEL,
related_name="test_user_data",
on_delete=models.CASCADE,
)
class DataType(models.Model):
"""
Describes the types of data a DataFile can contain.
"""
name = models.CharField(
max_length=128, blank=False, unique=True, validators=[charvalidator]
)
parent = models.ForeignKey(
"self", blank=True, null=True, related_name="children", on_delete=models.PROTECT
)
last_editor = models.ForeignKey(Member, on_delete=models.SET_NULL, null=True)
description = models.CharField(max_length=512, blank=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
history = JSONField(default=dict, editable=False)
def __str__(self):
parents = self.all_parents
if parents:
parents.reverse()
parents = [parent.name for parent in parents if parent]
parents = ":".join(parents)
return str("{0}:{1}").format(parents, self.name)
return self.name
def save(self, *args, **kwargs):
"""
Override save to record edit history and require an associated "editor".
"editor" is an instance-specific parameter; this avoids accepting an update
that is merely retaining the existing value for the "last_editor" field.
"""
        if not getattr(self, "editor", None):
raise ValueError("'self.editor' must be set when saving DataType.")
else:
self.last_editor = self.editor
self.history[arrow.get(timezone.now()).isoformat()] = {
"name": self.name,
"parent": self.parent.id if self.parent else None,
"description": self.description,
"editor": self.last_editor.id,
}
return super().save(*args, **kwargs)
@property
def history_sorted(self):
history_sorted = OrderedDict()
items_sorted = sorted(
self.history.items(), key=lambda item: arrow.get(item[0]), reverse=True
)
for item in items_sorted:
parent = (
DataType.objects.get(id=item[1]["parent"])
if item[1]["parent"]
else None
)
try:
editor = Member.objects.get(id=item[1]["editor"])
except Member.DoesNotExist:
editor = None
history_sorted[arrow.get(item[0]).datetime] = {
"name": item[1]["name"],
"parent": parent,
"description": item[1]["description"],
"editor": editor,
}
return history_sorted
@property
def editable(self):
"""
Return True if no approved projects are registered as using this.
"""
# Always true for a new instance that hasn't yet been saved:
if not self.id:
return True
approved_registered = self.datarequestproject_set.filter(approved=True)
if approved_registered:
return False
else:
return True
@property
def all_parents(self):
"""
Return list of parents, from immediate to most ancestral.
"""
parent = self.parent
parents = []
if parent:
while True:
if not parent:
break
parents.append(parent)
parent = parent.parent
return parents
@classmethod
def all_as_tree(cls):
"""
        Dict tree of all datatypes. Key = datatype & value = dict of its children, recursively.
This method is intended to make all ancestry relationships available without
having to hit the database more than necessary.
"""
def _children(parent, all_datatypes):
children = {}
for dt in [dt for dt in all_datatypes if dt.parent == parent]:
children[dt] = _children(dt, all_datatypes)
return children
all_datatypes = list(DataType.objects.all())
roots = DataType.objects.filter(parent=None)
tree = {dt: _children(dt, all_datatypes) for dt in roots}
return tree
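    # Illustrative sketch (not from the original source) of the returned shape, for
    # hypothetical datatypes "Genome" with a child "VCF":
    #   {<DataType: Genome>: {<DataType: Genome:VCF>: {}}}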
@classmethod
def sorted_by_ancestors(cls, queryset=None):
"""
Sort DataTypes by ancestors array of dicts containing 'datatype' and 'depth'.
"""
def _flatten(node, depth=0):
flattened = []
for child in sorted(node.keys(), key=lambda obj: obj.name):
flattened.append({"datatype": child, "depth": depth})
flattened = flattened + _flatten(node[child], depth=depth + 1)
return flattened
datatypes_tree = cls.all_as_tree()
return _flatten(datatypes_tree)
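    # Illustrative sketch (not from the original source) of the flattened output for
    # the same hypothetical datatypes:
    #   [{"datatype": <DataType: Genome>, "depth": 0},
    #    {"datatype": <DataType: Genome:VCF>, "depth": 1}]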
```
#### File: open-humans/open_humans/models.py
```python
import random
import re
from collections import OrderedDict
import arrow
from allauth.account.models import EmailAddress as AccountEmailAddress
from bs4 import BeautifulSoup
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import AbstractUser, UserManager
from django.contrib.auth.validators import ASCIIUsernameValidator
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models import Q
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
import requests
from common.utils import LEGACY_APPS
from .storage import PublicStorage
from .testing import has_migration
def get_member_profile_image_upload_path(instance, filename):
"""
Construct the upload path for a given member and filename.
"""
return str("member/{0}/profile-images/{1}").format(instance.user.id, filename)
def get_grant_project_image_upload_path(instance, filename):
"""
Construct the upload path for an image for a ProjectGrant object.
"""
return str("grant-projects/{0}/{1}").format(instance.name, filename)
def random_member_id():
"""
Return a zero-padded string from 00000000 to 99999999 that's not in use by
any Member.
"""
def random_id():
return str("{0:08d}").format(random.randint(0, 99_999_999))
member_id = random_id()
while Member.objects.filter(member_id=member_id).count() > 0:
member_id = random_id()
return member_id
class UserEvent(models.Model):
"""
Holds logs of user events.
"""
user = models.ForeignKey("User", on_delete=models.CASCADE)
event_type = models.CharField(max_length=32)
timestamp = models.DateTimeField(auto_now_add=True)
data = JSONField(default=dict)
def __str__(self):
return str("{0}:{1}:{2}").format(
self.timestamp, self.user, repr(self.data)[0:50]
)
@deconstructible
class OpenHumansUsernameValidator(ASCIIUsernameValidator):
regex = r"^[A-Za-z_0-9]+$"
message = _(
"Enter a valid username. This value may contain only English letters, "
"numbers, and _ characters."
)
ohusernamevalidators = [OpenHumansUsernameValidator()]
class OpenHumansUserManager(UserManager):
"""
Allow user lookup by case-insensitive username or email address.
"""
def get_queryset(self):
"""
Alter the queryset to always get the member and the member's
public_data_participant; this reduces the number of queries for most
views.
"""
# need to check that the Member and PublicDataParticipant model exist;
# we do this by ensuring that the migration has ran (this is only
# important when tests are being run)
# TODO: check if this is still needed after the squash that happened
if not has_migration("open_humans", "0006_userevent_event_type"):
return super(OpenHumansUserManager, self).get_queryset()
return (
super(OpenHumansUserManager, self)
.get_queryset()
.select_related("member")
.select_related("member__public_data_participant")
)
def get_by_natural_key(self, username):
return self.get(Q(username__iexact=username) | Q(email__iexact=username))
class User(AbstractUser):
"""
The Django base user with case-insensitive username and email lookup.
"""
objects = OpenHumansUserManager()
def log(self, event_type, data):
"""
Log an event to this user.
"""
user_event = UserEvent(user=self, event_type=event_type, data=data)
user_event.save()
class Meta: # noqa: D101
db_table = "auth_user"
class Member(models.Model):
"""
Represents an Open Humans member.
"""
objects = models.Manager()
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
name = models.CharField(max_length=30)
profile_image = models.ImageField(
blank=True,
max_length=1024,
# Stored on S3
storage=PublicStorage(),
upload_to=get_member_profile_image_upload_path,
)
about_me = models.TextField(blank=True)
# When the model is saved and this field has changed we subscribe or
# unsubscribe the user from the Mailchimp list accordingly
newsletter = models.BooleanField(
default=True, verbose_name="Receive Open Humans news and updates"
)
allow_user_messages = models.BooleanField(
default=False, verbose_name="Allow members to contact me"
)
member_id = models.CharField(max_length=8, unique=True, default=random_member_id)
seen_pgp_interstitial = models.BooleanField(default=False)
password_reset_redirect = models.CharField(
max_length=254, default="", blank=True, null=True
)
def __str__(self):
return str(self.user)
@property
def primary_email(self):
"""
Get the EmailAddress from the django-accounts application, used to
check email validation.
"""
return AccountEmailAddress.objects.get_primary(self.user)
@property
def connections(self):
"""
Return a list of dicts containing activity and study connection
information. Connections represent data import relationships
(i.e., Open Humans is receiving data from this source).
"""
connections = {}
prefix_to_type = {"studies": "study", "activities": "activity"}
app_configs = apps.get_app_configs()
for app_config in app_configs:
if "." not in app_config.name:
continue
            prefix = app_config.name.split(".")[0]  # 'studies', 'activities'
connection_type = prefix_to_type.get(prefix) # 'study', 'activity'
if not connection_type:
continue
# TODO: Remove this when completing app removal.
if app_config.label in LEGACY_APPS:
continue
# all of the is_connected methods are written in a way that they
# work against the cached QuerySet of the EnrichedManager
connected = getattr(self.user, app_config.label).is_connected
# If connected, add to the dict.
if connected:
connections[app_config.label] = {
"type": connection_type,
"verbose_name": app_config.verbose_name,
"label": app_config.label,
"name": app_config.name,
"disconnectable": app_config.disconnectable,
}
return OrderedDict(
sorted(connections.items(), key=lambda x: x[1]["verbose_name"])
)
class EmailMetadata(models.Model):
"""
Metadata about email correspondence sent from a user's profile page.
"""
sender = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name="sender", on_delete=models.CASCADE
)
receiver = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name="receiver", on_delete=models.CASCADE
)
timestamp = models.DateTimeField(auto_now_add=True)
class BlogPost(models.Model):
"""
Store data about blogposts, to be displayed on the site.
"""
rss_id = models.CharField(max_length=120, unique=True)
title = models.CharField(max_length=120, blank=True)
summary_long = models.TextField(blank=True)
summary_short = models.TextField(blank=True)
image_url = models.CharField(max_length=2083, blank=True)
published = models.DateTimeField()
@classmethod
def create(cls, rss_feed_entry):
post = cls(rss_id=rss_feed_entry["id"])
post.summary_long = rss_feed_entry["summary"]
req = requests.get(rss_feed_entry["id"])
soup = BeautifulSoup(req.text, features="html.parser")
post.title = soup.find(attrs={"property": "og:title"})["content"][0:120]
post.summary_short = soup.find(attrs={"property": "og:description"})["content"]
image_url = soup.find(attrs={"property": "og:image"})["content"]
if "gravatar" not in image_url:
post.image_url = image_url
post.published = arrow.get(
soup.find(attrs={"property": "article:published_time"})["content"]
).datetime
post.save()
return post
@property
def published_day(self):
return arrow.get(self.published).format("ddd, MMM D YYYY")
class GrantProject(models.Model):
"""
Store data about an ongoing grant project.
"""
name = models.CharField(max_length=255, unique=True)
grant_date = models.DateField(null=True)
status = models.CharField(max_length=120)
github = models.TextField(blank=True)
grantee_name = models.CharField(max_length=255)
photo = models.ImageField(
blank=True,
max_length=1024,
# Stored on S3
storage=PublicStorage(),
upload_to=get_grant_project_image_upload_path,
)
blog_url = models.TextField()
project_desc = models.TextField()
def __str__(self):
return str(self.name)
```
#### File: open-humans/open_humans/tests.py
```python
from io import StringIO
import unittest
from allauth.account.models import EmailAddress, EmailConfirmation
from django.conf import settings
from django.contrib import auth
from django.core import mail, management
from django.db import IntegrityError
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from rest_framework.test import APITestCase
from mock import patch
from common.testing import BrowserTestCase, get_or_create_user, SmokeTestCase
from private_sharing.models import DataRequestProject
from .models import Member
UserModel = auth.get_user_model()
class SmokeTests(SmokeTestCase):
"""
A simple GET test for all of the simple URLs in the site.
"""
anonymous_urls = ["/account/login/", "/account/password/reset/", "/account/signup/"]
authenticated_or_anonymous_urls = [
"/",
"/about/",
"/activity/favorite-trance-tracks/",
"/activity/groovy-music/",
"/api/public-data/?username=beau",
"/api/public-data/?created_start=2/14/2016&created_end=2/14/2016",
"/api/public-data/sources-by-member/",
"/api/public-data/members-by-source/",
"/api/public-data/datatypes/",
"/api/public-data/projects/",
"/beau/",
"/community-guidelines/",
"/contact-us/",
"/copyright/",
"/data-use/",
"/member/beau/",
"/members/",
"/members/page/1/",
"/members/?sort=username",
"/members/page/1/?sort=username",
"/public-data/",
"/public-data-api/",
"/news/",
"/create/",
"/terms/",
"/gdpr/",
]
redirect_urls = [
"/account/delete/",
"/member/beau/email/",
"/member/me/",
"/member/me/account-settings/",
"/member/me/change-email/",
"/member/me/change-name/",
"/member/me/connections/",
# '/member/me/connections/delete/1/',
"/member/me/edit/",
"/member/me/joined/",
"/member/me/data/",
"/member/me/research-data/delete/pgp/",
"/member/me/research-data/delete/american_gut/",
"/member/me/research-data/delete/runkeeper/",
"/member/me/send-confirmation-email/",
"/public-data/activate-1-overview/",
"/public-data/activate-2-information/",
# require a POST
# '/public-data/activate-3-quiz/',
# '/public-data/activate-4-signature/',
# 301 redirect
# '/public-data/toggle-sharing/',
"/public-data/deactivate/",
]
authenticated_urls = redirect_urls + [
"/account/password/",
(
"/oauth2/authorize/?origin=external&response_type=code"
"&scope=go-viral%20read%20write&client_id=example-id-15"
),
]
def test_custom_404(self):
self.assert_status_code("/does-not-exist/", status_code=404)
def test_custom_500(self):
with self.assertRaises(Exception):
self.assert_status_code("/raise-exception/", status_code=500)
@override_settings(SSLIFY_DISABLE=True)
class OpenHumansUserTests(TestCase):
"""
Tests for our custom User class.
"""
fixtures = ["open_humans/fixtures/test-data.json"]
def setUp(self): # noqa
get_or_create_user("user1")
def test_lookup_by_username(self):
user1 = auth.authenticate(username="user1", password="<PASSWORD>")
self.assertEqual(user1.username, "user1")
def test_lookup_by_email(self):
user1 = auth.authenticate(username="<EMAIL>", password="<PASSWORD>")
self.assertEqual(user1.username, "user1")
def test_redirect_on_login(self):
"""
Redirect to previous page on login.
"""
first_redirect = "/"
first_response = self.client.post(
reverse("account_login"),
{"next": first_redirect, "login": "chickens", "password": "<PASSWORD>"},
)
self.assertEqual(first_response.status_code, 302)
self.assertEqual(first_response.url, first_redirect)
second_redirect = "/api/public-data/?source=direct-sharing-1"
second_response = self.client.post(
reverse("account_login"),
{"next": second_redirect, "login": "chickens", "password": "<PASSWORD>"},
)
self.assertEqual(second_response.status_code, 302)
self.assertEqual(second_response.url, second_redirect)
def test_password_reset(self):
"""
        Test that password reset works and that we redirect to the proper
place when a password reset is made.
"""
redirect = "/"
response_request_reset = self.client.post(
reverse("account_reset_password"),
{"next_t": redirect, "email": "<EMAIL>"},
)
self.assertEqual(response_request_reset.status_code, 302)
# We should now have mail in the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "[Open Humans] Password Reset E-mail")
reset_url = [
item
for item in mail.outbox[0].body.split("\n")
if "account/password/reset/key" in item
][0]
key = reset_url.split("/")[7]
# Go ahead and reset the mailbox
mail.outbox = []
do_reset_response = self.client.get(reset_url)
self.assertEqual(do_reset_response.status_code, 200)
self.assertContains(do_reset_response, "Set your new password")
do_reset_post_response = self.client.post(
reset_url, {"password": "<PASSWORD>", "password_confirm": "<PASSWORD>"}
)
self.assertEqual(do_reset_post_response.status_code, 302)
self.assertEqual(do_reset_post_response.url, redirect)
def test_lowercase_unique(self):
# Create a lowercase user2
UserModel.objects.create_user("user2", "<EMAIL>", "user2")
# Creating an uppercase USER2 should fail
self.assertRaises(
IntegrityError,
UserModel.objects.create_user,
"USER2",
"<EMAIL>",
"user2",
)
@unittest.skip("The way the django-oauth model handles the primary key has changed")
class CommandTests(TestCase):
"""
Tests for our management commands.
"""
fixtures = ["open_humans/fixtures/test-data.json"]
def setUp(self):
self.output = StringIO()
def test_bulk_email(self):
try:
import sys
out, sys.stdout = sys.stdout, StringIO()
management.call_command("bulk_email", "-h", stdout=self.output)
sys.stdout = out
except SystemExit as e:
if e.code != 0:
raise e
def test_setup_api(self):
management.call_command("setup_api", stdout=self.output)
def test_update_badges(self):
management.call_command("update_badges", stdout=self.output)
def test_user_connections_json(self):
management.call_command(
"user_connections_json", "/dev/null", stdout=self.output
)
def test_stats(self):
management.call_command("stats", "--days=365", stdout=self.output)
class WsgiTests(TestCase):
"""
Tests for our WSGI application.
"""
@staticmethod
def test_import():
from .wsgi import application # noqa, pylint: disable=unused-variable
class WelcomeEmailTests(TestCase):
"""
Tests for our welcome email.
"""
@patch("open_humans.signals.send_mail")
def test_send_welcome_email(self, mock):
user = get_or_create_user("email_test_user")
member = Member(user=user)
member.save()
email = user.emailaddress_set.all()[0]
email.verified = False
email.save()
confirmation = EmailConfirmation.create(email)
confirmation.sent = timezone.now()
confirmation.save()
# confirm the email; this sends the email_confirmed signals
confirmed_email = confirmation.confirm(request=mock)
self.assertTrue(confirmed_email is not None)
self.assertTrue(mock.called)
self.assertEqual(mock.call_count, 1)
self.assertEqual(mock.call_args[0][-1][0], "<EMAIL>")
class OpenHumansBrowserTests(BrowserTestCase):
"""
Browser tests of general Open Humans functionality.
"""
@unittest.skipIf(settings.NOBROWSER, "skipping browser tests")
def test_create_user(self):
driver = self.driver
driver.get(self.live_server_url)
driver.find_element_by_class_name("signup-link").click()
username = self.wait_for_element_id("signup-username")
username.clear()
username.send_keys("<PASSWORD>")
name = driver.find_element_by_id("signup-name")
name.clear()
name.send_keys("Test <PASSWORD>")
email = driver.find_element_by_id("email-address")
email.clear()
email.send_keys("<EMAIL>")
password = driver.find_element_by_id("signup-password")
password.clear()
password.send_keys("<PASSWORD>")
password_confirm = driver.find_element_by_id("signup-password-confirm")
password_confirm.clear()
password_confirm.send_keys("<PASSWORD>")
driver.find_element_by_name("terms").click()
driver.find_element_by_id("create-account").click()
self.assertEqual(
"Please verify your email address.",
driver.find_element_by_css_selector(
".call-to-action-3 > .container > h3"
).text,
)
@unittest.skipIf(settings.NOBROWSER, "skipping browser tests")
def test_remove_connection(self):
driver = self.driver
self.login()
driver.get(self.live_server_url + "/member/me/connections/")
driver.find_element_by_xpath(
"(//a[contains(text(),'Remove connection')])[1]"
).click()
driver.find_element_by_name("remove_datafiles").click()
driver.find_element_by_css_selector("label").click()
driver.find_element_by_css_selector("input.btn.btn-danger").click()
@override_settings(SSLIFY_DISABLE=True)
class HidePublicMembershipTestCase(APITestCase):
"""
Tests whether or not membership in public data activities is properly
hidden when requested.
"""
fixtures = ["open_humans/fixtures/test-data.json"]
def test_public_api(self):
"""
Tests the public API endpoints.
"""
user = UserModel.objects.get(username="bacon")
project = DataRequestProject.objects.get(id=1)
project_member = project.active_user(user)
project_member.set_visibility(visible_status=False)
results = self.client.get("/api/public-data/members-by-source/").data["results"]
result = {}
for item in results:
if item["source"] == "direct-sharing-1":
result = item
assert result["usernames"] == []
project_member.set_visibility(visible_status=True)
results = self.client.get("/api/public-data/members-by-source/").data["results"]
result = {}
for item in results:
if item["source"] == "direct-sharing-1":
result = item
assert result["usernames"] == ["bacon"]
``` |
{
"source": "JHE4N/Jogopython",
"score": 3
} |
#### File: JHE4N/Jogopython/snake-game.py
```python
import pygame
from random import randint
branco = (255,255,255)
preto = (0, 0, 0)
vermelho = (255, 0, 0)
verde = (0, 255, 0)
azul = (0, 0, 255)
try:
pygame.init()
except:
print('Deu algo de errado.')
largura = 640
altura = 480
tamanho = 10
relogio = pygame.time.Clock()
pygame.display.set_caption('Snake') # game title
fundo = pygame.display.set_mode((largura, altura)) # set the window width and height
font = pygame.font.SysFont(None, 25)
# Text
def texto(msg, cor):
texto_um = font.render(msg, True, cor)
fundo.blit(texto_um, [(largura/2) -40, altura/2])
def cobra(cobraxy):
for xy in cobraxy:
        pygame.draw.rect(fundo, branco, [xy[0], xy[1], tamanho, tamanho]) # draw the square (snake segment)
def maca(pos_x_maca, pos_y_maca):
    pygame.draw.rect(fundo, vermelho, [pos_x_maca, pos_y_maca, tamanho, tamanho]) # draw the square (apple)
def jogo():
    # Snake
    pos_x_cobra = randint(0, (largura - tamanho) /10) *10 # x position
    pos_y_cobra = randint(0, (altura - tamanho) /10) *10 # y position
    # Apple
    pos_x_maca = randint(0, (largura - tamanho) /10) *10 # x position
    pos_y_maca = randint(0, (altura - tamanho) /10) *10 # y position
velocidade_x = 0
velocidade_y = 0
cobraxy = []
comprimento_cobra = 1
sair = True
fim_de_jogo = False
while sair:
while fim_de_jogo:
fundo.fill(branco)
texto('Game Over. Pressione r para continuar', vermelho)
pygame.display.update()
        for event in pygame.event.get(): # loop over pending events
            if event.type == pygame.QUIT: # if the window close button [X] is pressed
sair = False
            # Key events for moving in any direction
if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT and velocidade_x != tamanho: # moving left
velocidade_y = 0
velocidade_x = -tamanho
                if event.key == pygame.K_RIGHT and velocidade_x != -tamanho: # moving right
velocidade_y = 0
velocidade_x = tamanho
                if event.key == pygame.K_UP and velocidade_y != tamanho: # moving up
velocidade_x = 0
velocidade_y = -tamanho
                if event.key == pygame.K_DOWN and velocidade_y != -tamanho: # moving down
velocidade_x = 0
velocidade_y = tamanho
if event.key == pygame.K_r:
jogo()
            print(event) # print events to the console
        fundo.fill(preto) # background color
        pos_x_cobra += velocidade_x # move along the x axis
        pos_y_cobra += velocidade_y # move along the y axis
cobra_inicio = []
cobra_inicio.append(pos_x_cobra)
cobra_inicio.append(pos_y_cobra)
cobraxy.append(cobra_inicio)
if len(cobraxy) > comprimento_cobra:
del cobraxy[0]
cobra(cobraxy)
maca(pos_x_maca, pos_y_maca)
        pygame.display.update() # refresh the window
        relogio.tick(15)
        if pos_x_cobra == pos_x_maca and pos_y_cobra == pos_y_maca: # if the snake eats the apple
print("pegou a maca")
comprimento_cobra+=10
            # Apple
            pos_x_maca = randint(0, (largura - tamanho) /10) *10 # x position
            pos_y_maca = randint(0, (altura - tamanho) /10) *10 # y position
        # Events for when the snake reaches the edge of the screen
if pos_x_cobra >= largura:
print('game over')
fim_de_jogo = True
if pos_x_cobra < 1:
print('game over')
fim_de_jogo = True
if pos_y_cobra >= altura:
print('game over')
fim_de_jogo = True
if pos_y_cobra < 1:
print('game over')
fim_de_jogo = True
        # If the snake bites itself
if any(Bloco == cobra_inicio for Bloco in cobraxy[:-1]):
print('game over')
fim_de_jogo = True
#pygame.quit()
#quit()
jogo()
``` |
{
"source": "jheaff1/rules_foreign_cc",
"score": 2
} |
#### File: third_party/gperftools/gperftools_repositories.bzl
```python
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
def gperftools_repositories():
maybe(
http_archive,
name = "gperftools",
build_file = Label("//gperftools:BUILD.gperftools.bazel"),
sha256 = "1ee8c8699a0eff6b6a203e59b43330536b22bbcbe6448f54c7091e5efb0763c9",
strip_prefix = "gperftools-2.7",
urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-2.7/gperftools-2.7.tar.gz"],
)
```
#### File: third_party/openssl/openssl_setup.bzl
```python
load("@rules_perl//perl:deps.bzl", "perl_register_toolchains", "perl_rules_dependencies")
def openssl_setup():
perl_rules_dependencies()
perl_register_toolchains()
```
#### File: foreign_cc/private/make_script.bzl
```python
def create_make_script(
root,
inputs,
make_commands):
"""Constructs Make script to be passed to cc_external_rule_impl.
Args:
root (str): sources root relative to the $EXT_BUILD_ROOT
inputs (struct): An InputFiles provider
make_commands (list): Lines of bash which invoke make
Returns:
list: Lines of bash which make up the build script
"""
ext_build_dirs = inputs.ext_build_dirs
script = pkgconfig_script(ext_build_dirs)
script.append("##symlink_contents_to_dir## $$EXT_BUILD_ROOT$$/{} $$BUILD_TMPDIR$$".format(root))
script.append("##enable_tracing##")
script.extend(make_commands)
script.append("##disable_tracing##")
return script
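# Illustrative sketch (not part of the original file): for a hypothetical call like
#   create_make_script(root = "src", inputs = inputs, make_commands = ["make", "make install"])
# the returned list starts with the pkg-config setup lines from pkgconfig_script(),
# then a ##symlink_contents_to_dir## line for "src", then the make commands wrapped
# in ##enable_tracing##/##disable_tracing## markers.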
def pkgconfig_script(ext_build_dirs):
"""Create a script fragment to configure pkg-config
Args:
ext_build_dirs (list): A list of directories (str)
Returns:
list: Lines of bash that perform the update of `pkg-config`
"""
script = []
if ext_build_dirs:
for ext_dir in ext_build_dirs:
script.append("##increment_pkg_config_path## $$EXT_BUILD_DEPS$$/" + ext_dir.basename)
script.append("echo \"PKG_CONFIG_PATH=$${PKG_CONFIG_PATH:-}$$\"")
script.extend([
"##define_absolute_paths## $$EXT_BUILD_DEPS$$ $$EXT_BUILD_DEPS$$",
"##define_sandbox_paths## $$EXT_BUILD_DEPS$$ $$EXT_BUILD_ROOT$$",
])
return script
```
#### File: rules_foreign_cc/toolchains/toolchains.bzl
```python
load(":built_toolchains.bzl", _built_toolchains = "built_toolchains")
load(":prebuilt_toolchains.bzl", _prebuilt_toolchains = "prebuilt_toolchains")
# Re-expose the built toolchains macro
built_toolchains = _built_toolchains
# Re-expose the prebuilt toolchains macro
prebuilt_toolchains = _prebuilt_toolchains
# buildifier: disable=unnamed-macro
def preinstalled_toolchains():
"""Register toolchains for various build tools expected to be installed on the exec host"""
native.register_toolchains(
str(Label("//toolchains:preinstalled_cmake_toolchain")),
str(Label("//toolchains:preinstalled_make_toolchain")),
str(Label("//toolchains:preinstalled_ninja_toolchain")),
)
``` |
{
"source": "jheaff1/rules_python",
"score": 2
} |
#### File: gazelle/modules_mapping/def.bzl
```python
def _modules_mapping_impl(ctx):
modules_mapping = ctx.actions.declare_file(ctx.attr.modules_mapping_name)
args = ctx.actions.args()
args.add(modules_mapping.path)
args.add_all([whl.path for whl in ctx.files.wheels])
ctx.actions.run(
inputs = ctx.files.wheels,
outputs = [modules_mapping],
executable = ctx.executable._generator,
arguments = [args],
use_default_shell_env = False,
)
return [DefaultInfo(files = depset([modules_mapping]))]
modules_mapping = rule(
_modules_mapping_impl,
attrs = {
"modules_mapping_name": attr.string(
default = "modules_mapping.json",
doc = "The name for the output JSON file.",
mandatory = False,
),
"wheels": attr.label_list(
allow_files = True,
doc = "The list of wheels, usually the 'all_whl_requirements' from @<pip_repository>//:requirements.bzl",
mandatory = True,
),
"_generator": attr.label(
cfg = "exec",
default = "//gazelle/modules_mapping:generator",
executable = True,
),
},
doc = "Creates a modules_mapping.json file for mapping module names to wheel distribution names.",
)
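# Illustrative usage sketch (not part of the original file), following the hint in the
# "wheels" attribute doc; the pip repository name is a placeholder:
#   load("@pip//:requirements.bzl", "all_whl_requirements")
#   modules_mapping(
#       name = "modules_map",
#       wheels = all_whl_requirements,
#   )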
``` |
{
"source": "jheard-tw/region_cache",
"score": 3
} |
#### File: region_cache/region_cache/region_cache.py
```python
import datetime
from urllib.parse import urlparse
import redis
import pickle
from .region import Region
from logging import getLogger
_logger = getLogger('region_cache')
class RegionCache(object):
"""
This is the flask extension itself. Initialize this when you initialize all of your other extensions.
"""
def __init__(
self,
root='root',
serializer=pickle,
host='localhost',
port=6379,
db=0,
password=<PASSWORD>,
op_timeout=None,
reconnect_on_timeout=True,
timeout_backoff=None,
raise_on_timeout=False,
rr_host=None,
rr_port=0,
rr_password=<PASSWORD>,
*args,
**kwargs
):
"""
Construct a new RegionCache object.
Pass in params, or if you are using a with Flask or Celery, you can control with config vars.
        :param root (optional str): Default 'root'. The key to use for the base region.
        :param serializer (optional pickle-like object): Default = pickle. Flask/Celery config is
            REGION_CACHE_SERIALIZER.
        :param host (optional str): Default localhost. The hostname of the redis master instance. Flask/Celery config is
            REGION_CACHE_HOST.
        :param port (int): Default 6379. The port of the redis master instance. Flask/Celery config is
            REGION_CACHE_PORT.
        :param db (int): Default 0. The db number to use on the redis master instance. Flask/Celery config is
            REGION_CACHE_DB.
        :param password (optional str): The password to use for the redis master instance. Flask/Celery config is
            REGION_CACHE_PASSWORD.
        :param op_timeout (optional number): Default = no timeout. A timeout in seconds after which an operation will
            fail. Flask/Celery config is REGION_CACHE_OP_TIMEOUT.
        :param reconnect_on_timeout (optional bool): Default = True. Whether to close the connection and reconnect on
            timeout. Flask/Celery config is REGION_CACHE_OP_TIMEOUT_RECONNECT.
        :param timeout_backoff (optional int): Seconds that we should wait before trying to reconnect to the cache
            after a timeout. Flask/Celery config is REGION_CACHE_RECONNECT_BACKOFF.
        :param raise_on_timeout (optional bool): Default = False. If false, we catch the exception and return None for
            readonly operations. Otherwise raise redis.TimeoutError. Flask/Celery config is
            REGION_CACHE_OP_TIMEOUT_RAISE.
:param rr_host (optional str): Default None. The host for a redis read-replica, if it exists. Flask/Celery
config is REGION_CACHE_RR_HOST.
:param rr_port (int): The port for a redis read-replica. Flask/Celery config is REGION_CACHE_RR_PORT.
:param rr_password (str): The password for the redis read replica. Flask/Celery config is
REGION_CACHE_RR_PASSWORD.
:param args: Arguments to pass to StrictRedis. Flask/Celery config is REGION_CACHE_REDIS_ARGS.
:param kwargs: Extra options to pass to StrictRedis. Flask/Celery config is REGION_CACHE_REDIS_OPTIONS.
"""
self._serializer = serializer
self._regions = {}
self._w_conn = None
self._r_conn = None
self._root_name = root
self._op_timeout = op_timeout
self._reconnect_on_timeout = reconnect_on_timeout
self._raise_on_timeout = raise_on_timeout
self._reconnect_backoff = timeout_backoff
self._last_timeout = None
self._reconnect_after = None
self._host = host
self._port = port
self._db = db
self._password = password
self._rr_host = rr_host
self._rr_port = rr_port or 6379
self._rr_password = <PASSWORD>
self._args = args
self._kwargs = kwargs
self._root = None
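    # Illustrative construction sketch (not part of the original file); the host, port
    # and timeout values are placeholders:
    #   cache = RegionCache(host="localhost", port=6379, db=0, op_timeout=0.5)
    #   users = cache.region("myapp.users")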
def init_app(self, app):
"""
Configure this object as a flask or celery extension through the flask config.
:param app: A flask or celery app.
:return: None
"""
if app.config.get('REGION_CACHE_OP_TIMEOUT', None):
self._reconnect_on_timeout = app.config.get('REGION_CACHE_OP_TIMEOUT_RECONNECT', self._reconnect_on_timeout)
self._reconnect_backoff = app.config.get('REGION_CACHE_RECONNECT_BACKOFF', self._reconnect_backoff)
self._op_timeout = app.config.get('REGION_CACHE_OP_TIMEOUT', self._op_timeout)
self._raise_on_timeout = app.config.get('REGION_CACHE_OP_TIMEOUT_RAISE', self._raise_on_timeout)
if self._reconnect_backoff:
self._reconnect_backoff = float(self._reconnect_backoff)
if self._op_timeout:
self._op_timeout = float(self._op_timeout)
if 'REGION_CACHE_URL' in app.config:
redis_url_parsed = urlparse(app.config['REGION_CACHE_URL'])
self._host = redis_url_parsed.hostname
self._port = redis_url_parsed.port or 6379
self._db = int(redis_url_parsed.path[1:])
self._password = <PASSWORD>
else:
self._host = app.config.get('REGION_CACHE_HOST', 'localhost')
self._port = app.config.get('REGION_CACHE_PORT', 6379)
self._password = app.config.get('REGION_CACHE_PASSWORD', None)
# if there's a read replica to connect to.
if 'REGION_CACHE_RR_URL' in app.config:
redis_url_parsed = urlparse(app.config['REGION_CACHE_RR_URL'])
self._rr_host = redis_url_parsed.hostname
self._rr_port = redis_url_parsed.port or 6379
self._rr_password = <PASSWORD>
else:
self._rr_host = app.config.get('REGION_CACHE_RR_HOST', None)
self._rr_port = app.config.get('REGION_CACHE_RR_PORT', None)
self._rr_password = app.config.get('REGION_CACHE_RR_PASSWORD', None)
self._args += tuple(app.config.get('REGION_CACHE_REDIS_ARGS', ()))
self._kwargs.update(app.config.get('REGION_CACHE_REDIS_OPTIONS', {}))
self._root = self.region()
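    # Illustrative Flask wiring sketch (not part of the original file), using the
    # config keys read above; the URL value is a placeholder:
    #   app = Flask(__name__)
    #   app.config["REGION_CACHE_URL"] = "redis://localhost:6379/0"
    #   region_cache = RegionCache()
    #   region_cache.init_app(app)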
def invalidate_connections(self):
_logger.debug("Invalidating connections")
if self._r_conn and self._r_conn is not self._w_conn:
self._r_conn.connection_pool.disconnect()
if self._w_conn:
self._w_conn.connection_pool.disconnect()
self._r_conn = None
self._w_conn = None
self._last_timeout = datetime.datetime.utcnow()
if self._reconnect_backoff:
self._reconnect_after = self._last_timeout + datetime.timedelta(self._reconnect_backoff)
def is_disconnected(self):
if not (self._w_conn and self._r_conn) and self._reconnect_after:
if datetime.datetime.utcnow() < self._reconnect_after:
return True
return False
@property
def conn(self):
"""
The master connection to redis.
"""
if not self._w_conn:
_logger.debug("Attempting connection to redis on %s", self._host)
self._reconnect_after = None
kwargs = dict(**self._kwargs)
if self._op_timeout:
kwargs['socket_timeout'] = self._op_timeout
try:
self._w_conn = redis.StrictRedis(
host=self._host,
port=self._port,
db=self._db,
password=<PASSWORD>,
*self._args,
**kwargs
)
except Exception:
_logger.exception("Failed to (re)connect to redis on %s.", self._host)
self.invalidate_connections()
return self._w_conn
@property
def read_conn(self):
"""
A connection suitable for doing readonly operations against redis. Uses a read-replica if configured.
"""
if not self._r_conn:
self._reconnect_after = None
if self._rr_host:
_logger.debug('Attempting to connect to read replica redis on %s', self._rr_host)
try:
self._r_conn = redis.StrictRedis(
host=self._rr_host,
port=self._rr_port,
db=self._db,
password=<PASSWORD>._rr_password,
*self._args,
**self._kwargs
)
return self._r_conn
except Exception:
_logger.exception("Failed to (re)connect to redis on %s", self._rr_host)
self.invalidate_connections()
else:
return self.conn
else:
return self._r_conn
def region(self, name=None, timeout=None, update_resets_timeout=True, serializer=None):
"""
Return a (possibly existing) cache region.
:param name: (str) The name of the region. Should be a dot-separated string.
:param timeout: (int) Default=None. The TTL (secs) that the region should live before invalidating.
:param update_resets_timeout: Default=True. Updating the cache should start the timeout over again for the
whole region.
:param serializer: (serializer) Default=None. An alternative serializer to the default for the region.
:return: Region
"""
if name is None:
name = self._root_name
if name in self._regions:
return self._regions[name]
names = name.split('.') if '.' in name else [name]
names.reverse()
if name != self._root_name and not name.startswith('root.'):
names.append(self._root_name)
parts = []
fqname = ''
while names:
parts.append(names.pop())
fqname = '.'.join(parts)
if fqname not in self._regions:
_logger.info("Initializing region %s", fqname)
self._regions[fqname] = Region(
self, fqname,
timeout=timeout,
update_resets_timeout=update_resets_timeout,
serializer=serializer or self._serializer
)
return self._regions[fqname]
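    # Illustrative usage sketch (not part of the original file); region names are
    # placeholders:
    #   r = cache.region("abc.xyz")   # creates "root", "root.abc" and "root.abc.xyz"
    #   r["some_key"] = {"cached": True}
    #   sub = r.region("details")     # child region of "root.abc.xyz"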
def clear(self):
"""
Invalidate and empty this cache region and all its sub-regions.
:return: None
"""
_logger.info("Clearing entire cache")
self.region().invalidate() # invalidate the root cache region will cascade down.
```
#### File: region_cache/tests/test_region_cache.py
```python
import pytest
from collections import namedtuple
from region_cache import RegionCache
@pytest.fixture(params=[
{'REGION_CACHE_URL': 'redis://localhost:6379/5'},
{
'REGION_CACHE_URL': 'redis://localhost:6379/5',
'REGION_CACHE_RR_URL': 'redis://localhost:6379/5'
},
{
'REGION_CACHE_HOST': 'localhost',
'REGION_CACHE_PORT': 6379,
'REGION_CACHE_DB': 5,
'REGION_CACHE_RR_HOST': 'localhost',
'REGION_CACHE_RR_PORT': 6379
},
{
'REGION_CACHE_HOST': 'localhost',
'REGION_CACHE_PORT': 6379,
'REGION_CACHE_DB': 5,
'REGION_CACHE_OP_TIMEOUT': 0.5
},
{
'REGION_CACHE_HOST': 'localhost',
'REGION_CACHE_PORT': 6379,
'REGION_CACHE_DB': 5,
'REGION_CACHE_OP_TIMEOUT': 0.5,
'REGION_CACHE_OP_TIMEOUT_RAISE': False,
'REGION_CACHE_OP_TIMEOUT_RECONNECT': True,
'REGION_CACHE_REDIS_OPTIONS': {
'max_connections': 3
}
}
])
def app(request):
return namedtuple('app', ['config'])(config=request.param)
@pytest.fixture()
def region_cache(app):
c = RegionCache()
c.init_app(app)
c.conn.flushall()
yield c
c.conn.flushall()
@pytest.fixture()
def region(region_cache):
r = region_cache.region('example_region')
yield r
r.invalidate()
@pytest.fixture()
def region_with_timeout(region_cache):
r = region_cache.region('timed_region', timeout=2)
yield r
r.invalidate()
def test_init_app(app):
c = RegionCache()
c.init_app(app)
assert c.conn
assert c.conn.ping()
assert c._root
assert c._root_name in c._regions
assert c._regions[c._root_name] is c._root
assert len(c._regions) == 1
def test_subregions(region_cache):
r = region_cache.region('abc.xyz')
assert '{region_cache._root_name}.abc'.format(region_cache=region_cache) in region_cache._regions
assert '{region_cache._root_name}.abc.xyz'.format(region_cache=region_cache) in region_cache._regions
assert 'abc.xyz' not in region_cache._regions
assert 'xyz' not in region_cache._regions
r1 = region_cache.region('xml', timeout=60)
assert r1._timeout == 60
r2 = r1.region('json')
assert r2._timeout == 60
def test_region_context_manager(region):
with region as r:
r['key1'] = 0
r['key2'] = 1
assert 'key1' in region
assert 'key2' in region
assert region._region_cache.conn.hget(region.name, 'key1') is not None
assert region._region_cache.conn.hget(region.name, 'key2') is not None
def test_invalidate(region):
region['key'] = 'value'
region.invalidate()
assert 'key' not in region
assert region._region_cache.conn.hget(region.name, 'key') is None
sb = region.region('sub')
sb['key2'] = 'value'
region.invalidate()
assert region._region_cache.conn.hget(sb.name, 'key2') is None
assert 'key2' not in sb
def test_invalidate_region(region_cache, region):
region['key'] = 'value'
region_cache.region('root').invalidate()
assert 'key' not in region
assert region._region_cache.conn.hget(region.name, 'key') is None
sb = region.region('sub')
sb['key2'] = 'value'
region.invalidate()
assert region._region_cache.conn.hget(sb.name, 'key2') is None
assert 'key2' not in sb
def test_items(region):
region['foo'] = 'bar'
assert region['foo'] == 'bar'
assert region._region_cache.conn.hget(region.name, 'foo') is not None
del region['foo']
assert pytest.raises(KeyError, lambda: region['foo'])
def test_children(region):
sb = region.region('sub')
assert sb in list(region.children())
def test_iter(region, region_cache):
region['foo'] = 'bar'
assert [x for x in region]
region.invalidate()
def test_invalidate_on(region):
import blinker
s = blinker.signal('named_signal')
t = blinker.signal('other_signal')
region['key'] = 'value'
region.invalidate_on(s, t)
s.send('nothing',in_='particular')
assert 'key' not in region
assert region._region_cache.conn.hget(region.name, 'key') is None
region['key'] = 'value'
t.send('nothing', in_='particular')
assert 'key' not in region
assert region._region_cache.conn.hget(region.name, 'key') is None
def test_cached(region):
called = [0]
@region.cached
def foobar(k, x=None):
called[0] += 1
return k
foobar(1)
assert called[0] == 1
foobar(1)
assert called[0] == 1
def test_get_or_compute(region):
x = region.get_or_compute('computed_key', 0)
assert 'computed_key' in region
assert region['computed_key'] == 0
assert x == 0
y = region.get_or_compute('computed_key2', lambda: 200)
assert y == 200
assert 'computed_key2' in region
assert region['computed_key2'] == 200
def test_invalidate_connections(region_cache):
region_cache.invalidate_connections()
assert region_cache._w_conn is None
assert region_cache._r_conn is None
def test_reconnect_backoff(region, region_cache):
region['key1'] = 0
region['key2'] = 1
region_cache._reconnect_backoff = 5 # 5 second backoff before trying to reconnect
region_cache.invalidate_connections()
assert region_cache.is_disconnected()
with pytest.raises(KeyError):
region['key1']
assert region_cache._w_conn is None
assert region_cache._r_conn is None
def test_timeout_with_context(region_with_timeout):
with region_with_timeout as r:
r['key1'] = 0
r['key2'] = 1
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None
assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0
assert 'key1' in region_with_timeout
assert 'key2' in region_with_timeout
import time
time.sleep(1)
assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None
assert 'key1' in region_with_timeout
assert 'key2' in region_with_timeout
time.sleep(1.5)
assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) == -2
assert 'key1' not in region_with_timeout
assert 'key2' not in region_with_timeout
def test_timeout(region_with_timeout):
region_with_timeout['key1'] = 0
region_with_timeout['key2'] = 1
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None
assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0
assert 'key1' in region_with_timeout
assert 'key2' in region_with_timeout
import time
time.sleep(1)
assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None
assert 'key1' in region_with_timeout
assert 'key2' in region_with_timeout
time.sleep(1.5)
assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) == -2
assert 'key1' not in region_with_timeout
assert 'key2' not in region_with_timeout
# make sure we can recreate the region.
region_with_timeout['key1'] = 0
region_with_timeout['key2'] = 1
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None
assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None
assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0
assert 'key1' in region_with_timeout
assert 'key2' in region_with_timeout
subregion = region_with_timeout.region("subregion")
subregion['key1'] = 0
subregion['key2'] = 1
assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None
assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None
assert subregion._region_cache.conn.ttl(subregion.name) > 0
assert 'key1' in subregion
assert 'key2' in subregion
import time
time.sleep(1)
assert subregion._region_cache.conn.ttl(subregion.name) > 0
assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None
assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None
assert 'key1' in subregion
assert 'key2' in subregion
time.sleep(1.5)
assert subregion._region_cache.conn.ttl(subregion.name) == -2
assert 'key1' not in subregion
assert 'key2' not in subregion
# make sure we can recreate the region.
subregion['key1'] = 0
subregion['key2'] = 1
assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None
assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None
assert subregion._region_cache.conn.ttl(subregion.name) > 0
assert 'key1' in subregion
assert 'key2' in subregion
``` |
{
"source": "jheasly/open-health-inspection-scraper",
"score": 3
} |
#### File: scraper/helpers/scoring.py
```python
import pymongo
import logging
import sys
class Scoring(object):
def __init__(self, settings):
self.score_logger = logging.getLogger('Scoring')
self.score_logger.setLevel(logging.INFO)
# Set up database connection (pulled from settings)
connection = pymongo.MongoClient(
host=settings['MONGODB_SERVER'],
port=int(settings['MONGODB_PORT'])
)
db = connection[settings['MONGODB_DB']]
if settings['MONGODB_USER'] and settings['MONGODB_PWD']:
db.authenticate(settings['MONGODB_USER'], settings['MONGODB_PWD'])
self.collection = db[settings['MONGODB_COLLECTION']]
'''
Weights for calculating the total vendor score.
The integer indicates how many inspections to use in the score and the
decimals are the weights for each inspection in descending order by date
'''
self.weights = {3: [0.6, 0.3, 0.1],
2: [0.7, 0.3],
1: [1.0]}
# The min and max range for the score
self.score_range = [0, 100]
# The per inspection scoring. The system is additive.
self.scoring = {'base': -2.0,
'critical': -1.5,
'repeat': -1.5,
'corrected': 0.5}
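        # Illustrative example with hypothetical numbers: an inspection with three
        # violations, one of which is critical and corrected, scores
        # 100 + 3*(-2.0) + (-1.5) + 0.5 = 93. A vendor whose two most recent
        # inspection scores are 93 (newest) and 88 would then score
        # 0.7*93 + 0.3*88 = 91.5 under the weights above.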
def score_vendors(self):
vendors = self.collection.find({}, {'inspections': 1, '_id': 1, 'guid': 1})
for vendor in vendors:
inspection_scores = []
if 'inspections' in vendor:
for inspection in vendor['inspections']:
score = self.score_range[1] # Set score to max
# These are the actual calculations for the per inspection score. Modify here to use
# different criteria
if 'violations' in inspection:
for violation in inspection['violations']:
score += self.scoring['base']
if violation['critical']:
score += self.scoring['critical']
if violation['repeat']:
score += self.scoring['repeat']
if violation['corrected']:
score += self.scoring['corrected']
score = max(score, self.score_range[0]) # Make sure score does not go below min
inspection_scores.append([inspection['date'], score])
# Update each inspection in DB with score
self.collection.update({'_id': vendor['_id'],
'inspections.date': inspection['date']},
{'$set': {'inspections.$.score': score}},
False,
False)
# Set the correct weight to use based on number of inspections the vendor has
num_inspections = min(max(self.weights), len(inspection_scores))
if num_inspections > 0:
current_weights = self.weights[num_inspections]
vendor_score = self.score_range[0]
for i, inspection in enumerate(sorted(inspection_scores, reverse=True)):
if i < num_inspections:
vendor_score += inspection[1]*current_weights[i]
self.collection.update({'_id': vendor['_id']},
{'$set': {'score': vendor_score}},
False,
False)
self.score_logger.info('Record ' + vendor['guid'] + ' scored ' + str(vendor_score))
```
#### File: scraper/helpers/vendor_helpers.py
```python
import scrapy
import re
import json
import logging
from pymongo import MongoClient
from urllib import parse, request
from slugify import slugify
from datetime import datetime
from scrapy.utils.project import get_project_settings
logger = logging.getLogger('Vendor Helpers')
def connect_db():
settings = get_project_settings()
connection = MongoClient(host=settings['MONGODB_SERVER'],
port=int(settings['MONGODB_PORT']))
db = connection[settings['MONGODB_DB']]
if settings['MONGODB_USER'] and settings['MONGODB_PWD']:
db.authenticate(settings['MONGODB_USER'], settings['MONGODB_PWD'])
return db[settings['MONGODB_COLLECTION']]
def get_urls(self,response):
# Returns absolute URLS from Javascript
scripts = response.xpath('//script/text()').extract()
urls = list(filter(None, map(get_function_urls, scripts)))
if len(urls) == 1:
return urls[0]
else:
return None
def get_function_urls(script):
# Extracts URLS from functions and returns as a list
    url_list = re.findall(r'(?<=function\s)(.*)(?:\(thisEvent\)\s{\n)(?:location\s\=\s\")(.*)(?:\")', script)
return [url[1] for url in url_list]
def vendor_address(location):
parts = location.split(',')
return ','.join(parts[0:(len(parts)-2)]).strip()
def vendor_city(location):
parts = location.split(',')
return parts[len(parts)-2].split('VA')[0].strip()
def vendor_search_name(name):
return slugify(name, separator = ' ')
def vendor_guid(url):
if url:
        matches = re.match(r'(http://healthspace.com/Clients/VDH/)(.*)(/web.nsf/formFacility.xsp\?id=)(.*)', url, flags=re.I)
if matches:
return matches.group(4)
return None
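# Hypothetical example: for a URL such as
# 'http://healthspace.com/Clients/VDH/Norfolk/web.nsf/formFacility.xsp?id=ABC-123',
# vendor_guid() returns the trailing id, 'ABC-123' (group 4 of the match).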
def get_lat_lng(address):
existing_lat_lng = address_compare(address)
if existing_lat_lng is None:
if address['street'] is not None and address['city'] is not None:
# Take a dict of address parts and call SmartyStreets to geocode it.
settings = get_project_settings()
ss_id = settings['SS_ID']
ss_token = settings['SS_TOKEN']
if ss_id is not None and ss_token is not None:
# If address is a PO Box, skip
                if re.search(r'P(\.)?O(\.)?(\sBox\s)[0-9]+', address['street']) is None and address['street'] != '':
logger.debug(address)
url = 'https://api.smartystreets.com/street-address?'
url += 'state=' + parse.quote(address['state'])
url += '&city=' + parse.quote(address['city'])
url += '&auth-id=' + str(ss_id)
url += '&auth-token=' + str(ss_token)
url += '&street=' + parse.quote(address['street'])
response = request.urlopen(url)
data = json.loads(response.read().decode('utf-8'))
if len(data) == 1:
logger.debug('Geocoded ' + str(address))
lat_lng = {'type': 'Point',
'coordinates': [data[0]['metadata']['longitude'], data[0]['metadata']['latitude']]}
return lat_lng
else:
logger.debug('Could not geocode address ' + str(address))
logger.debug(response.status)
logger.debug(response.info())
logger.debug(data)
return None
logger.debug('Address is current and has already been geocoded')
return existing_lat_lng
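# The SmartyStreets request assembled above takes the shape (illustrative values only):
# https://api.smartystreets.com/street-address?state=VA&city=Norfolk&auth-id=ID&auth-token=TOKEN&street=123%20Main%20St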
def needs_geocoding(address):
existing_lat_lng = address_compare(address)
if existing_lat_lng is None:
if address['street'] is not None and address['city'] is not None:
            if re.search(r'P(\.)?O(\.)?(\sBox\s)[0-9]+', address['street']) is None and address['street'] != '':
return True
return None
def needs_geocoding_date(address):
existing_lat_lng = address_compare(address)
if existing_lat_lng is None:
if address['street'] is not None and address['city'] is not None:
            if re.search(r'P(\.)?O(\.)?(\sBox\s)[0-9]+', address['street']) is None and address['street'] != '':
return datetime.utcnow()
return None
def address_compare(address):
collection = connect_db()
existing = collection.find_one({
'guid': address['guid'],
'address': address['street'],
'city': address['city'],
'geo': { '$exists': True }
}, {'geo': 1})
if existing is not None:
return existing['geo']
return None
def vendor_category(type):
# Lookup the vendor type in a dict and return a broader category
categories = {'Adult care home food service': 'Medical',
'Adult Care Home Food Service': 'Medical',
'Adult Day Care Food Service': 'Medical',
'Bed & Breakfast': 'Hospitality',
'Bed & Breakfast Food Service': 'Hospitality',
'Carry Out Food Service Only': 'Grocery',
'Caterer': 'Restaurant',
'Child Care Food Service': 'Education',
'Commissary': 'Grocery',
'Convenience Store Food Service': 'Grocery',
'Dept. of Juvenile Justice Food Service': 'Government',
'Fast Food Restaurant': 'Restaurant',
'Fast Food Restaurant/Caterer': 'Restaurant',
'Full Service Restaurant': 'Restaurant',
'Full Service Restaurant/Caterer': 'Restaurant',
'Grocery Store Food Service': 'Grocery',
'Group Home Food Service': 'Medical',
'Hospital Food Service': 'Medical',
'Hotel Continental Breakfast': 'Hospitality',
'Hotel continental breakfast': 'Hospitality',
'Institution': 'Government',
'Jail Food Service': 'Government',
'Local Convenience Store Food Service': 'Grocery',
'Local Grocery Store Food Service': 'Grocery',
'Mobile Food Unit': 'Mobile Food',
'Mobile food unit': 'Mobile Food',
'Nursing Home Food Service': 'Medical',
'Other Food Service': 'Other',
'Private College Food Service': 'Education',
'Private Elementary School Food Service': 'Education',
'Private Elementry School Food Service': 'Education',
'Private High School Food Service': 'Education',
'Private Middle or High School Food Service': 'Education',
'Public Elementary School Food Service': 'Education',
'Public Elementry School Food Service': 'Education',
'Public Middle or High School Food Service': 'Education',
'Public Primary School Food Service': 'Education',
'Public school kitchen': 'Education',
'Residential Child Care Institution Food Service': 'Education',
'Restaurant': 'Restaurant',
'Seasonal Fast Food Restaurant': 'Restaurant',
'Seasonal Full Service Restaurant': 'Restaurant',
'Snack Bar': 'Grocery',
'State College Food Service': 'Education',
'State Institution Food Service': 'Government',
'Summer Camp Food Service': 'Education',
'Summer camp kitchen': 'Education',
'Summer Food Service Program Feeding Site': 'Education',
'Summer Food Service Program Kitchen': 'Education'}
if type in categories:
return categories[type]
else:
return 'Other'
```
#### File: open-health-inspection-scraper/scraper/items.py
```python
from scrapy.item import Item, Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Identity, MapCompose, TakeFirst, Join
from scraper.helpers.vendor_helpers import *
from slugify import slugify
from datetime import datetime
import re
def format_date(date_string):
return datetime.strptime(date_string, "%b %d, %Y")
class VendorItem(Item):
locality_id = Field(
output_processor=TakeFirst()
)
locality = Field(
output_processor=TakeFirst()
)
locality_url = Field(
output_processor=TakeFirst()
)
vendor_id = Field(
output_processor=TakeFirst()
)
guid = Field(
input_processor=MapCompose(vendor_guid),
output_processor=TakeFirst()
)
name = Field(
output_processor=TakeFirst()
)
url = Field(
output_processor=TakeFirst()
)
address = Field(
input_processor=MapCompose(vendor_address),
output_processor=TakeFirst()
)
city = Field(
input_processor=MapCompose(vendor_city),
output_processor=TakeFirst()
)
vendor_location = Field(
output_processor=TakeFirst()
)
last_inspection_date = Field(
input_processor=MapCompose(format_date),
output_processor=TakeFirst()
)
search_name = Field(
input_processor=MapCompose(vendor_search_name),
output_processor=TakeFirst()
)
type = Field(
output_processor=TakeFirst()
)
status = Field(
output_processor=TakeFirst()
)
phone = Field(
output_processor=TakeFirst()
)
slug = Field(
input_processor=MapCompose(slugify),
output_processor=TakeFirst()
)
category = Field(
input_processor=MapCompose(vendor_category),
output_processor=TakeFirst()
)
geo = Field(
# disable geocoding until SmartyStreets replacement is found
#input_processor=MapCompose(get_lat_lng),
#output_processor=TakeFirst()
)
needs_geocoding = Field(
input_processor=MapCompose(needs_geocoding),
output_processor=TakeFirst()
)
needs_geocoding_date = Field(
input_processor=MapCompose(needs_geocoding_date),
output_processor=TakeFirst()
)
inspections = Field()
class VendorItemLoader(ItemLoader):
default_item_class = VendorItem
class InspectionItem(Item):
vendor_guid = Field(
output_processor=TakeFirst()
)
date = Field(
input_processor=MapCompose(format_date),
output_processor=TakeFirst()
)
type = Field(
output_processor=TakeFirst()
)
risk_rating = Field(
output_processor=TakeFirst()
)
followup_required = Field(
output_processor=TakeFirst()
)
comments = Field(
input_processor=Join(''),
output_processor=TakeFirst()
)
violations = Field()
class InspectionItemLoader(ItemLoader):
default_item_class = InspectionItem
```
#### File: open-health-inspection-scraper/scraper/pipelines.py
```python
import logging
from pymongo import MongoClient
from scrapy.exceptions import DropItem
from scraper.items import VendorItem, InspectionItem
from datetime import datetime
from scrapy.utils.project import get_project_settings
logger = logging.getLogger('Mongo Pipeline')
class MongoDBPipeline(object):
def __init__(self):
settings = get_project_settings()
connection = MongoClient(host=settings['MONGODB_SERVER'],
port=int(settings['MONGODB_PORT']))
db = connection[settings['MONGODB_DB']]
if settings['MONGODB_USER'] and settings['MONGODB_PWD']:
db.authenticate(settings['MONGODB_USER'], settings['MONGODB_PWD'])
self.collection = db[settings['MONGODB_COLLECTION']]
def process_item(self, item, spider):
# Vendor Data
if isinstance(item, VendorItem):
vendor = dict(item)
# Check if vendor exists, if so just update
if self.collection.find({'guid': item['guid']}).count() > 0:
# Remove empty inspections array for existing vendors
vendor.pop('inspections', None)
self.collection.update({
'guid': vendor['guid']
}, {'$set': vendor})
logger.debug('Updated vendor ' + str(vendor['guid']))
else:
# If the vendor is new insert
self.collection.insert_one(vendor)
logger.debug('Added new vendor ' + str(vendor['guid']))
# Inspection Data
if isinstance(item, InspectionItem):
inspection = dict(item)
            # Remove vendor_guid because we don't want it in the dict;
            # it was only passed along so we can look up the vendor below.
vendor_guid = inspection.pop('vendor_guid')
# Make sure the vendor exists, if not log a warning
if self.collection.find({'guid': vendor_guid}).count() > 0:
# Check if the inspection exists
existing = self.collection.find({
'guid': vendor_guid,
'inspections': {
'$elemMatch': {
'date': inspection['date']
}
}
}, {'inspections': {
'$elemMatch': {
'date': inspection['date']
}
}
})
# If it exists, update
if existing.count() > 0:
result = self.collection.update({
'guid': vendor_guid,
'inspections': {
'$elemMatch': {
'date': inspection['date']
}
}
}, {'$set': {
'inspections.$': inspection
}
})
                    if result['n'] != 1:
logger.warn('Could not update inspection from ' + inspection['date'].strftime("%m/%d/%Y") + ' for vendor ' + vendor_guid)
else:
logger.debug('Updated inspection from ' + inspection['date'].strftime("%m/%d/%Y") + ' for vendor ' + vendor_guid)
# If it is new, push the inspection into the inspections array
else:
result = self.collection.update({
'guid': vendor_guid
}, {
'$push': {'inspections': inspection}
})
# Check to see that it inserted correctly
                    if result['n'] != 1:
logger.warn('Could not add inspection from ' + inspection['date'].strftime("%m/%d/%Y") + ' for vendor ' + vendor_guid)
else:
logger.debug('Added new inspection from ' + inspection['date'].strftime("%m/%d/%Y") + ' for vendor ' + vendor_guid)
else:
logger.warn('Attempted to add/update inspection but could not find vendor ' + vendor_guid)
```
#### File: scraper/spiders/healthspace_spider.py
```python
import scrapy
import logging
import shutil
from scrapy import Selector, Request
from scraper.helpers import vendor_helpers, inspection_helpers
from scraper.helpers.scoring import Scoring
from scraper.items import VendorItemLoader, InspectionItemLoader
from urllib import parse
logger = logging.getLogger('Healthspace Spider')
class HealthSpaceSpider(scrapy.Spider):
name = "healthspace"
allowed_domains = ["healthspace.com"]
start_urls = [
"https://healthspace.com/clients/oregon/state/statewebportal.nsf/module_healthRegions.xsp?showview=region"
]
def closed(self, reason):
# If the spider completes successfully, cleanup the job
# so we can start fresh the next time
if reason == 'finished' and 'JOBDIR' in self.settings:
shutil.rmtree(self.settings['JOBDIR'])
scoring = Scoring(self.settings)
scoring.score_vendors()
def parse(self, response):
# Initial parse of district pages
localities = response.xpath('//tr/td')
for locality in localities:
locality_info = {
'name': locality.xpath('./a/text()').extract_first(),
'url': locality.xpath('./a/@href').extract_first(),
'id': locality.xpath('./a/@id').extract_first()
}
if locality_info['url']:
#Skip the locality splash page
locality_info['url'] = parse.urljoin(locality_info['url'], 'web.nsf/module_facilities.xsp?module=Food')
yield Request(locality_info['url'], callback=self.locality_catalog_parse,
meta={'locality_info': locality_info,
'page_num': 1,
'cookiejar': locality_info['name']})
# Each locality needs a separate cookiejar
# so that paging works correctly in the
# catalog parse.
def locality_catalog_parse(self,response):
'''
        Receives the locality_info and the main vendor catalog page.
        Extracts all vendor URLs from the catalog page and sends each one
        to the vendor parser.
'''
locality_info = response.meta['locality_info']
logger.info('Parsing ' + str(locality_info['name']) + ' Page ' + str(response.meta['page_num']))
# Check if another page is available for this locality, if so send
# it back to the parser. Uses the 'Next' button on the locality
# page, which triggers a POST request to get more vendors.
if response.xpath('//a[contains(@id, "Next__lnk")]'):
ajax_id = 'view:_id1:_id258:panel1'
page_body = {
'$$viewid': response.xpath('//form/input[@name="$$viewid"]/@value').extract_first(),
'$$xspexecid': response.xpath('//a[contains(@id, "Next__lnk")]/parent::span/parent::div/@id').extract_first(),
'$$xspsubmitid': response.xpath('//a[contains(@id, "Next__lnk")]/parent::span/@id').extract_first(),
'$$xspsubmitscroll': '0|0',
'$$xspsubmitvalue': response.xpath('//form/input[@name="$$xspsubmitvalue"]/@value').extract_first(),
}
            # POST body includes a field that references its own value.
page_body[response.xpath('//form/@id').extract_first()] = response.xpath('//form/@id').extract_first()
page_url = response.url + '&$$ajaxid=' + parse.quote('view:_id1:_id258:panel1')
yield Request(response.url, callback=self.locality_catalog_parse,
method='POST',
body=parse.urlencode(page_body),
meta={'locality_info':locality_info,
'page_num': response.meta['page_num']+1,
'cookiejar': response.meta['cookiejar']},
dont_filter=True) # Need dont_filter so the job
# tracker will accept the same
# URL more than once
# Get HTML links
urls = response.xpath('//tr/td/a/@href').extract()
# Get Javascript links
js_urls = vendor_helpers.get_urls(self,response)
if js_urls is not None:
urls.extend(js_urls)
#Push to Vendor Pages
for url in urls:
vendor_url = response.urljoin(url)
yield Request(vendor_url, callback=self.vendor_parser,
meta={'locality_info':locality_info,
'cookiejar': 'vendors'})
# Create a separate cookiejar for
# vendors so that the locality pages
# don't lose state.
def vendor_parser(self,response):
'''
        Extracts core vendor information from the page, which is then processed into MongoDB.
        Also extracts links to inspections and passes them to the inspection parser.
'''
locality_info = response.meta['locality_info']
vendor_loader = VendorItemLoader(response=response)
vendor_loader.add_value('locality_id', locality_info['id'])
vendor_loader.add_value('locality', locality_info['name'])
vendor_loader.add_value('locality_url', locality_info['url'])
vendor_loader.add_xpath('vendor_id', '//tr/td/span[contains(@id,"documentIdCF1")]/text()')
vendor_loader.add_value('guid', response.url)
vendor_loader.add_xpath('name', '//tr/td/span[contains(@id,"nameCF1")]/text()')
vendor_loader.add_value('search_name', vendor_loader.get_output_value('name'))
vendor_loader.add_value('url', response.url)
vendor_loader.add_xpath('vendor_location', '//tr/td/span[contains(@id,"facilityAddressCF1")]/text()')
vendor_loader.add_value('address', vendor_loader.get_output_value('vendor_location'))
vendor_loader.add_value('city', vendor_loader.get_output_value('vendor_location'))
vendor_loader.add_xpath('last_inspection_date', '//tr/td/span[contains(@id,"lastInspectionCF1")]/text()')
vendor_loader.add_xpath('type', '//tr/td/span[contains(@id,"subTypeCF1")]/text()')
vendor_loader.add_xpath('category', '//tr/td/span[contains(@id,"subTypeCF1")]/text()')
vendor_loader.add_xpath('status', '//tr/td/span[contains(@id,"statusCF1")]/text()')
vendor_loader.add_xpath('phone', '//tr/td/span[contains(@id,"phoneCF1")]/text()')
vendor_loader.add_value('slug', vendor_loader.get_output_value('name') + ' ' + vendor_loader.get_output_value('vendor_location'))
address = {
'guid': vendor_loader.get_output_value('guid'),
'street': vendor_loader.get_output_value('address'),
'city': vendor_loader.get_output_value('city'),
'state': 'VA'
}
# Removed geocoding until a SmartyStreets replacement is found
#vendor_loader.add_value('geo', address)
vendor_loader.add_value('needs_geocoding', address)
vendor_loader.add_value('needs_geocoding_date', address)
# Load vendor info
yield vendor_loader.load_item()
# Grab inspection links and hand to parser.
# Get HTML links
urls = response.xpath('//tr/td/a/@href').extract()
# Get Javascript links
js_urls = inspection_helpers.get_inspection_urls(self,response)
if js_urls is not None:
urls.extend(js_urls)
# Parse vendor inspections
for url in urls:
inspection_url = response.urljoin(url)
yield Request(inspection_url, callback=self.inspection_parser,
meta={'vendor_guid':vendor_loader.get_output_value('guid'),
'cookiejar': 'inspections'})
# Create a separate cookiejar for
# inspections so that the locality pages
# don't lose state.
def inspection_parser(self, response):
'''
Extracts core inspection and violation data which is processed into MongoDB.
'''
inspection_loader = InspectionItemLoader(response=response)
inspection_loader.add_value('vendor_guid', response.meta['vendor_guid'])
inspection_loader.add_xpath('date', '//*[contains(@id,"inspectionDateCF1")]/text()')
inspection_loader.add_xpath('type', '//*[contains(@id,"inspTypeCF1")]/text()')
inspection_loader.add_xpath('risk_rating', '//*[contains(@id,"riskRatingEB1")]/text()')
inspection_loader.add_xpath('followup_required', '//*[contains(@id,"fuiReqCF1")]/text()')
inspection_loader.add_xpath('comments', '//*[contains(@id, "commentsCF1")]/div/font/text()')
violations = []
violation_items = response.xpath('//div[contains(@class,"violation-panel")]')
for violation_item in violation_items:
observation_title = violation_item.xpath('.//span[contains(@id, "violationCF3")]/text()').extract_first()
critical = violation_item.xpath('.//a[contains(@id,"violationCritSetLink1")]/text()').extract_first()
violations.append({
'code': violation_item.xpath('.//span[contains(@id,"violationCodeCF1")]/text()').extract_first(),
'description': violation_item.xpath('.//span[contains(@id, "violationCF9")]/text()').extract_first(),
'observation': violation_item.xpath('.//span[contains(@id, "violationCF4")]/text()').extract_first(),
'correction': violation_item.xpath('.//span[contains(@id, "violationCF9")]/text()').extract_first(),
'corrected': "(CORRECTED DURING INSPECTION)" in observation_title,
                'critical': critical == "critical",
'repeat': "(REPEAT)" in observation_title
})
inspection_loader.add_value('violations', violations)
yield inspection_loader.load_item()
``` |
{
"source": "jhechavarria/WindowedPy",
"score": 3
} |
#### File: WindowedPy/windows/StartPage.py
```python
import tkinter as tk
from tkinter import ttk
from windows._Window import _Window
class StartPage(_Window):
def __init__(self, parent, controller):
_Window.__init__(self, parent, controller)
frm = tk.Frame(self)
frm.pack(side="top", fill="both")
btn1 = ttk.Button(frm, text="About Page >", command=lambda:self.goto("AboutPage"))
btn1.pack(side="left", fill="both", expand=True)
btn2 = ttk.Button(frm, text="Config Page >", command=lambda:self.goto("ConfigPage"))
btn2.pack(side="right", fill="both", expand=True)
lbl = ttk.Label(self, text="Welcome to My App!")
lbl.pack(padx=10, pady=20)
def call(self):
self.title("Start Page")
``` |
{
"source": "jheckjr/practice",
"score": 3
} |
#### File: practice/hackerrank/modifiedkaprekar.py
```python
import sys
def process(f):
    read = f.readline if f is not None else sys.stdin.readline
    p = int(read().strip())
    q = int(read().strip()) + 1
    results = []
    for i in range(p, q):
        square = i * i
        num_digits = len(str(square))
        if num_digits == 1:
            total = square
        else:
            # Split the square so the right part keeps the trailing
            # ceil(num_digits / 2) digits and the left part keeps the rest.
            split = num_digits // 2
            total = int(str(square)[:split]) + int(str(square)[split:])
        if total == i:
            print(i, end=' ')
            results.append(i)
    if not results:
        print('INVALID RANGE')
    return results
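# Worked example: 45 ** 2 == 2025 splits into 20 and 25, and 20 + 25 == 45, so 45
# is a modified Kaprekar number; 10 ** 2 == 100 splits into 1 and 00, and
# 1 + 0 != 10, so 10 is not.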
def test():
with open('funnystring.txt', 'r') as f:
result = process(f)
assert result == [1, 9, 45, 55, 99]
print("Test passed")
test()
```
#### File: practice/hackerrank/selfbalancingtree.py
```cpp
/* Node is defined as :
typedef struct node
{
int val;
struct node* left;
struct node* right;
int ht;
} node; */
void postorder (node * p, int indent=0)
{
if(p != NULL) {
if(p->right) {
postorder(p->right, indent+4);
}
if (indent) {
cout << setw(indent) << ' ';
}
if (p->right) cout<<" /\n" << setw(indent) << ' ';
cout<< p->val << " " << p->ht << "\n ";
if(p->left) {
cout << setw(indent) << ' ' <<" \\\n";
postorder(p->left, indent+4);
}
}
}
node * add(node * n, int val)
{
    if (n == nullptr) {
        node * new_node = (node*)malloc(sizeof(node));
        new_node->val = val;
        new_node->left = nullptr;
        new_node->right = nullptr;
        new_node->ht = 0;
        return new_node;
    } else if (val < n->val) {
        n->left = add(n->left, val);
        n->ht = n->left->ht + 1;
    } else if (n->val < val) {
        n->right = add(n->right, val);
        n->ht = n->right->ht + 1;
    }
    return n;
}
int get_balance(node * left, node * right) {
if (left != nullptr && right != nullptr) {
return left->ht - right->ht;
} else if (left != nullptr) {
return left->ht + 1;
} else if (right != nullptr) {
return (-1 * right->ht) - 1;
} else {
return 0;
}
}
node * rotate_left(node * pivot);
node * rotate_right(node * pivot);
node * rotate_left(node * pivot) {
node * rotator = pivot->right;
if (rotator->left != nullptr && rotator->right != nullptr) {
if (rotator->right->ht < rotator->left->ht) {
rotator = rotate_right(rotator);
}
} else if (rotator->left != nullptr) {
rotator = rotate_right(rotator);
}
node * inside = rotator->left;
pivot->right = inside;
pivot->ht -= 2;
rotator->left = pivot;
if (pivot->ht < 0) {
pivot->ht += 1;
rotator->ht += 1;
}
return rotator;
}
node * rotate_right(node * pivot) {
node * rotator = pivot->left;
if (rotator->left != nullptr && rotator->right != nullptr) {
if (rotator->left->ht < rotator->right->ht) {
rotator = rotate_left(rotator);
}
} else if (rotator->right != nullptr) {
rotator = rotate_left(rotator);
}
node * inside = rotator->right;
pivot->left = inside;
pivot->ht -= 2;
rotator->right = pivot;
if (pivot->ht < 0) {
pivot->ht += 1;
rotator->ht += 1;
}
return rotator;
}
node * rotate_tree(node * n, bool imbal, int val) {
int balance = get_balance(n->left, n->right);
node * r;
// Recurse right if right bigger than left
if (balance < -1) {
r = rotate_tree(n->right, true, val);
// r is rotator, n is pivot
if (r == nullptr) {
return rotate_left(n);
} else {
n->right = r;
return n;
}
}
// Recurse left if left bigger than right
else if (1 < balance) {
r = rotate_tree(n->left, true, val);
// r is rotator, n is pivot
if (r == nullptr) {
return rotate_right(n);
} else {
n->left = r;
return n;
}
}
// Return null if rotator point found
else {
if (!imbal) {
if (val < n->val && n->left != nullptr) {
n->left = rotate_tree(n->left, false, val);
} else if (val > n->val && n->right != nullptr) {
n->right = rotate_tree(n->right, false, val);
}
return n;
} else {
return nullptr;
}
}
}
node * insert(node * root, int val)
{
    root = add(root, val);
    //postorder(root);
    //cout << "\n\n\n";
    root = rotate_tree(root, false, val);
    //postorder(root);
    return root;
}
//input
// 3
// / \
// 2 4
// \
// 5
// output
// 3
// / \
// 2 5
// / \
// 4 6
``` |
{
"source": "jh-ecomp/jira-etl",
"score": 3
} |
#### File: jh-ecomp/jira-etl/jira_requests.py
```python
import requests
from requests import Session
from requests.auth import HTTPBasicAuth
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from requests.exceptions import Timeout
import traceback
def mount_jira_session(jira_domain):
auth = HTTPBasicAuth('fake_user', '<PASSWORD>')
url = f'{jira_domain}'
retries = Retry(total=5, backoff_factor=1)
session = Session()
session.mount('http://', HTTPAdapter(max_retries=retries))
response = None
try:
response = session.get(url, auth=auth)
except Timeout as t:
print(f'Session request timeout {t}')
traceback.print_exc()
except Exception as e:
print(f'Session request exception {e}')
traceback.print_exc()
return response
def get_jira_main_request(main_request, jql, fields, expand, start_at, max_results, cookies):
url = f'{main_request}?jql={jql}&fields={fields}&expand={expand}&startAt={start_at}&maxResults={max_results}'
response = None
try:
response = requests.get(url=url, cookies=cookies, timeout=60)
except Timeout as t:
print(f'Jira request timeout {t}')
traceback.print_exc()
except Exception as e:
print(f'Jira request exception {e}')
traceback.print_exc()
if response is None:
return response
try:
issues = response.json()
except Exception as e:
        print(f'Failed to load response content: {e}')
issues = None
return issues
def get_jira_issues(jira_config, cookies):
issues_list = list()
start_at = int(jira_config['start_at'])
max_results = int(jira_config['max_results'])
total = 0
while True:
jira_issues = get_jira_main_request(jira_config['main_request'], jira_config['jql'], jira_config['fields'],
jira_config['expand'], start_at, max_results, cookies)
if jira_issues is not None:
total = jira_issues['total']
for issue in jira_issues['issues']:
issues_list.append(issue)
if start_at + max_results >= total:
break
start_at += max_results
return issues_list
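# A minimal sketch of the jira_config shape expected above (all values here are
# hypothetical placeholders, not taken from any real project):
# jira_config = {
#     'main_request': 'https://jira.example.com/rest/api/2/search',
#     'jql': 'project = FOO AND status = Done',
#     'fields': 'summary,status',
#     'expand': 'changelog',
#     'start_at': '0',
#     'max_results': '50',
# }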
``` |
{
"source": "jheddings/hass-core",
"score": 2
} |
#### File: components/history_stats/helpers.py
```python
from __future__ import annotations
import datetime
import logging
import math
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.template import Template
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DURATION_START = "start"
DURATION_END = "end"
@callback
def async_calculate_period(
duration: datetime.timedelta | None,
start_template: Template | None,
end_template: Template | None,
) -> tuple[datetime.datetime, datetime.datetime]:
"""Parse the templates and return the period."""
bounds: dict[str, datetime.datetime | None] = {
DURATION_START: None,
DURATION_END: None,
}
for bound, template in (
(DURATION_START, start_template),
(DURATION_END, end_template),
):
# Parse start
if template is None:
continue
try:
rendered = template.async_render()
except (TemplateError, TypeError) as ex:
if ex.args and not ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"
):
_LOGGER.error("Error parsing template for field %s", bound, exc_info=ex)
raise
if isinstance(rendered, str):
bounds[bound] = dt_util.parse_datetime(rendered)
if bounds[bound] is not None:
continue
try:
bounds[bound] = dt_util.as_local(
dt_util.utc_from_timestamp(math.floor(float(rendered)))
)
except ValueError as ex:
raise ValueError(
f"Parsing error: {bound} must be a datetime or a timestamp: {ex}"
) from ex
start = bounds[DURATION_START]
end = bounds[DURATION_END]
# Calculate start or end using the duration
if start is None:
assert end is not None
assert duration is not None
start = end - duration
if end is None:
assert start is not None
assert duration is not None
end = start + duration
return start, end
def pretty_duration(hours: float) -> str:
"""Format a duration in days, hours, minutes, seconds."""
seconds = int(3600 * hours)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return "%dd %dh %dm" % (days, hours, minutes)
if hours > 0:
return "%dh %dm" % (hours, minutes)
return "%dm" % minutes
def pretty_ratio(
value: float, period: tuple[datetime.datetime, datetime.datetime]
) -> float:
"""Format the ratio of value / period duration."""
if len(period) != 2 or period[0] == period[1]:
return 0.0
ratio = 100 * 3600 * value / (period[1] - period[0]).total_seconds()
return round(ratio, 1)
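# For example, 6 hours of accumulated value over a 24-hour period gives
# pretty_ratio(6, (start, start + datetime.timedelta(hours=24))) == 25.0.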
def floored_timestamp(incoming_dt: datetime.datetime) -> float:
"""Calculate the floored value of a timestamp."""
return math.floor(dt_util.as_timestamp(incoming_dt))
```
#### File: components/zwave_js/services.py
```python
from __future__ import annotations
import asyncio
import logging
from typing import Any
import voluptuous as vol
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import CommandStatus
from zwave_js_server.exceptions import SetValueFailed
from zwave_js_server.model.node import Node as ZwaveNode
from zwave_js_server.model.value import get_value_id
from zwave_js_server.util.multicast import async_multicast_set_value
from zwave_js_server.util.node import (
async_bulk_set_partial_config_parameters,
async_set_config_parameter,
)
from homeassistant.components.group import expand_entity_ids
from homeassistant.const import ATTR_AREA_ID, ATTR_DEVICE_ID, ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import const
from .config_validation import BITMASK_SCHEMA, VALUE_SCHEMA
from .helpers import async_get_nodes_from_targets
_LOGGER = logging.getLogger(__name__)
def parameter_name_does_not_need_bitmask(
val: dict[str, int | str | list[str]]
) -> dict[str, int | str | list[str]]:
"""Validate that if a parameter name is provided, bitmask is not as well."""
if (
isinstance(val[const.ATTR_CONFIG_PARAMETER], str)
and const.ATTR_CONFIG_PARAMETER_BITMASK in val
):
raise vol.Invalid(
"Don't include a bitmask when a parameter name is specified",
path=[const.ATTR_CONFIG_PARAMETER, const.ATTR_CONFIG_PARAMETER_BITMASK],
)
return val
def broadcast_command(val: dict[str, Any]) -> dict[str, Any]:
"""Validate that the service call is for a broadcast command."""
if val.get(const.ATTR_BROADCAST):
return val
raise vol.Invalid(
"Either `broadcast` must be set to True or multiple devices/entities must be "
"specified"
)
class ZWaveServices:
"""Class that holds our services (Zwave Commands) that should be published to hass."""
def __init__(
self,
hass: HomeAssistant,
ent_reg: er.EntityRegistry,
dev_reg: dr.DeviceRegistry,
) -> None:
"""Initialize with hass object."""
self._hass = hass
self._ent_reg = ent_reg
self._dev_reg = dev_reg
@callback
def async_register(self) -> None:
"""Register all our services."""
@callback
def get_nodes_from_service_data(val: dict[str, Any]) -> dict[str, Any]:
"""Get nodes set from service data."""
val[const.ATTR_NODES] = async_get_nodes_from_targets(
self._hass, val, self._ent_reg, self._dev_reg
)
return val
@callback
def has_at_least_one_node(val: dict[str, Any]) -> dict[str, Any]:
"""Validate that at least one node is specified."""
if not val.get(const.ATTR_NODES):
raise vol.Invalid(f"No {const.DOMAIN} nodes found for given targets")
return val
@callback
def validate_multicast_nodes(val: dict[str, Any]) -> dict[str, Any]:
"""Validate the input nodes for multicast."""
nodes: set[ZwaveNode] = val[const.ATTR_NODES]
broadcast: bool = val[const.ATTR_BROADCAST]
if not broadcast:
has_at_least_one_node(val)
# User must specify a node if they are attempting a broadcast and have more
# than one zwave-js network.
if (
broadcast
and not nodes
and len(self._hass.config_entries.async_entries(const.DOMAIN)) > 1
):
raise vol.Invalid(
"You must include at least one entity or device in the service call"
)
first_node = next((node for node in nodes), None)
# If any nodes don't have matching home IDs, we can't run the command because
# we can't multicast across multiple networks
if first_node and any(
node.client.driver.controller.home_id
!= first_node.client.driver.controller.home_id
for node in nodes
):
raise vol.Invalid(
"Multicast commands only work on devices in the same network"
)
return val
@callback
def validate_entities(val: dict[str, Any]) -> dict[str, Any]:
"""Validate entities exist and are from the zwave_js platform."""
val[ATTR_ENTITY_ID] = expand_entity_ids(self._hass, val[ATTR_ENTITY_ID])
invalid_entities = []
for entity_id in val[ATTR_ENTITY_ID]:
entry = self._ent_reg.async_get(entity_id)
if entry is None or entry.platform != const.DOMAIN:
const.LOGGER.info(
"Entity %s is not a valid %s entity.", entity_id, const.DOMAIN
)
invalid_entities.append(entity_id)
# Remove invalid entities
val[ATTR_ENTITY_ID] = list(set(val[ATTR_ENTITY_ID]) - set(invalid_entities))
if not val[ATTR_ENTITY_ID]:
raise vol.Invalid(f"No {const.DOMAIN} entities found in service call")
return val
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_SET_CONFIG_PARAMETER,
self.async_set_config_parameter,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_AREA_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Any(
vol.Coerce(int), cv.string
),
vol.Optional(const.ATTR_CONFIG_PARAMETER_BITMASK): vol.Any(
vol.Coerce(int), BITMASK_SCHEMA
),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(
vol.Coerce(int), BITMASK_SCHEMA, cv.string
),
},
cv.has_at_least_one_key(
ATTR_DEVICE_ID, ATTR_ENTITY_ID, ATTR_AREA_ID
),
parameter_name_does_not_need_bitmask,
get_nodes_from_service_data,
has_at_least_one_node,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_BULK_SET_PARTIAL_CONFIG_PARAMETERS,
self.async_bulk_set_partial_config_parameters,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_AREA_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(
vol.Coerce(int),
{
vol.Any(
vol.Coerce(int), BITMASK_SCHEMA, cv.string
): vol.Any(vol.Coerce(int), BITMASK_SCHEMA, cv.string)
},
),
},
cv.has_at_least_one_key(
ATTR_DEVICE_ID, ATTR_ENTITY_ID, ATTR_AREA_ID
),
get_nodes_from_service_data,
has_at_least_one_node,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_REFRESH_VALUE,
self.async_poll_value,
schema=vol.Schema(
vol.All(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(
const.ATTR_REFRESH_ALL_VALUES, default=False
): cv.boolean,
},
validate_entities,
)
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_SET_VALUE,
self.async_set_value,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_AREA_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(const.ATTR_COMMAND_CLASS): vol.Coerce(int),
vol.Required(const.ATTR_PROPERTY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_PROPERTY_KEY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_ENDPOINT): vol.Coerce(int),
vol.Required(const.ATTR_VALUE): VALUE_SCHEMA,
vol.Optional(const.ATTR_WAIT_FOR_RESULT): cv.boolean,
vol.Optional(const.ATTR_OPTIONS): {cv.string: VALUE_SCHEMA},
},
cv.has_at_least_one_key(
ATTR_DEVICE_ID, ATTR_ENTITY_ID, ATTR_AREA_ID
),
get_nodes_from_service_data,
has_at_least_one_node,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_MULTICAST_SET_VALUE,
self.async_multicast_set_value,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_AREA_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(const.ATTR_BROADCAST, default=False): cv.boolean,
vol.Required(const.ATTR_COMMAND_CLASS): vol.Coerce(int),
vol.Required(const.ATTR_PROPERTY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_PROPERTY_KEY): vol.Any(
vol.Coerce(int), str
),
vol.Optional(const.ATTR_ENDPOINT): vol.Coerce(int),
vol.Required(const.ATTR_VALUE): VALUE_SCHEMA,
vol.Optional(const.ATTR_OPTIONS): {cv.string: VALUE_SCHEMA},
},
vol.Any(
cv.has_at_least_one_key(
ATTR_DEVICE_ID, ATTR_ENTITY_ID, ATTR_AREA_ID
),
broadcast_command,
),
get_nodes_from_service_data,
validate_multicast_nodes,
),
),
)
self._hass.services.async_register(
const.DOMAIN,
const.SERVICE_PING,
self.async_ping,
schema=vol.Schema(
vol.All(
{
vol.Optional(ATTR_AREA_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_DEVICE_ID): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
},
cv.has_at_least_one_key(
ATTR_DEVICE_ID, ATTR_ENTITY_ID, ATTR_AREA_ID
),
get_nodes_from_service_data,
has_at_least_one_node,
),
),
)
async def async_set_config_parameter(self, service: ServiceCall) -> None:
"""Set a config value on a node."""
nodes = service.data[const.ATTR_NODES]
property_or_property_name = service.data[const.ATTR_CONFIG_PARAMETER]
property_key = service.data.get(const.ATTR_CONFIG_PARAMETER_BITMASK)
new_value = service.data[const.ATTR_CONFIG_VALUE]
for node in nodes:
zwave_value, cmd_status = await async_set_config_parameter(
node,
new_value,
property_or_property_name,
property_key=property_key,
)
if cmd_status == CommandStatus.ACCEPTED:
msg = "Set configuration parameter %s on Node %s with value %s"
else:
msg = (
"Added command to queue to set configuration parameter %s on Node "
"%s with value %s. Parameter will be set when the device wakes up"
)
_LOGGER.info(msg, zwave_value, node, new_value)
async def async_bulk_set_partial_config_parameters(
self, service: ServiceCall
) -> None:
"""Bulk set multiple partial config values on a node."""
nodes = service.data[const.ATTR_NODES]
property_ = service.data[const.ATTR_CONFIG_PARAMETER]
new_value = service.data[const.ATTR_CONFIG_VALUE]
for node in nodes:
cmd_status = await async_bulk_set_partial_config_parameters(
node,
property_,
new_value,
)
if cmd_status == CommandStatus.ACCEPTED:
msg = "Bulk set partials for configuration parameter %s on Node %s"
else:
msg = (
"Added command to queue to bulk set partials for configuration "
"parameter %s on Node %s"
)
_LOGGER.info(msg, property_, node)
async def async_poll_value(self, service: ServiceCall) -> None:
"""Poll value on a node."""
for entity_id in service.data[ATTR_ENTITY_ID]:
entry = self._ent_reg.async_get(entity_id)
assert entry # Schema validation would have failed if we can't do this
async_dispatcher_send(
self._hass,
f"{const.DOMAIN}_{entry.unique_id}_poll_value",
service.data[const.ATTR_REFRESH_ALL_VALUES],
)
async def async_set_value(self, service: ServiceCall) -> None:
"""Set a value on a node."""
nodes: set[ZwaveNode] = service.data[const.ATTR_NODES]
command_class = service.data[const.ATTR_COMMAND_CLASS]
property_ = service.data[const.ATTR_PROPERTY]
property_key = service.data.get(const.ATTR_PROPERTY_KEY)
endpoint = service.data.get(const.ATTR_ENDPOINT)
new_value = service.data[const.ATTR_VALUE]
wait_for_result = service.data.get(const.ATTR_WAIT_FOR_RESULT)
options = service.data.get(const.ATTR_OPTIONS)
for node in nodes:
value_id = get_value_id(
node,
command_class,
property_,
endpoint=endpoint,
property_key=property_key,
)
# If value has a string type but the new value is not a string, we need to
# convert it to one. We use new variable `new_value_` to convert the data
# so we can preserve the original `new_value` for every node.
if (
value_id in node.values
and node.values[value_id].metadata.type == "string"
and not isinstance(new_value, str)
):
new_value_ = str(new_value)
else:
new_value_ = new_value
success = await node.async_set_value(
value_id,
new_value_,
options=options,
wait_for_result=wait_for_result,
)
if success is False:
raise SetValueFailed(
"Unable to set value, refer to "
"https://zwave-js.github.io/node-zwave-js/#/api/node?id=setvalue "
"for possible reasons"
)
async def async_multicast_set_value(self, service: ServiceCall) -> None:
"""Set a value via multicast to multiple nodes."""
nodes = service.data[const.ATTR_NODES]
broadcast: bool = service.data[const.ATTR_BROADCAST]
options = service.data.get(const.ATTR_OPTIONS)
if not broadcast and len(nodes) == 1:
const.LOGGER.info(
"Passing the zwave_js.multicast_set_value service call to the "
"zwave_js.set_value service since only one node was targeted"
)
await self.async_set_value(service)
return
command_class = service.data[const.ATTR_COMMAND_CLASS]
property_ = service.data[const.ATTR_PROPERTY]
property_key = service.data.get(const.ATTR_PROPERTY_KEY)
endpoint = service.data.get(const.ATTR_ENDPOINT)
value = {
"commandClass": command_class,
"property": property_,
"propertyKey": property_key,
"endpoint": endpoint,
}
new_value = service.data[const.ATTR_VALUE]
# If there are no nodes, we can assume there is only one config entry due to
# schema validation and can use that to get the client, otherwise we can just
# get the client from the node.
client: ZwaveClient = None
first_node: ZwaveNode = next((node for node in nodes), None)
if first_node:
client = first_node.client
else:
entry_id = self._hass.config_entries.async_entries(const.DOMAIN)[0].entry_id
client = self._hass.data[const.DOMAIN][entry_id][const.DATA_CLIENT]
first_node = next(
node
for node in client.driver.controller.nodes.values()
if get_value_id(node, command_class, property_, endpoint, property_key)
in node.values
)
# If value has a string type but the new value is not a string, we need to
# convert it to one
value_id = get_value_id(
first_node, command_class, property_, endpoint, property_key
)
if (
value_id in first_node.values
and first_node.values[value_id].metadata.type == "string"
and not isinstance(new_value, str)
):
new_value = str(new_value)
success = await async_multicast_set_value(
client=client,
new_value=new_value,
value_data={k: v for k, v in value.items() if v is not None},
nodes=None if broadcast else list(nodes),
options=options,
)
if success is False:
raise SetValueFailed("Unable to set value via multicast")
async def async_ping(self, service: ServiceCall) -> None:
"""Ping node(s)."""
const.LOGGER.warning(
"This service is deprecated in favor of the ping button entity. Service "
"calls will still work for now but the service will be removed in a "
"future release"
)
nodes: set[ZwaveNode] = service.data[const.ATTR_NODES]
await asyncio.gather(*(node.async_ping() for node in nodes))
``` |
{
"source": "jheddings/irc",
"score": 3
} |
#### File: irc/irc/connection.py
```python
from __future__ import absolute_import
import socket
def identity(x):
return x
class Factory:
"""
A class for creating custom socket connections.
To create a simple connection:
.. code-block:: python
server_address = ('localhost', 80)
Factory()(server_address)
To create an SSL connection:
.. code-block:: python
Factory(wrapper=ssl.wrap_socket)(server_address)
To create an SSL connection with parameters to wrap_socket:
.. code-block:: python
wrapper = functools.partial(ssl.wrap_socket, ssl_cert=get_cert())
Factory(wrapper=wrapper)(server_address)
To create an IPv6 connection:
.. code-block:: python
Factory(ipv6=True)(server_address)
Note that Factory doesn't save the state of the socket itself. The
caller must do that, as necessary. As a result, the Factory may be
re-used to create new connections with the same settings.
"""
family = socket.AF_INET
def __init__(self, bind_address=None, wrapper=identity, ipv6=False):
self.bind_address = bind_address
self.wrapper = wrapper
if ipv6:
self.family = socket.AF_INET6
def connect(self, server_address):
sock = self.wrapper(socket.socket(self.family, socket.SOCK_STREAM))
self.bind_address and sock.bind(self.bind_address)
sock.connect(server_address)
return sock
__call__ = connect
class AioFactory:
"""
A class for creating async custom socket connections.
To create a simple connection:
.. code-block:: python
server_address = ('localhost', 80)
Factory()(protocol_instance, server_address)
To create an SSL connection:
.. code-block:: python
Factory(ssl=True)(protocol_instance, server_address)
To create an IPv6 connection:
.. code-block:: python
Factory(ipv6=True)(protocol_instance, server_address)
Note that Factory doesn't save the state of the socket itself. The
caller must do that, as necessary. As a result, the Factory may be
re-used to create new connections with the same settings.
"""
def __init__(self, **kwargs):
self.connection_args = kwargs
def connect(self, protocol_instance, server_address):
return protocol_instance.loop.create_connection(
lambda: protocol_instance, *server_address, **self.connection_args
)
__call__ = connect
``` |
{
"source": "jheddings/notes2notion",
"score": 2
} |
#### File: jheddings/notes2notion/apple.py
```python
import re
import logging
import yaml
try:
from yaml import CLoader as YamlLoader
except ImportError:
from yaml import Loader as YamlLoader
################################################################################
def tell_app(app, *args):
import applescript
script = "\n".join(args)
res = applescript.tell.app(app, script)
if res.code != 0:
print(f'!! ERROR - {res.err}')
return None
# do some basic string to type mapping...
if res.out == 'null': return None
if res.out == 'false': return False
if res.out == 'true': return True
if len(res.out) == 0: return None
return res.out
################################################################################
def tell_notes(*args):
return tell_app('Notes', *args)
################################################################################
class Notes(object):
#---------------------------------------------------------------------------
def __init__(self):
self.logger = logging.getLogger('notes2notion.apple.Notes')
#---------------------------------------------------------------------------
def __iter__(self):
note_ids = self.get_all_ids()
self.logger.debug('starting iterator: %d notes', len(note_ids))
return Notes.Iterator(self, note_ids)
#---------------------------------------------------------------------------
def __getitem__(self, note_id):
return self.get(note_id)
#---------------------------------------------------------------------------
def get_all_ids(self):
# the 'notes' object serializes as a list of Core Data URL's...
notes_raw = tell_notes('return notes of default account')
notes = re.split(r', *', notes_raw)
self.logger.debug('parsing %d links', len(notes))
# extract the full core data identifier; the strings from our list
# contain extra identifiers at the beginning (note id x-coredata...)
note_ids = [
re.sub(r'^.*(x-coredata://.*/p[0-9]+)', r'\1', x) for x in notes
]
return note_ids
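        # Each entry is reduced to its bare Core Data URL; e.g. (hypothetical id)
        # 'note id x-coredata://1A2B3C4D/ICNote/p42' becomes
        # 'x-coredata://1A2B3C4D/ICNote/p42'.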
#---------------------------------------------------------------------------
def delete(self, note_id):
self.logger.debug('deleting note: %s', note_id)
# FIXME this isn't working...
tell_notes(
'repeat with theNote in notes of default account',
'set noteID to id of theNote as string',
f'if noteID is equal to "{note_id}" then',
            'delete theNote',
'end if',
'end repeat'
)
#---------------------------------------------------------------------------
def get(self, note_id):
self.logger.debug('loading note: %s', note_id)
# to get the data from Notes, we will get a dump from AppleScript
# as YAML that we can turn back into a Python object
text = tell_notes(
# there is no direct way to get a note from AppleScript using the ID...
# so we have to loop over all notes and look for the right one.
# on large databases, this takes a VERY long time for some reason
'repeat with theNote in notes of default account',
'set noteID to id of theNote as string',
f'if noteID is equal to "{note_id}" then',
# determine the the Notes folder
# TODO get the full folder path
'set folderName to ""',
'set theContainer to container of theNote',
'if theContainer is not missing value',
'set folderName to "/" & (name of theContainer)',
'end if',
# "export" the note data when we find it...
'set noteMeta to "meta:" ¬',
' & "\n id: " & quoted form of (id of theNote as string) ¬',
' & "\n name: " & quoted form of (name of theNote as string) ¬',
' & "\n folder: " & quoted form of folderName ¬',
' & "\n creation_date: " & quoted form of (creation date of theNote as string) ¬',
' & "\n modification_date: " & quoted form of (modification date of theNote as string) ¬',
' & "\n locked: " & (password protected of theNote as boolean) ¬',
' & "\n shared: " & (shared of theNote as boolean) ¬',
' & "\nattachments:"',
'repeat with theAttachment in attachments of theNote',
'set noteMeta to noteMeta & "\n - id: " & (id of theAttachment as string) ¬',
' & "\n name: " & quoted form of (name of theAttachment as string) ¬',
' & "\n ref: " & quoted form of (content identifier of theAttachment as string) ¬',
' & "\n creation_date: " & quoted form of (creation date of theAttachment as string) ¬',
' & "\n modification_date: " & quoted form of (modification date of theAttachment as string) ¬',
' & "\n url: " & (url of theAttachment)',
'end repeat',
'return noteMeta & "\n---\n" & (body of theNote as string)',
'end if',
'end repeat'
)
# DEBUG - print raw text output from AppleScript
#print(text)
# bail if nothing came out...
if text is None:
self.logger.debug('Note is empty: %s', note_id)
return None
self.logger.debug('parsing %d bytes from export', len(text))
# parse the output from AppleScript into a Python object...
(text_meta, text_body) = text.split('---', maxsplit=1)
# adjust the metadata to account for `quoted form of`
text_meta = text_meta.replace("'\\''", "''")
try:
note = yaml.load(text_meta, Loader=YamlLoader)
except yaml.YAMLError as e:
self.logger.error('YAMLError - %s', e)
return None
note['body'] = text_body.strip()
self.logger.debug('loaded note - %s', note['meta']['name'])
# DEBUG - print yaml structure from parsed note
#print(yaml.dump(note))
return note
#---------------------------------------------------------------------------
class Iterator(object):
outer = None
iter_idx = None
#-----------------------------------------------------------------------
def __init__(self, outer, note_ids):
self.note_ids = note_ids
self.iter_idx = 0
self.outer = outer
self.logger = logging.getLogger('notes2notion.apple.Notes.Iterator')
#-----------------------------------------------------------------------
def __next__(self):
self.logger.debug('load next note - cursor: %d', self.iter_idx)
# make sure we were properly initialized
if self.iter_idx is None or self.note_ids is None:
raise ValueError
# make sure the next index is in bounds
if self.iter_idx < 0 or self.iter_idx >= len(self.note_ids):
raise StopIteration
note_id = self.note_ids[self.iter_idx]
self.logger.debug('next note ID: %s', note_id)
# set up for next call...
self.iter_idx += 1
return self.outer.get(note_id)
```
#### File: jheddings/notes2notion/builder.py
```python
import re
import logging
import yaml
from notion.block import PageBlock, TextBlock, CodeBlock, ImageBlock
from notion.block import HeaderBlock, SubheaderBlock, SubsubheaderBlock
from notion.block import BulletedListBlock, NumberedListBlock
from notion.block import CollectionViewBlock, DividerBlock, QuoteBlock
from bs4 import BeautifulSoup
try:
from yaml import CLoader as YamlLoader
except ImportError:
from yaml import Loader as YamlLoader
# this maps the source HTML element from Notes to a Notion block type
block_map = {
'h1' : HeaderBlock,
'h2' : SubheaderBlock,
'h3' : SubsubheaderBlock,
'tt' : CodeBlock,
'ul' : BulletedListBlock,
'ol' : NumberedListBlock
}
# parse embedded image data
img_data_re = re.compile('^data:image/([^;]+);([^,]+),(.+)$')
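# e.g. (illustrative) a src of 'data:image/png;base64,iVBORw0KGgo...' matches with groups
# ('png', 'base64', 'iVBORw0KGgo...'); append_img below relies on this grouping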
################################################################################
# Notion supports inline markdown for common formatting...
def markup_text(tag, text):
# bold text
if tag == 'b' or tag == 'strong':
return '**' + text + '**'
# italics
elif tag == 'i' or tag == 'em':
return '*' + text + '*'
# strike-through text
elif tag == 'strike':
return '~~' + text + '~~'
# standard links
elif tag == 'a':
return '<' + text + '>'
# underline - not supported in markdown
#elif tag == 'u':
return text
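    # e.g. markup_text('b', 'text') -> '**text**', markup_text('a', 'text') -> '<text>';
    # unsupported tags (like 'u') fall through and return the text unchanged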
################################################################################
def get_block_text(block):
# no-name blocks are just strings...
if block.name is None:
return str(block)
# otherwise, iterate over the text in the child elements
# we could use this method to do additional processing on the text
# e.g. we could look for things that look like URL's and make links
# e.g. we could look for lines that start with '-' and make lists
strings = list()
for child in block.children:
string = get_block_text(child)
if string is None: continue
if len(string) == 0: continue
strings.append(string.strip())
text = ' '.join(strings)
return markup_text(block.name, text)
################################################################################
def build_schema(thead):
schema = dict()
for idx, td in enumerate(thead):
col_id = f'c{idx}'
col_schema = {
'name' : td,
'type' : 'text'
}
# treat the first column differently
if idx == 0:
col_id = 'title'
col_schema['type'] = 'title'
schema[col_id] = col_schema
return schema
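    # e.g. build_schema(['Name', 'Qty']) -> {'title': {'name': 'Name', 'type': 'title'},
    #                                        'c1': {'name': 'Qty', 'type': 'text'}}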
################################################################################
class PageArchive(object):
#---------------------------------------------------------------------------
def __init__(self, archive):
self.archive = archive
self.logger = logging.getLogger('notes2notion.builder.PageArchive')
self.logger.debug('archive ready - %s', archive.title)
#---------------------------------------------------------------------------
def store(self, note):
note_meta = note['meta']
note_name = note_meta['name']
self.logger.debug('creating page - %s', note_name)
        # TODO support the folder hierarchy from the note metadata
page = self.archive.children.add_new(PageBlock, title=note_name)
self.logger.debug('page => %s', page.id)
builder = PageBuilder(page)
builder.construct(note)
return page
################################################################################
class PageBuilder(object):
# TODO make configurable
skip_title = True
include_meta = True
include_html = False
upload_attachments = True
#---------------------------------------------------------------------------
def __init__(self, page):
self.page = page
self.logger = logging.getLogger('notes2notion.builder.PageBuilder')
self.logger.debug('builder ready - %s', page.title)
#---------------------------------------------------------------------------
def get_url(self):
if self.page is None:
return None
return self.page.get_browseable_url()
#---------------------------------------------------------------------------
def construct(self, note):
note_meta = note['meta']
self.logger.debug('starting construction - %s', note_meta['id'])
self.append_html(note['body'])
if note['attachments']:
self.logger.debug('processing attachments...')
self.page.children.add_new(DividerBlock)
self.page.children.add_new(SubheaderBlock, title='Attachments')
for attachment in note['attachments']:
self.logger.debug('attachment[%s] => %s', attachment['id'], attachment['name'])
# TODO upload attachments
if self.include_meta:
meta_text = yaml.dump(attachment)
self.page.children.add_new(CodeBlock, title=meta_text, language='yaml')
if self.include_meta or self.include_html:
self.page.children.add_new(DividerBlock)
if self.include_meta:
self.logger.debug('adding metadata to page...')
meta_text = yaml.dump(note_meta)
self.page.children.add_new(CodeBlock, title=meta_text, language='yaml')
if self.include_html:
self.logger.debug('appending raw HTML...')
            self.page.children.add_new(CodeBlock, title=note['body'], language='html')
self.logger.debug('finished construction - %s', note_meta['id'])
#---------------------------------------------------------------------------
def append_html(self, html):
self.logger.debug('importing HTML (%d bytes)', len(html))
soup = BeautifulSoup(html, 'html.parser')
# Apple Notes exports pretty basic HTML...
# there is no html, head or body wrapper.
for elem in soup.children:
if elem.name is None: continue
self.logger.debug('append block: %s', elem.name)
# let append_* methods do the heavy lifting
if elem.name == 'div':
self.append_block(elem)
# handle lists separately
elif elem.name == 'ul' or elem.name == 'ol':
self.append_list(elem)
else:
self.logger.warning('Unknown Block: %s', elem.name)
#---------------------------------------------------------------------------
def append_block(self, elem):
if elem is None: return None
# collect blocks that are not directly mapped
pending_blocks = list()
for child in elem.children:
self.logger.debug('processing child - %s', child.name)
# skip empty line breaks
if child.name == 'br':
self.logger.debug('skipping line break')
continue
# if this is the first h1 child on the page, assume it is the title
elif child.name == 'h1' and len(self.page.children) == 0:
self.logger.debug('skipping title element')
continue
# handle images (may be more than one per block)
elif child.name == 'img':
self.append_img(child)
# handle objects (like tables)
elif child.name == 'object':
self.append_object(child)
# look for known block mappings...
elif child.name in block_map:
self.append_text(child)
# track text from remaining blocks
else:
text = get_block_text(child)
self.logger.debug('pending block [%d]: "%s..."',
len(pending_blocks), text[:7])
pending_blocks.append(text)
# deal with pending blocks (if we found any)
self.logger.debug('block complete; %d pending block(s)', len(pending_blocks))
if len(pending_blocks) > 0:
text = ' '.join(pending_blocks)
self.page.children.add_new(TextBlock, title=text)
#---------------------------------------------------------------------------
def append_text(self, elem):
block_type = block_map.get(elem.name, TextBlock)
text = get_block_text(elem)
if text is None or len(text) == 0:
self.logger.debug('empty text block; skipping')
return
self.logger.debug('mapped to Notion block: %s => "%s..."', block_type, text[:7])
block = self.page.children.add_new(block_type, title=text)
self.logger.debug('block => %s', block.id)
#---------------------------------------------------------------------------
def append_list(self, list_elem):
block_type = block_map.get(list_elem.name, None)
self.logger.debug('building Notion list: %s', block_type)
if block_type is None:
self.logger.warning('Unknown list type: %s', block_type)
return
for li in list_elem.find_all('li', recursive=False):
text = get_block_text(li)
self.logger.debug('adding list item: "%s..."', text[:7])
self.page.children.add_new(block_type, title=text)
#---------------------------------------------------------------------------
def append_img(self, img_elem):
import base64
import tempfile
self.logger.debug('processing image')
# Notes uses embedded images... we need to extract the image, upload it
# and reference it in the block
# TODO this probably needs more error handling and better flow
img_src = img_elem['src']
m = img_data_re.match(img_src)
if m is None:
self.logger.warning('Unsupported image in note')
return
img_type = m.groups()[0]
img_data_enc = m.groups()[1]
img_data_str = m.groups()[2]
self.logger.debug('found embedded image: %s [%s]', img_type, img_data_enc)
if img_data_enc == 'base64':
self.logger.debug('decoding base64 image: %d bytes', len(img_data_str))
img_data_b64 = img_data_str.encode('ascii')
img_data = base64.b64decode(img_data_b64)
else:
self.logger.warning('Unsupported img encoding: %s', img_data_enc)
return
self.logger.debug('preparing %d bytes for image upload', len(img_data))
with tempfile.NamedTemporaryFile(suffix=f'.{img_type}') as fp:
self.logger.debug('using temporary file: %s', fp.name)
fp.write(img_data)
# upload the image to Notion
block = self.page.children.add_new(ImageBlock)
try:
block.upload_file(fp.name)
except Exception:
self.logger.error('UPLOAD FAILED')
#---------------------------------------------------------------------------
def append_object(self, elem):
self.logger.debug('processing object')
for child in elem.children:
if child.name == 'table':
self.append_table(child)
else:
                self.logger.warning('Unsupported object: %s', child.name)
#---------------------------------------------------------------------------
def append_table(self, table):
self.logger.debug('building table')
# XXX it would make more sense if Notion supported basic markdown tables
# instead, we have to build a collection view to capture the table data
block = self.page.children.add_new(CollectionViewBlock)
self.logger.debug('table => %s', block.id)
# does Apple ever set a header? I don't think so...
# XXX maybe we want a flag to use the first table row as a header or not?
thead = None
tbody = table.find('tbody')
for tr in tbody.find_all('tr', recursive=False):
# if no header was provided, we will build it from this row...
if thead is None:
self.logger.debug('initializing header')
thead = list()
# if we have a header, but no Collection (yet)
elif block.collection is None:
schema = build_schema(thead)
self.logger.debug('initializing schema: %s', schema)
# XXX directly accessing _client here is a bit of a hack...
client = self.page._client
block.collection = client.get_collection(
client.create_record('collection', parent=block, schema=schema)
)
# we need a new view to see our lovely table...
block.views.add_new(view_type='table')
# if we have a valid collection, add data directly to rows
row = None if block.collection is None else block.collection.add_row()
# start processing the column data...
tds = tr.find_all('td', recursive=False)
for idx, td in enumerate(tds):
text = get_block_text(td)
if text is None: continue
col_id = 'title' if idx == 0 else f'c{idx}'
self.logger.debug('table data: %s => "%s..."', col_id, text[:7])
if block.collection is None:
thead.append(text)
if row is not None:
row.set_property(col_id, text)
``` |
{
"source": "jheddings/notional",
"score": 3
} |
#### File: notional/notional/records.py
```python
import logging
from datetime import datetime
from typing import Dict, List, Optional, Union
from uuid import UUID
from .core import DataObject, NamedObject
from .schema import PropertyObject
from .text import plain_text
from .types import EmojiObject, FileObject, PropertyValue, RichTextObject
log = logging.getLogger(__name__)
class ParentRef(DataObject):
"""Reference another block."""
# XXX Notion does not handle parent references consistently in the API...
# in some cases, the `type` is accepted and in others it is not. eventually
# these should all by TypedObject's with the appropriate fields
@classmethod
def from_record(cls, record):
"""Return the correct parent ID based on the object type."""
if isinstance(record, ParentRef):
return record
elif isinstance(record, Page):
return PageParent(page_id=record.id)
elif isinstance(record, Database):
return DatabaseParent(database_id=record.id)
raise ValueError("Unrecognized 'parent' attribute")
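    # usage sketch: from_record(page) -> PageParent(page_id=page.id);
    # from_record(database) -> DatabaseParent(database_id=database.id)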
class DatabaseParent(ParentRef):
"""Reference a database."""
database_id: UUID
class PageParent(ParentRef):
"""Reference a page."""
page_id: UUID
class WorkspaceParent(ParentRef):
"""Reference the workspace."""
workspace: bool = True
class Record(NamedObject):
"""The base type for Notion API records."""
id: UUID = None
created_time: datetime = None
last_edited_time: datetime = None
has_children: bool = False
archived: bool = False
class Database(Record, object="database"):
"""A database record type."""
title: List[RichTextObject] = None
url: str = None
parent: ParentRef = None
icon: Optional[Union[FileObject, EmojiObject]] = None
cover: Optional[FileObject] = None
properties: Dict[str, PropertyObject] = {}
@property
def Title(self):
if self.title is None or len(self.title) == 0:
return None
return plain_text(*self.title)
class Page(Record, object="page"):
"""A standard Notion page object."""
url: str = None
parent: ParentRef = None
icon: Optional[Union[FileObject, EmojiObject]] = None
cover: Optional[FileObject] = None
properties: Dict[str, PropertyValue] = {}
def __getitem__(self, name):
"""Indexer for the given property name.
:param name: the name of the property to get
"""
log.debug("get property :: {%s} [%s]", self.id, name)
if self.properties is None:
raise AttributeError("No properties in Page")
prop = self.properties.get(name)
if prop is None:
raise AttributeError(f"No such property: {name}")
return prop
def __setitem__(self, name, value):
"""Set the object data for the given property.
:param name: the name of the property to set
:param prop: the PropertyValue for the named property
"""
log.debug("set property :: {%s} [%s] => %s", self.id, name, value)
if value is None:
self.properties.pop(name, None)
elif not isinstance(value, PropertyValue):
raise ValueError(f"Unable to set {name} :: unsupported value type")
else:
self.properties[name] = value
@property
def Title(self):
if self.properties is None or len(self.properties) == 0:
return None
for prop in self.properties.values():
if prop.id == "title":
return prop.Value or None
return None
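    # usage sketch (property names are illustrative): page["Status"] returns that PropertyValue,
    # page["Status"] = value replaces it (None removes it), and page.Title yields the
    # plain-text title value, or None if no title property is set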
``` |
{
"source": "j-hedtke/torchxrayvision",
"score": 2
} |
#### File: torchxrayvision/tests/test_dataloaders.py
```python
import pytest
import torchxrayvision as xrv
def test_dataloader_basic():
xrv.datasets.NIH_Dataset(imgpath=".", views=["PA"])
``` |
{
"source": "jhee514/wouldYouCi",
"score": 2
} |
#### File: wouldyouci_back/cinemas/views.py
```python
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view, permission_classes
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework import viewsets
from datetime import date, datetime
from movies.models import Onscreen
from movies.serializers import OnscreenSerializer, CinemaSerializer
from accounts.models import CinemaRating
from accounts.serializers import CinemaRatingSerializer
from accounts.serializers import SimpleCinemaRatingSerializer
from .models import Cinema
from .serializers import SimpleCinemaSerializer
from django.contrib.auth import get_user_model
User = get_user_model()
@api_view(['GET'])
@permission_classes([AllowAny])
def get_cinema_width(request):
    x1 = request.query_params.get('x1')
    x2 = request.query_params.get('x2')
    y1 = request.query_params.get('y1')
    y2 = request.query_params.get('y2')
    if not x1 or not x2 or not y1 or not y2:
        return Response(status=400, data={'message': 'x, y 값은 필수입니다.'})
    x1, x2, y1, y2 = float(x1), float(x2), float(y1), float(y2)
cinemas = Cinema.objects.filter(y__gte=y1,
y__lte=y2,
x__gte=x1,
x__lte=x2
)
serializer = SimpleCinemaSerializer(cinemas, many=True)
dataset = {
'meta': {
'total': cinemas.count()
},
'documents': serializer.data
}
return Response(status=200, data=dataset)
@api_view(['GET'])
@permission_classes([AllowAny])
def get_fast_movie(request, cinema_id):
start_time = request.query_params.get('start_time')
start_time = start_time if start_time else datetime.now().time()
onscreen = Onscreen.objects.filter(cinema=cinema_id,
date=date.today(),
start_time__gte=start_time)
serializer = OnscreenSerializer(onscreen, many=True)
dataset = {
'meta': {
'total': onscreen.count()
},
'documents': serializer.data
}
return Response(status=200, data=dataset)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def cinema_detail(request, cinema_id):
user = request.user
cinema = get_object_or_404(Cinema, id=cinema_id)
serializer = CinemaSerializer(cinema)
has_score = user.cinema_ratings.filter(cinema=cinema_id).exists()
pick_cinemas = user.pick_cinemas.filter(id=cinema_id).exists()
dataset = {
'has_score': has_score,
'pick_cinemas': pick_cinemas,
}
dataset.update(serializer.data)
return Response(status=200, data=dataset)
@api_view(['PATCH'])
@permission_classes([IsAuthenticated])
def pick_cinema(request, cinema_id):
user = request.user
cinema = get_object_or_404(Cinema, id=cinema_id)
if cinema.pick_users.filter(id=user.id).exists():
cinema.pick_users.remove(user)
return Response(status=200, data={"pick_cinemas": False})
else:
cinema.pick_users.add(user)
return Response(status=200, data={"pick_cinemas": True})
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def create_cinema_rating(request):
user = request.user
if user.cinema_ratings.filter(cinema=request.data['cinema']).exists():
return Response(status=403, data={'message': '이미 평가한 영화관입니다.'})
serializer = CinemaRatingSerializer(data=request.data)
if serializer.is_valid():
new_rating = serializer.save(user=user)
cinema = new_rating.cinema
ratings_count = cinema.cinema_ratings.count()
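        # running-average update: new_avg = (old_avg * (n - 1) + new_score) / n,
        # where n already includes the rating saved above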
cinema_rating = cinema.score * (ratings_count - 1)
cinema_rating = (cinema_rating + new_rating.score) / ratings_count
cinema.score = cinema_rating
cinema.save()
new_rating_serializer = SimpleCinemaRatingSerializer(new_rating)
return Response(new_rating_serializer.data)
return Response(status=400, data=serializer.errors)
@api_view(['PATCH', 'DELETE'])
@permission_classes([IsAuthenticated])
def patch_delete_cinema_rating(request, rating_id):
rating = get_object_or_404(CinemaRating, id=rating_id)
origin_score = rating.score
cinema = rating.cinema
ratings_count = cinema.cinema_ratings.count()
cinema_rating = cinema.score * ratings_count - origin_score
if rating.user.id == request.user.id:
if request.method == 'PATCH':
serializer = CinemaRatingSerializer(instance=rating, data=request.data)
if serializer.is_valid():
update_rating = serializer.save()
cinema_rating = (cinema_rating + update_rating.score) / ratings_count
cinema.score = cinema_rating
cinema.save()
new_rating_serializer = SimpleCinemaRatingSerializer(update_rating)
return Response(new_rating_serializer.data)
return Response(status=400, data=serializer.errors)
elif request.method == 'DELETE':
rating.delete()
if ratings_count - 1:
cinema_rating = cinema_rating / (ratings_count - 1)
else:
cinema_rating = 0
cinema.score = cinema_rating
cinema.save()
return Response(status=204)
return Response(status=400, data={'message': '권한이 없습니다.'})
class SmallPagination(PageNumberPagination):
page_size = 10
page_size_query_param = "page_size"
max_page_size = 50
@permission_classes([AllowAny])
class RatingViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = SimpleCinemaRatingSerializer
pagination_class = SmallPagination
def get_queryset(self):
cinema_id = self.request.query_params.get('cinema', 0)
cinema = get_object_or_404(Cinema, id=cinema_id)
queryset = (
cinema.cinema_ratings.all()
)
return queryset
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_cinema_rating_avg(request, cinema_id):
cinema = get_object_or_404(Cinema, id=cinema_id)
score = cinema.score
if score:
score = round(score, 2)
else:
score = 0
return Response(status=200, data={'score': score})
```
#### File: wouldyouci_back/movies/views.py
```python
from rest_framework import viewsets
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from django.shortcuts import get_object_or_404
from django.core.cache import cache
from accounts.serializers import RatingSerializer, SimpleRatingSerializer
from accounts.models import Rating
from cinemas.serializers import SearchCinemaSerializer
from cinemas.models import Cinema
from .models import Movie
from .serializers import MovieSerializer
from .func import contentsbased_by_genres_and_directors
from django.contrib.auth import get_user_model
User = get_user_model()
class SmallPagination(PageNumberPagination):
page_size = 10
page_size_query_param = "page_size"
max_page_size = 50
class RatingViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = SimpleRatingSerializer
pagination_class = SmallPagination
permission_classes = [IsAuthenticated]
def get_queryset(self):
movie_id = self.request.query_params.get('movie', 0)
movie = get_object_or_404(Movie, id=movie_id)
queryset = (
movie.ratings.all()
)
return queryset
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def movie_detail(request, movie_id):
movie = get_object_or_404(Movie, id=movie_id)
serializer = MovieSerializer(movie)
user = request.user
has_score = user.ratings.filter(movie=movie_id).exists()
pick_movies = user.pick_movies.filter(id=movie_id).exists()
predicted_score = 0
if user.ratings.count() > 9:
predicted_score = contentsbased_by_genres_and_directors(user.id, movie_id)
dataset = {
'has_score': has_score,
'pick_movies': pick_movies,
'is_showing': movie.onscreens.exists(),
'predicted_score': predicted_score,
}
dataset.update(serializer.data)
return Response(status=200, data=dataset)
@api_view(['PATCH'])
@permission_classes([IsAuthenticated])
def pick_movie(request, movie_id):
user = request.user
movie = get_object_or_404(Movie, id=movie_id)
if movie.pick_users.filter(id=user.id).exists():
movie.pick_users.remove(user)
return Response(status=200, data={"pick_movies": False})
else:
movie.pick_users.add(user)
return Response(status=200, data={"pick_movies": True})
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def create_rating(request):
user = request.user
if user.ratings.filter(movie=request.data['movie']).exists():
return Response(status=403, data={'message': '이미 평가한 영화입니다.'})
serializer = RatingSerializer(data=request.data)
if serializer.is_valid():
cache.delete(f'recommend_{user.id}')
new_rating = serializer.save(user=user)
movie = new_rating.movie
ratings_count = movie.ratings.count()
movie_rating = movie.score * (ratings_count - 1)
movie_rating = (movie_rating + new_rating.score) / ratings_count
movie.score = movie_rating
movie.save()
new_rating_serializer = SimpleRatingSerializer(new_rating)
return Response(new_rating_serializer.data)
return Response(status=400, data=serializer.errors)
@api_view(['PATCH', 'DELETE'])
@permission_classes([IsAuthenticated])
def patch_delete_rating(request, rating_id):
rating = get_object_or_404(Rating, id=rating_id)
origin_score = rating.score
movie = rating.movie
ratings_count = movie.ratings.count()
movie_rating = movie.score * ratings_count - origin_score
user_id = request.user.id
if rating.user.id == user_id:
if request.method == 'PATCH':
serializer = RatingSerializer(instance=rating, data=request.data)
if serializer.is_valid():
cache.delete(f'recommend_{user_id}')
update_rating = serializer.save()
movie_rating = (movie_rating + update_rating.score) / ratings_count
movie.score = movie_rating
movie.save()
new_rating_serializer = SimpleRatingSerializer(update_rating)
return Response(new_rating_serializer.data)
return Response(status=400, data=serializer.errors)
elif request.method == 'DELETE':
cache.delete(f'recommend_{user_id}')
rating.delete()
if ratings_count - 1:
movie_rating = movie_rating / (ratings_count - 1)
else:
movie_rating = 0
movie.score = movie_rating
movie.save()
return Response(status=204)
return Response(status=400, data={'message': '권한이 없습니다.'})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_onscreen_cinema(request, movie_id):
movie = get_object_or_404(Movie, id=movie_id)
cinema_id_set = movie.onscreens.values_list('cinema', flat=True).distinct()
cinemas = Cinema.objects.filter(id__in=cinema_id_set)
area = list(cinemas.values_list('area', flat=True).distinct())
serializer = SearchCinemaSerializer(cinemas, many=True)
dataset = {
'area': area,
'data': serializer.data
}
return Response(status=200, data=dataset)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_rating_avg(request, movie_id):
movie = get_object_or_404(Movie, id=movie_id)
score = movie.score
if score:
score = round(score, 2)
else:
score = 0
return Response(status=200, data={'score': score})
```
#### File: wouldyouci_database/crawling/match_code.py
```python
from bs4 import BeautifulSoup
import urllib.request
from dotenv import load_dotenv
import json
import requests
import pyperclip
import time
import datetime
import os
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
def copyInput(self, xpath, input_text):
pyperclip.copy(input_text)
driver.find_element_by_xpath(xpath).click()
ActionChains(driver).key_down(Keys.CONTROL).send_keys('v').key_up(Keys.CONTROL).perform()
time.sleep(1)
def getMovieName(tg_str):
end_idx = tg_str.index(']')
res = tg_str[end_idx+2:]
for i in range(len(res)):
if res[i] == '/':
res = res[:i]
break
return res
def deleteGwal(tg_str):
end_idx = tg_str.index(')')
return tg_str[end_idx+1:]
def getCodeFromURL(movie_url):
equal_idx = movie_url.index('=')
movie_code = movie_url[equal_idx+1:]
return movie_code
def getDirectorName(tg_str):
idx = len(tg_str)
for i in range(len(tg_str)):
if tg_str[i] == '|':
idx = i
break
return tg_str[:idx]
def getNaverInfo(movie_name, director_name):
NAVER_CLIENT_ID = os.getenv('NAVER_CLIENT_ID')
NAVER_CLIENT_SECRET = os.getenv('NAVER_CLIENT_SECRET')
NAVER_REQUEST_URL = 'https://openapi.naver.com/v1/search/movie.json?'
header={
"X-Naver-Client-Id":NAVER_CLIENT_ID,
"X-Naver-Client-secret":NAVER_CLIENT_SECRET,
}
req = requests.get(NAVER_REQUEST_URL+"query="+movie_name+"&display=100", headers = header).json()
req_items = req['items']
if req_items:
if director_name:
for item in req_items:
check_dir = getDirectorName(item['director'])
if check_dir == director_name:
return item
return req_items[0]
else:
return False
def naverLogin():
NAVER_ID = os.getenv('NAVER_ID')
NAVER_PW = os.getenv('NAVER_PW')
IDxPath='//*[@id="id"]'
PasswordxPath='//*[@id="pw"]'
ID=NAVER_ID
    Password=NAVER_PW
copyInput(driver, IDxPath, ID)
copyInput(driver, PasswordxPath, Password)
driver.find_element_by_xpath('//*[@value="로그인"]').click()
time.sleep(1)
def getTrailer(title, s_opt):
res = ''
YOUTUBE_KEY = os.getenv('YOUTUBE_KEY')
REQUEST_URL = 'https://www.googleapis.com/youtube/v3/search?'
YOUTUBE_SEARCH = 'https://www.youtube.com/results?'
options = {
'key': YOUTUBE_KEY,
'part': 'id',
'q': title + ' ' + s_opt,
'maxResults': 1,
'type': 'video',
'videoDuration': 'short'
}
search_option = {
'search_query': title + ' ' + s_opt,
}
url_option = urllib.parse.urlencode(options)
SEARCH_URL = REQUEST_URL+url_option
SEARCH_RESULT = json.loads(urllib.request.urlopen(SEARCH_URL).read())
ITEM_LIST = SEARCH_RESULT['items']
if ITEM_LIST:
YOUTUBE_VIDEO_URL = 'https://www.youtube.com/embed/'
for ITEM in ITEM_LIST:
if ITEM['id'].get('videoId'):
youtube_code = ITEM['id']['videoId']
break
res = YOUTUBE_VIDEO_URL + youtube_code
return res
def getMovieDetail(movie_code, movie_info, movie_name):
NAVER_MOVIE_BASE = 'https://movie.naver.com/movie/bi/mi/basic.nhn?code='
NAVER_IMAGE_URL = 'https://movie.naver.com/movie/bi/mi/photoViewPopup.nhn?movieCode='
movie_detail_url = NAVER_MOVIE_BASE + movie_code
driver.get(movie_detail_url)
time.sleep(1)
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
login_form = soup.find('form', {'id': 'frmNIDLogin'})
if login_form:
naverLogin()
source = driver.page_source
soup = BeautifulSoup(source, 'html.parser')
new_info = {
'model': 'movies.movie',
'pk': int(movie_code),
}
new_fields = {
"name": movie_name,
"name_eng": makeStringFlat(movie_info['subtitle']),
"watch_grade": "",
"running_time": "",
"summary": "",
"open_date": movie_info['pubDate'],
"trailer": "",
"poster": "",
"directors": [],
"genres": [],
"actors": [],
"score": 0,
}
image_url = NAVER_IMAGE_URL+movie_code
image_html = urllib.request.urlopen(image_url)
image_soup = BeautifulSoup(image_html, 'lxml')
image_tag = image_soup.find('img', {'id': 'targetImage'})
if image_tag:
image_src = image_tag.get('src')
new_fields['poster'] = image_src
elif movie_info['image']:
        new_fields['poster'] = movie_info['image']
else:
new_fields['poster'] = ""
movie_info_dl = soup.find('dl', {'class': 'info_spec'})
if movie_info_dl:
atag_list = movie_info_dl.find_all('a')
for atag in atag_list:
atag_href = atag.get('href')
if atag_href != '#':
key, value = getHrefInfo(atag_href)
if key == 'genre':
new_fields['genres'].append(int(value))
elif key == 'open':
if len(value) > 4:
dash_date = value[:4] + '-' + value[4:6] + '-' + value[6:]
new_fields['open_date'] = dash_date
elif key == 'grade' and not new_fields['watch_grade']:
new_fields['watch_grade'] = atag.text
if getRunTime(movie_info_dl):
new_fields['running_time'] = getRunTime(movie_info_dl)
people_area = soup.find('div', {'class': 'people'})
if people_area:
people_dict = getPeopleInfo(people_area)
for k, v in people_dict.items():
new_fields[k] = v
description = soup.find('div', {'class': 'story_area'})
if description:
new_fields['summary'] = getSummary(description.text)
    new_fields['trailer'] = getTrailer(movie_name, '예고편')
new_info['fields'] = new_fields
return new_info
def getHrefInfo(tg_href):
question_idx = tg_href.index('?')
equal_idx = tg_href.index('=')
return tg_href[question_idx+1:equal_idx], tg_href[equal_idx+1:]
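# e.g. (illustrative) getHrefInfo('bmovie.nhn?genre=19') -> ('genre', '19')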
def getRunTime(tg_area):
span_tags = tg_area.find_all('span')
for span_tag in span_tags:
span_text = span_tag.text
if '분' in span_text:
return span_text
def getPeopleInfo(people_area):
global people_check
global new_people
directors = []
actors = []
if people_area:
people_list = people_area.find_all('li')
for people in people_list:
dt_tag = people.find('dt')
dt_class = dt_tag.get('class')
a_tag = people.find('a', {'class': 'tx_people'})
if a_tag:
a_href = a_tag.get('href')
people_name = a_tag.get('title')
people_code_str = getCodeFromURL(a_href)
people_code = int(people_code_str)
if dt_class[0] == 'staff_dir':
directors.append(people_code)
elif dt_class[0] == 'staff_main':
actors.append(people_code)
if not people_check.get(people_code_str):
people_check[people_code_str] = people_name
new_people.append({
'model': 'movies.people',
'pk': people_code,
'fields': {
'name': people_name
}
})
people_dict = {}
people_dict['directors'] = directors
people_dict['actors'] = actors
return people_dict
def getSummary(desc):
cutpoint = 0
new_desc = desc
for idx in range(len(desc)-3):
if desc[idx:idx+3] == '줄거리':
cutpoint = idx
new_desc = new_desc[cutpoint+5:]
break
double_point = []
for i in range(len(new_desc)-1):
if new_desc[i] == '\n' and new_desc[i+1] == '\n':
double_point.append(i+1)
res = ''
s_point = 0
for idx in double_point:
res += new_desc[s_point:idx]
s_point = idx+1
if s_point < len(new_desc):
res += new_desc[s_point:]
for idx in range(len(res)-4):
if res[idx:idx+4] == '제작노트':
res = res[:idx-1]
break
res = makeStringFlat(res)
return res
def makeStringFlat(tg_str):
res = tg_str
res = res.replace ('\xa0', '')
res = res.replace('\r', '')
res = res.replace('&', '')
return res
def renameCGV(tg_str):
idx = len(tg_str)
for i in range(idx):
if tg_str[i] == '(':
return tg_str[:i]
return tg_str
def getCompanyDetail(tg_dict):
BASE_DICT = {
'CGV': 'http://www.cgv.co.kr/movies/detail-view/?midx=',
'MEGABOX': 'https://www.megabox.co.kr/movie-detail?rpstMovieNo=',
'LOTTE':'https://www.lottecinema.co.kr/NLCHS/Movie/MovieDetailView?movie=',
'YES': 'https://movie.yes24.com/MovieInfo/Index?mId=M'
}
director_name = ''
for company, code in tg_dict.items():
if company == 'CINEQ':
return ''
base_url = BASE_DICT[company]
detail_url = base_url + code
detail_html = urllib.request.urlopen(detail_url)
detail_soup = BeautifulSoup(detail_html, 'lxml')
if company == 'CGV':
info_box = detail_soup.find('div', {'class': 'spec'})
director_name_atag = info_box.find('a')
if director_name_atag:
director_name = director_name_atag.text
elif company == 'MEGABOX':
driver.get(detail_url)
time.sleep(2)
detail_source = driver.page_source
detail_soup = BeautifulSoup(detail_source, 'html.parser')
spec_box = detail_soup.find('div', {'class': 'movie-info infoContent'})
if spec_box:
div_line = spec_box.find('div', {'class': 'line'})
if div_line:
director_name = spec_box.find('p')
if director_name:
director_name = renameMega(director_name.text)
elif company == 'LOTTE':
driver.get(detail_url)
time.sleep(2)
detail_source = driver.page_source
detail_soup = BeautifulSoup(detail_source, 'html.parser')
ul_box = detail_soup.find('ul', {'class': 'sub_info2'})
if ul_box and ul_box.find('em').text == '감독':
director_name = ul_box.find('a').text
elif company == 'YES':
driver.get(detail_url)
time.sleep(2)
detail_source = driver.page_source
detail_soup = BeautifulSoup(detail_source, 'html.parser')
people_list = detail_soup.find_all('div', {'class': 'act_info'})
for people in people_list:
people_job = people.find('p', {'class': 'job'})
if people_job and people_job.text == '감독':
director_name = people.find('p', {'class': 'name dot_st'}).text
break
if director_name:
break
return director_name
def rename(tg_str):
res = tg_str
idx = 0
for i in range(len(res)):
if tg_str[i] == '@':
idx = i
break
if idx:
res = res[:idx]
for i in range(len(res)):
if res[i] == '+':
idx = i
if idx:
res = res[:idx-1]
return res
def renameMega(tg_str):
res = ''
if tg_str[:2] == '감독':
end_idx = len(tg_str)
st_idx = 0
for i in range(len(tg_str)):
if tg_str[i] == ':':
st_idx = i+2
if tg_str[i] == ',':
end_idx = i
break
return tg_str[st_idx:end_idx]
else:
return res
chromedriver_dir=r'C:\Users\multicampus\Downloads\chromedriver\chromedriver.exe'
load_dotenv(verbose=True)
with open('06_complete.json', 'r', encoding='UTF-8') as fr:
complete_list = json.load(fr)
with open('05_people_save.json', 'r', encoding='UTF-8') as fr:
people_save = json.load(fr)
with open('04_peoples_save.json', 'r', encoding='UTF-8') as fr:
people_check = json.load(fr)
new_movie = []
new_people = []
not_found = {
'pk': 1,
'model': 'movies.movie',
'fields': {
"name": "정보 없음",
"name_eng": "404 Not Found",
"watch_grade": "관계자외 출입금지",
"running_time": "",
"summary": "영화 정보를 찾지 못하였습니다.\n",
"open_date": "2019-07-01",
"trailer": "",
"poster": "",
"directors": [],
"genres": [],
"actors": []
}
}
def matchingMovieCode():
global driver
driver = webdriver.Chrome(chromedriver_dir)
with open('07_movie_dict.json', 'r', encoding='UTF-8') as fr:
movie_dict = json.load(fr)
for k, v in movie_dict.items():
if k[0] == '[':
movie_name = getMovieName(k)
elif k[0] == '(':
movie_name = deleteGwal(k)
else:
movie_name = k
director_name = getCompanyDetail(v)
movie_name = rename(movie_name)
naver_info = getNaverInfo(movie_name, director_name)
if naver_info:
naver_code = getCodeFromURL(naver_info['link'])
if complete_list.get(naver_code):
v['NAVER'] = naver_code
else:
new_movie_info = getMovieDetail(naver_code, naver_info, movie_name)
new_movie.append(new_movie_info)
complete_list[naver_code] = movie_name
v['NAVER'] = naver_code
else:
v['NAVER'] = "1"
driver.quit()
new_movie.append(not_found)
with open('08_new_movie.json', 'w', encoding='UTF-8') as fp:
json.dump(new_movie, fp, ensure_ascii=False, indent=4)
with open('08_new_people.json', 'w', encoding='UTF-8') as fp:
json.dump(new_people, fp, ensure_ascii=False, indent=4)
with open('08_movie_match.json', 'w', encoding='UTF-8') as fp:
json.dump(movie_dict, fp, ensure_ascii=False, indent=4)
with open('06_complete.json', 'w', encoding='UTF-8') as fp:
json.dump(complete_list, fp, ensure_ascii=False, indent=4)
with open('04_peoples_save.json', 'w', encoding='UTF-8') as fp:
json.dump(people_check, fp, ensure_ascii=False, indent=4)
```
#### File: wouldyouci_database/recommendation/contents_based_filtering.py
```python
import os
import time
import pymysql
import pandas as pd
from decouple import config
from datetime import datetime
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform as sp_rand
def contentsbased1(user_id, movie_id, genres_p):
print('======== 전체영화 예상평점 - 장르 ===========')
print('START TIME : ', str(datetime.now())[10:19])
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
genres = genres_p
conn.close()
user_profile = ratings.merge(genres, left_on='movie_id', right_index=True)
model = Lasso()
param_grid = {'alpha': sp_rand()}
research = RandomizedSearchCV(estimator=model,
param_distributions=param_grid,
n_iter=20,
cv=5,
random_state=406)
research.fit(user_profile[genres.columns], user_profile['score'])
predictions = research.best_estimator_.predict(genres)
genres.reset_index()
genres['predict'] = predictions
predicted_score = genres.at[movie_id, 'predict']
print('END TIME : ', str(datetime.now())[10:19])
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
print()
return pd.DataFrame.to_json(genres['predict'])
def contentsbased2(user_id, movie_id, movies_p):
print('======== 전체 영화 예상평점 - 장르 & 감독 & 배우 ===========')
print('START TIME : ', str(datetime.now())[10:19])
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
movies = movies_p
conn.close()
ratings = ratings.merge(movies, left_on='movie_id', right_index=True)
x_train, x_test, y_train, y_test = train_test_split(ratings[movies.columns],
ratings['score'],
random_state=406,
test_size=.1)
reg = LinearRegression()
reg.fit(x_train, y_train)
predictions = reg.predict(movies)
movies.reset_index()
movies['predict'] = predictions
print('END TIME : ', str(datetime.now())[10:19])
predicted_score = movies.at[movie_id, 'predict']
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
print()
return pd.DataFrame.to_json(movies['predict'])
def contentsbased3(user_id, movie_id, movies_p):
print('======== 특정 영화 예상평점 - 장르 & 감독 & 배우 ===========')
print('START TIME : ', str(datetime.now())[10:19])
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
movies = movies_p
conn.close()
ratings = ratings.merge(movies, left_on='movie_id', right_index=True)
train, test = train_test_split(ratings, test_size=0.1, random_state=406)
x_train = train[movies.columns]
y_train = train['score']
reg = Lasso(alpha=0.03)
reg.fit(x_train, y_train)
user_profile = []
user_profile.append([reg.intercept_, *reg.coef_])
user_profile = pd.DataFrame(user_profile,
index=train['user_id'].unique(),
columns=['intercept', *movies.columns])
intercept = user_profile.loc[user_id, 'intercept']
columns_score = sum(user_profile.loc[user_id, movies.columns] * movies.loc[movie_id, movies.columns])
predicted_score = intercept + columns_score
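    # reproduces the fitted linear model by hand for one movie: intercept + sum(coef_i * feature_i),
    # i.e. the same value reg.predict() would return for that row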
print('END TIME : ', str(datetime.now())[10:19])
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
print()
return predicted_score
def contentsbased4(user_id, movie_id, movies_p):
print('======== 전체 영화 예상평점 - 장르 & 감독 ===========')
print('START TIME : ',str(datetime.now())[10:19] )
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
movies = movies_p
conn.close()
ratings = ratings.merge(movies, left_on='movie_id', right_index=True)
x_train, x_test, y_train, y_test = train_test_split(ratings[movies.columns],
ratings['score'],
random_state=406,
test_size=0.1)
reg = LinearRegression()
reg.fit(x_train, y_train)
predictions = reg.predict(movies)
movies.reset_index()
movies['predict'] = predictions
predicted_score = movies.at[movie_id, 'predict']
print('END TIME : ', str(datetime.now())[10:19])
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
return pd.DataFrame.to_json(movies['predict'])
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
a = time.time()
genres = pd.read_pickle(os.path.join(BASE_DIR, 'movie_director_train.p'))
b = time.time()
print('Time to read pickle file 1: ', b - a)
movies = pd.read_pickle(os.path.join(BASE_DIR, 'movie_train.p'))
c = time.time()
print('Time to read pickle file 2: ', c - b)
directors = pd.read_pickle(os.path.join(BASE_DIR, 'movie_director_train.p'))
d = time.time()
print('Time to read pickle file 3: ', d - c)
print()
contentsbased1(9000007, 10016, genres)
contentsbased2(9000007, 10016, movies)
contentsbased3(9000007, 10016, movies)
contentsbased4(9000007, 10016, directors)
``` |
{
"source": "jheidel/ece4760-lab5",
"score": 3
} |
#### File: ece4760-lab5/pylib/laserviz.py
```python
from gi.repository import Gtk, Gdk, Pango, PangoCairo
from threading import Thread, Event
from time import sleep
import cairo
import math
class LaserViz(Gtk.DrawingArea):
def __init__(self, parent):
Gtk.DrawingArea.__init__(self)
self.connect('draw', self._do_expose)
self.parent = parent
self.show()
self.container = parent.builder.get_object("laserVizBox")
self.container.pack_start(self, True, True, 0)
self.ildaframe = None
def set_frame(self, frame):
self.ildaframe = frame
self.queue_draw()
def _do_expose(self, widget, cr):
allocation = self.get_allocation()
width = allocation.width
height = allocation.height
#draw background
cr.set_source_rgb(0,0,0)
cr.rectangle(0,0, width, height)
cr.fill()
if self.ildaframe is not None:
cr.set_source_rgb(255,0,255)
cr.move_to(20,20)
cr.show_text("Frame points: %d" % self.ildaframe.get_length())
cr.set_line_width(1.0)
past_xy = None
for i,pt in enumerate(self.ildaframe.get_points()):
radius = 1
if (pt["blank"]):
cr.set_source_rgb(0.4,0.1,0) #dim laser (blank)
else:
cr.set_source_rgb(0,1.0,0) #green laser!
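                # map the signed 16-bit ILDA x/y range onto widget pixels (y inverted for screen coordinates)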
draw_x = width / 2 + float(pt["x"]) / 2**16 * width
draw_y = height / 2 - float(pt["y"]) / 2**16 * height
cr.arc(draw_x, draw_y, radius, 0, 2 * math.pi)
cr.fill()
if past_xy is not None:
#Draw line from past to present
(px, py) = past_xy
cr.move_to(px, py)
cr.line_to(draw_x, draw_y)
cr.stroke()
past_xy = (draw_x, draw_y)
``` |
{
"source": "jheidel/IrssiNotifier",
"score": 2
} |
#### File: Server/IrssiNotifierServer/datamodels.py
```python
import json
import logging
from google.appengine.ext import ndb
class Secret(ndb.Model):
secret = ndb.StringProperty()
class IrssiUser(ndb.Model):
user_name = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=True)
user_id = ndb.StringProperty(indexed=True)
api_token = ndb.StringProperty(indexed=True)
registration_date = ndb.IntegerProperty(indexed=False)
notification_count_since_licensed = ndb.IntegerProperty(indexed=False)
last_notification_time = ndb.IntegerProperty(indexed=False)
irssi_script_version = ndb.IntegerProperty(indexed=False)
license_timestamp = ndb.IntegerProperty(indexed=False)
class GcmToken(ndb.Model):
gcm_token = ndb.StringProperty(indexed=True)
enabled = ndb.BooleanProperty(indexed=True)
name = ndb.StringProperty(indexed=False)
registration_date = ndb.IntegerProperty(indexed=False)
class Message(ndb.Model):
_use_memcache = False
_use_cache = False
server_timestamp = ndb.IntegerProperty(indexed=True)
message = ndb.TextProperty(indexed=False)
channel = ndb.TextProperty(indexed=False)
nick = ndb.TextProperty(indexed=False)
def to_json(self):
return json.dumps(
{'server_timestamp': '%f' % self.server_timestamp,
'message': self.message,
'channel': self.channel,
'nick': self.nick,
'id': self.key.integer_id()})
def to_gcm_json(self):
values = {'server_timestamp': '%f' % self.server_timestamp,
'message': self.message,
'channel': self.channel,
'nick': self.nick,
'id': self.key.integer_id()}
#if self.key.integer_id() is not None:
# values['id'] = self.key.integer_id() #this breaks free apps prior to version 13
m = json.dumps(values)
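        # FCM caps data-message payloads (about 4 KB), so oversized messages are shortened to a placeholder below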
if len(m) < 3072:
return m
logging.warn("too big message %s, shortening" % len(m))
values['message'] = 'toolong'
return json.dumps(values)
class Nonce(ndb.Model):
nonce = ndb.IntegerProperty()
issue_timestamp = ndb.IntegerProperty()
class License(ndb.Model):
response_code = ndb.IntegerProperty(indexed=False)
nonce = ndb.IntegerProperty(indexed=False)
package_name = ndb.TextProperty(indexed=False)
version_code = ndb.TextProperty(indexed=False)
user_id = ndb.TextProperty(indexed=False)
timestamp = ndb.IntegerProperty(indexed=False)
extra_data = ndb.TextProperty(indexed=False)
receive_timestamp = ndb.IntegerProperty()
```
#### File: Server/IrssiNotifierServer/gcmhelper.py
```python
import traceback
from google.appengine.ext import deferred
from google.appengine.api.taskqueue import TransientError
from gcm import GCM
import logging
import dao
import sys
QueueName = 'gcmqueue'
def send_gcm_to_user_deferred(irssiuser, message):
logging.info("Queuing deferred task for sending message to user %s" % irssiuser.email)
key = irssiuser.key
try:
deferred.defer(_send_gcm_to_user, key, message, _queue=QueueName)
except TransientError:
logging.warn("Transient error: %s" % traceback.format_exc())
def _send_gcm_to_user(irssiuser_key, message):
logging.info("Executing deferred task: _send_gcm_to_user, %s, %s" % (irssiuser_key, message))
gcm = GCM(dao, sys.modules[__name__])
gcm.send_gcm_to_user(irssiuser_key, message)
def send_gcm_to_token_deferred(token, message):
logging.info("Queuing deferred task for sending message to token %s" % token.gcm_token)
key = token.key
try:
deferred.defer(_send_gcm_to_token, key, message, _queue=QueueName)
except TransientError:
logging.warn("Transient error: %s" % traceback.format_exc())
def _send_gcm_to_token(token_key, message):
logging.info("Executing deferred task: _send_gcm_to_token, %s, %s" % (token_key, message))
token = dao.get_gcm_token_for_key(token_key)
gcm = GCM(dao, sys.modules[__name__])
gcm.send_gcm([token], message)
```
#### File: Server/IrssiNotifierServer/gcm.py
```python
import socket
import traceback
import urllib2
import logging
from httplib import HTTPException
from urllib2 import HTTPError
import json
GcmUrl = "https://fcm.googleapis.com/fcm/send"
def is_set(key, arr):
return key in arr and arr[key] is not None and arr[key] != ""
class GCM(object):
authkey = None
def __init__(self, dao, gcmhelper):
self.tokens = []
self.dao = dao
self.gcmhelper = gcmhelper
if GCM.authkey is None:
GCM.authkey = self.dao.load_gcm_auth_key()
if GCM.authkey is None:
raise Exception("No auth key for FCM!")
def send_gcm_to_user(self, irssiuser_key, message):
logging.debug("Sending gcm message to user %s" % irssiuser_key)
if GCM.authkey is None:
logging.error("No auth key for FCM!")
return
tokens = self.dao.get_gcm_tokens_for_user_key(irssiuser_key)
self.send_gcm(tokens, message)
def send_gcm(self, tokens, message):
self.tokens = tokens
logging.info("Sending FCM message to %s tokens" % len(self.tokens))
if GCM.authkey is None:
logging.error("No auth key for FCM!")
return
if len(self.tokens) == 0:
logging.info("No tokens, stop sending")
return
response_json = self.send_request(message, self.tokens)
if response_json is None:
return # instant failure
        if response_json['failure'] == 0 and response_json['canonical_ids'] == 0:
return # success
results = response_json["results"]
index = -1
for result in results:
index += 1
token = self.tokens[index]
self.handle_gcm_result(result, token, message)
def send_request(self, message, tokens):
request = urllib2.Request(GcmUrl)
request.add_header('Authorization', 'key=%s' % GCM.authkey)
request.add_header('Content-Type', 'application/json')
json_request = {'data': {'message': message}, 'registration_ids': [], 'priority': 'high'}
for token in tokens:
json_request['registration_ids'].append(token.gcm_token)
request.add_data(json.dumps(json_request))
response_body = ''
try:
response = urllib2.urlopen(request)
response_body = response.read()
logging.debug("FCM Message sent, response: %s" % response_body)
return json.loads(response_body)
except HTTPError as e:
if 500 <= e.code < 600:
raise Exception("NOMAIL %s, retrying whole task" % e.code) # retry
else:
logging.error(
"Unable to send FCM message! Response code: %s, response body: %s " % (e.code, response_body))
return None # do not retry
except HTTPException as e:
logging.warn("HTTPException: Unable to send FCM message! %s" % traceback.format_exc())
raise HTTPException("NOMAIL %s " % e) # retry
except socket.error as e:
logging.warn("socket.error: Unable to send FCM message! %s" % traceback.format_exc())
raise HTTPException("NOMAIL %s " % e) # retry
except:
logging.error("Unable to send FCM message! %s" % traceback.format_exc())
return None
def handle_gcm_result(self, result, token, message):
if is_set("message_id", result):
if is_set("registration_id", result):
new_token = result["registration_id"]
self.replace_gcm_token_with_canonical(token, new_token)
else:
if is_set("error", result):
error = result["error"]
logging.warn("Error sending FCM message with authkey %s: %s" % (GCM.authkey, error))
if error == "Unavailable":
logging.warn("Token unavailable, retrying")
self.gcmhelper.send_gcm_to_token_deferred(token, message)
elif error == "NotRegistered":
logging.warn("Token not registered, deleting token")
self.dao.remove_gcm_token(token)
elif error == "InvalidRegistration":
logging.error("Invalid registration, deleting token")
self.dao.remove_gcm_token(token)
else:
if error == "InternalServerError":
logging.warn("InternalServerError in FCM: " + error)
else:
logging.error("Unrecoverable error in FCM: " + error)
def replace_gcm_token_with_canonical(self, token, new_token_id):
already_exists = new_token_id in [t.gcm_token for t in self.tokens]
if already_exists:
logging.info("Canonical token already exists, removing old one: %s" % (new_token_id))
self.dao.remove_gcm_token(token)
else:
logging.info("Updating token with canonical token: %s -> %s" % (token.gcm_token, new_token_id))
self.dao.update_gcm_token(token, new_token_id)
```
#### File: Server/IrssiNotifierServer/gcm_test.py
```python
import logging
from datamodels import GcmToken
from gcm import GCM
import json
import unittest
class MockDao():
def __init__(self):
self.removed_tokens = []
self.updated_tokens = []
def load_gcm_auth_key(self):
return '123'
def remove_gcm_token(self, token):
self.removed_tokens.append(token)
def update_gcm_token(self, token, new_token_id):
self.updated_tokens.append((token, new_token_id))
class MockGcmHelper():
def __init__(self):
self.sent_tokens = []
def send_gcm_to_token_deferred(self, token, message):
self.sent_tokens.append((token, message))
class TestGcm(unittest.TestCase):
def test_canonical_ids(self):
logging.root.setLevel(logging.DEBUG)
mock_dao = MockDao()
mock_helper = MockGcmHelper()
gcm = GCM(mock_dao, mock_helper)
gcm.tokens = [GcmToken(gcm_token='0'), GcmToken(gcm_token='1'), GcmToken(gcm_token='2'),
GcmToken(gcm_token='3'), GcmToken(gcm_token='4'), GcmToken(gcm_token='5'),
GcmToken(gcm_token='6'), GcmToken(gcm_token='7')]
message = 'testing testing 1 2 3'
response = {'multicast_id': 666, 'success': 4, 'canonical_ids': 2,
'results': [
{'error': 'something else'},
{'message_id': '11'}, # success
{'message_id': '22', 'registration_id': '3'}, # message with already existing canonical id
{'message_id': '33'}, # canonical id for previous
{'message_id': '44', 'registration_id': '123'}, # totally new canonical id
{'error': 'NotRegistered'}, # not registered
{'error': 'Unavailable'},
{'error': 'something else'}
]}
js = json.dumps(response)
response_json = json.loads(js)
results = response_json["results"]
index = -1
for result in results:
index += 1
token = gcm.tokens[index]
gcm.handle_gcm_result(result, token, message)
self.assertEqual(2, len(mock_dao.removed_tokens))
self.assertEqual('2', mock_dao.removed_tokens[0].gcm_token)
self.assertEqual('5', mock_dao.removed_tokens[1].gcm_token)
self.assertEqual(1, len(mock_dao.updated_tokens))
self.assertEqual('4', mock_dao.updated_tokens[0][0].gcm_token)
self.assertEqual('123', mock_dao.updated_tokens[0][1])
self.assertEqual(1, len(mock_helper.sent_tokens))
self.assertEqual('6', mock_helper.sent_tokens[0][0].gcm_token)
self.assertEqual(message, mock_helper.sent_tokens[0][1])
``` |