metadata (dict) | text (string, lengths 60–3.49M) |
---|---|
{
"source": "jpope8/seam-doppelganger",
"score": 4
} |
#### File: seam-doppelganger/src-python/prepareGray.py
```python
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from tqdm import tqdm
DATADIR = "./TrainPetImages"
CATEGORIES = ["Dog", "Cat"]
IMG_SIZE = 100
# https://pythonprogramming.net/loading-custom-data-deep-learning-python-tensorflow-keras/
# This is just a utility to see how images are resized
def viewImage():
for category in CATEGORIES: # do dogs and cats
path = os.path.join(DATADIR,category) # create path to dogs and cats
for img in os.listdir(path): # iterate over each image per dogs and cats
img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) # convert to array
plt.imshow(img_array, cmap='gray') # graph it
plt.show() # display!
break # we just want one for now so break
break #...and one more!
# Oh look, a dog!
print(img_array)
#And now it's shape:
print(img_array.shape)
# So that's a 375-tall, 500-wide image. Since we read it with IMREAD_GRAYSCALE
# it's a single channel (the color original would be 3-channel RGB). We
# definitely don't want the images that big, and the various images are also
# different shapes, which is another problem.
IMG_SIZE = 50
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')
plt.show()
# Hmm, that's a bit blurry I'd say. Let's go with 100x100?
IMG_SIZE = 100
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')
plt.show()
# Let's try that. Next, we're going to want to create training data and
# all that, but, first, we should set aside some images for final testing.
# I am going to just manually create a directory called Testing and
# then create 2 directories inside of there, one for Dog and one for Cat.
# From here, I am just going to move the first 15 images from
# both Dog and Cat into the testing directories. Make sure you move them,
# not copy. We will use these for our final tests.
# Now, we want to begin building our training data!
training_data = []
def create_training_data():
for category in CATEGORIES: # do dogs and cats
path = os.path.join(DATADIR,category) # create path to dogs and cats
class_num = CATEGORIES.index(category) # get the classification (0 or a 1). 0=dog 1=cat
for img in tqdm(os.listdir(path)): # iterate over each image per dogs and cats
try:
# convert to array
img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
# resize to normalize data size
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
# add this to our training_data
training_data.append([new_array, class_num])
except Exception as e: # in the interest in keeping the output clean...
pass
#except OSError as e:
# print("OSErrroBad img most likely", e, os.path.join(path,img))
#except Exception as e:
# print("general exception", e, os.path.join(path,img))
create_training_data()
print(len(training_data))
# Next, we want to shuffle the data. Right now our data is just all dogs,
# then all cats. This will usually wind up causing trouble too, as,
# initially, the classifier will learn to just predict dogs always.
# Then it will shift to oh, just predict all cats! Going back and
# forth like this is no good either.
import random
random.shuffle(training_data)
# Our training_data is a list, meaning it's mutable, so it's now
# nicely shuffled. We can confirm this by iterating over a few of
# the initial samples and printing out the class.
for sample in training_data[:10]:
print(sample[1])
# Great, we've got the classes nicely mixed in! Time to make our model!
X = []
y = []
for features,label in training_data:
X.append(features)
y.append(label)
# Pain in ass, convert to numpy
# Note the trailing 1 is the channel count: 1 for grayscale, 3 for color images
# The -1 tells reshape to infer the number of samples from the data
print(X[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# Let's save this data, so that we don't need to keep calculating it
# every time we want to play with the neural network model:
import pickle
pickle_out = open("X.pickle","wb")
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open("y.pickle","wb")
pickle.dump(y, pickle_out)
pickle_out.close()
# We can always load it back into our current script, or a totally new one, by doing:
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)
# Now that we've got our dataset, we're ready to cover convolutional
# neural networks and implement one with our data for classification.
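# As a minimal, hedged sketch of where this is heading (an illustration, not
# necessarily the tutorial's exact model -- it assumes TensorFlow/Keras is
# installed and the layer sizes are placeholder choices):
def build_sketch_cnn(input_shape):
    """Tiny example CNN for the pickled X/y; scale X with X / 255.0 before fitting."""
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
    model = Sequential([
        Conv2D(64, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(1, activation='sigmoid'),  # single output unit: dog vs cat
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
# Example use: model = build_sketch_cnn(X.shape[1:]); model.fit(X / 255.0, np.array(y), epochs=3)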
``` |
{
"source": "jpopelka/ansible-bender",
"score": 2
} |
#### File: ansible-bender/tests/spellbook.py
```python
import logging
import os
import random
import string
import subprocess
import pytest
from ansible_bender.builders.buildah_builder import buildah
from ansible_bender.utils import set_logging
set_logging(level=logging.DEBUG)
tests_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(tests_dir)
data_dir = os.path.abspath(os.path.join(tests_dir, "data"))
roles_dir = os.path.join(data_dir, "roles")
buildah_inspect_data_path = os.path.join(data_dir, "buildah_inspect.json")
basic_playbook_path = os.path.join(data_dir, "basic_playbook.yaml")
multiplay_path = os.path.join(data_dir, "multiplay.yaml")
non_ex_pb = os.path.join(data_dir, "non_ex_pb.yaml")
b_p_w_vars_path = os.path.join(data_dir, "b_p_w_vars.yaml")
p_w_vars_files_path = os.path.join(data_dir, "p_w_vars_files.yaml")
full_conf_pb_path = os.path.join(data_dir, "full_conf_pb.yaml")
basic_playbook_path_w_bv = os.path.join(data_dir, "basic_playbook_with_volume.yaml")
dont_cache_playbook_path_pre = os.path.join(data_dir, "dont_cache_playbook_pre.yaml")
dont_cache_playbook_path = os.path.join(data_dir, "dont_cache_playbook.yaml")
small_basic_playbook_path = os.path.join(data_dir, "small_basic_playbook.yaml")
change_layering_playbook = os.path.join(data_dir, "change_layering.yaml")
bad_playbook_path = os.path.join(data_dir, "bad_playbook.yaml")
role_pb_path = os.path.join(data_dir, "role.yaml")
base_image = "docker.io/library/python:3-alpine"
C7_AP_VER_OUT = """\
ansible-playbook 2.4.2.0
config file = /etc/ansible/ansible.cfg
configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible-playbook
python version = 2.7.5 (default, Oct 30 2018, 23:45:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]
"""
def random_word(length):
# https://stackoverflow.com/a/2030081/909579
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(length))
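# Hypothetical usage note: random_word() gives unique lowercase names, which is
# handy for naming test images/containers so parallel test runs don't collide.
if __name__ == "__main__":
    print("ab-test-" + random_word(12))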
``` |
{
"source": "jpopelka/atomic-reactor",
"score": 2
} |
#### File: atomic_reactor/plugins/exit_koji_promote.py
```python
from __future__ import unicode_literals
from collections import namedtuple
import json
import os
import random
from string import ascii_letters
import subprocess
from tempfile import NamedTemporaryFile
import time
from atomic_reactor import __version__ as atomic_reactor_version
from atomic_reactor import start_time as atomic_reactor_start_time
from atomic_reactor.plugin import ExitPlugin
from atomic_reactor.source import GitSource
from atomic_reactor.plugins.post_rpmqa import PostBuildRPMqaPlugin
from atomic_reactor.plugins.pre_add_filesystem import AddFilesystemPlugin
from atomic_reactor.constants import PROG
from atomic_reactor.util import (get_version_of_tools, get_checksums,
get_build_json, get_preferred_label)
from atomic_reactor.koji_util import create_koji_session, TaskWatcher
from dockerfile_parse import DockerfileParser
from osbs.conf import Configuration
from osbs.api import OSBS
from osbs.exceptions import OsbsException
# An output file and its metadata
Output = namedtuple('Output', ['file', 'metadata'])
class KojiUploadLogger(object):
def __init__(self, logger, notable_percent=10):
self.logger = logger
self.notable_percent = notable_percent
self.last_percent_done = 0
def callback(self, offset, totalsize, size, t1, t2): # pylint: disable=W0613
if offset == 0:
self.logger.debug("upload size: %.1fMiB", totalsize / 1024.0 / 1024)
if not totalsize or not t1:
return
percent_done = 100 * offset / totalsize
if (percent_done >= 99 or
percent_done - self.last_percent_done >= self.notable_percent):
self.last_percent_done = percent_done
self.logger.debug("upload: %d%% done (%.1f MiB/sec)",
percent_done, size / t1 / 1024 / 1024)
class KojiPromotePlugin(ExitPlugin):
"""
Promote this build to Koji
Submits a successful build to Koji using the Content Generator API,
https://fedoraproject.org/wiki/Koji/ContentGenerators
Authentication is with Kerberos unless the koji_ssl_certs
configuration parameter is given, in which case it should be a
path at which 'cert', 'ca', and 'serverca' are the certificates
for SSL authentication.
If Kerberos is used for authentication, the default principal will
be used (from the kernel keyring) unless both koji_keytab and
koji_principal are specified. The koji_keytab parameter is a
keytab name like 'type:name', and so can be used to specify a key
in a Kubernetes secret by specifying 'FILE:/path/to/key'.
If metadata_only is set, the 'docker save' image will not be
uploaded, only the logs. The import will be marked as
metadata-only.
Runs as an exit plugin in order to capture logs from all other
plugins.
"""
key = "koji_promote"
is_allowed_to_fail = False
def __init__(self, tasker, workflow, kojihub, url,
verify_ssl=True, use_auth=True,
koji_ssl_certs=None, koji_proxy_user=None,
koji_principal=None, koji_keytab=None,
metadata_only=False, blocksize=None,
target=None, poll_interval=5):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param kojihub: string, koji hub (xmlrpc)
:param url: string, URL for OSv3 instance
:param verify_ssl: bool, verify OSv3 SSL certificate?
:param use_auth: bool, initiate authentication with OSv3?
:param koji_ssl_certs: str, path to 'cert', 'ca', 'serverca'
:param koji_proxy_user: str, user to log in as (requires hub config)
:param koji_principal: str, Kerberos principal (must specify keytab)
:param koji_keytab: str, keytab name (must specify principal)
:param metadata_only: bool, whether to omit the 'docker save' image
:param blocksize: int, blocksize to use for uploading files
:param target: str, koji target
:param poll_interval: int, seconds between Koji task status requests
"""
super(KojiPromotePlugin, self).__init__(tasker, workflow)
self.kojihub = kojihub
self.koji_ssl_certs = koji_ssl_certs
self.koji_proxy_user = koji_proxy_user
self.koji_principal = koji_principal
self.koji_keytab = koji_keytab
self.metadata_only = metadata_only
self.blocksize = blocksize
self.target = target
self.poll_interval = poll_interval
self.namespace = get_build_json().get('metadata', {}).get('namespace', None)
osbs_conf = Configuration(conf_file=None, openshift_uri=url,
use_auth=use_auth, verify_ssl=verify_ssl,
namespace=self.namespace)
self.osbs = OSBS(osbs_conf, osbs_conf)
self.build_id = None
self.nvr_image = None
@staticmethod
def parse_rpm_output(output, tags, separator=';'):
"""
Parse output of the rpm query.
:param output: list, decoded output (str) from the rpm subprocess
:param tags: list, str fields used for query output
:param separator: str, separator between fields in the query format
:return: list, dicts describing each rpm package
"""
def field(tag):
"""
Get a field value by name
"""
try:
value = fields[tags.index(tag)]
except ValueError:
return None
if value == '(none)':
return None
return value
components = []
sigmarker = 'Key ID '
for rpm in output:
fields = rpm.rstrip('\n').split(separator)
if len(fields) < len(tags):
continue
signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig')
if signature:
parts = signature.split(sigmarker, 1)
if len(parts) > 1:
signature = parts[1]
component_rpm = {
'type': 'rpm',
'name': field('NAME'),
'version': field('VERSION'),
'release': field('RELEASE'),
'arch': field('ARCH'),
'sigmd5': field('SIGMD5'),
'signature': signature,
}
# Special handling for epoch as it must be an integer or None
epoch = field('EPOCH')
if epoch is not None:
epoch = int(epoch)
component_rpm['epoch'] = epoch
if component_rpm['name'] != 'gpg-pubkey':
components.append(component_rpm)
return components
def get_rpms(self):
"""
Build a list of installed RPMs in the format required for the
metadata.
"""
tags = [
'NAME',
'VERSION',
'RELEASE',
'ARCH',
'EPOCH',
'SIGMD5',
'SIGPGP:pgpsig',
'SIGGPG:pgpsig',
]
sep = ';'
fmt = sep.join(["%%{%s}" % tag for tag in tags])
cmd = "/bin/rpm -qa --qf '{0}\n'".format(fmt)
try:
# py3
(status, output) = subprocess.getstatusoutput(cmd)
except AttributeError:
# py2
with open('/dev/null', 'r+') as devnull:
p = subprocess.Popen(cmd,
shell=True,
stdin=devnull,
stdout=subprocess.PIPE,
stderr=devnull)
(stdout, stderr) = p.communicate()
status = p.wait()
output = stdout.decode()
if status != 0:
self.log.debug("%s: stderr output: %s", cmd, stderr)
raise RuntimeError("%s: exit code %s" % (cmd, status))
return self.parse_rpm_output(output.splitlines(), tags, separator=sep)
def get_output_metadata(self, path, filename):
"""
Describe a file by its metadata.
:return: dict
"""
checksums = get_checksums(path, ['md5'])
metadata = {'filename': filename,
'filesize': os.path.getsize(path),
'checksum': checksums['md5sum'],
'checksum_type': 'md5'}
if self.metadata_only:
metadata['metadata_only'] = True
return metadata
def get_builder_image_id(self):
"""
Find out the docker ID of the buildroot image we are in.
"""
try:
buildroot_tag = os.environ["OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE"]
except KeyError:
return ''
try:
pod = self.osbs.get_pod_for_build(self.build_id)
all_images = pod.get_container_image_ids()
except OsbsException as ex:
self.log.error("unable to find image id: %r", ex)
return buildroot_tag
try:
return all_images[buildroot_tag]
except KeyError:
self.log.error("Unable to determine buildroot image ID for %s",
buildroot_tag)
return buildroot_tag
def get_buildroot(self, build_id):
"""
Build the buildroot entry of the metadata.
:return: dict, partial metadata
"""
docker_version = self.tasker.get_version()
docker_info = self.tasker.get_info()
host_arch = docker_version['Arch']
if host_arch == 'amd64':
host_arch = 'x86_64'
buildroot = {
'id': 1,
'host': {
'os': docker_info['OperatingSystem'],
'arch': host_arch,
},
'content_generator': {
'name': PROG,
'version': atomic_reactor_version,
},
'container': {
'type': 'docker',
'arch': os.uname()[4],
},
'tools': [
{
'name': tool['name'],
'version': tool['version'],
}
for tool in get_version_of_tools()] + [
{
'name': 'docker',
'version': docker_version['Version'],
},
],
'components': self.get_rpms(),
'extra': {
'osbs': {
'build_id': build_id,
'builder_image_id': self.get_builder_image_id(),
}
},
}
return buildroot
def get_logs(self):
"""
Build the logs entry for the metadata 'output' section
:return: list, Output instances
"""
output = []
# Collect logs from server
try:
logs = self.osbs.get_build_logs(self.build_id)
except OsbsException as ex:
self.log.error("unable to get build logs: %r", ex)
else:
# Deleted once closed
logfile = NamedTemporaryFile(prefix=self.build_id,
suffix=".log",
mode='w')
logfile.write(logs)
logfile.flush()
metadata = self.get_output_metadata(logfile.name,
"openshift-final.log")
output.append(Output(file=logfile, metadata=metadata))
docker_logs = NamedTemporaryFile(prefix="docker-%s" % self.build_id,
suffix=".log",
mode='w')
docker_logs.write("\n".join(self.workflow.build_logs))
docker_logs.flush()
output.append(Output(file=docker_logs,
metadata=self.get_output_metadata(docker_logs.name,
"build.log")))
return output
def get_image_components(self):
"""
Re-package the output of the rpmqa plugin into the format required
for the metadata.
"""
try:
output = self.workflow.postbuild_results[PostBuildRPMqaPlugin.key]
except KeyError:
self.log.error("%s plugin did not run!",
PostBuildRPMqaPlugin.key)
return []
return self.parse_rpm_output(output, PostBuildRPMqaPlugin.rpm_tags,
separator=',')
def get_image_output(self, arch):
"""
Create the output for the image
This is the Koji Content Generator metadata, along with the
'docker save' output to upload.
For metadata-only builds, an empty file is used instead of the
output of 'docker save'.
:param arch: str, architecture for this output
:return: tuple, (metadata dict, Output instance)
"""
image_id = self.workflow.builder.image_id
saved_image = self.workflow.exported_image_sequence[-1].get('path')
ext = saved_image.split('.', 1)[1]
name_fmt = 'docker-image-{id}.{arch}.{ext}'
image_name = name_fmt.format(id=image_id, arch=arch, ext=ext)
if self.metadata_only:
metadata = self.get_output_metadata(os.path.devnull, image_name)
output = Output(file=None, metadata=metadata)
else:
metadata = self.get_output_metadata(saved_image, image_name)
output = Output(file=open(saved_image), metadata=metadata)
return metadata, output
def get_digests(self):
"""
Returns a map of repositories to digests
"""
digests = {} # repository -> digest
for registry in self.workflow.push_conf.docker_registries:
for image in self.workflow.tag_conf.images:
image_str = image.to_str()
if image_str in registry.digests:
digest = registry.digests[image_str]
digests[image.to_str(registry=False)] = digest
return digests
def get_repositories(self, digests):
"""
Build the repositories metadata
:param digests: dict, repository -> digest
"""
if self.workflow.push_conf.pulp_registries:
# If pulp was used, only report pulp images
registries = self.workflow.push_conf.pulp_registries
else:
# Otherwise report all the images we pushed
registries = self.workflow.push_conf.all_registries
output_images = []
for registry in registries:
image = self.nvr_image.copy()
image.registry = registry.uri
pullspec = image.to_str()
output_images.append(pullspec)
digest = digests.get(image.to_str(registry=False))
if digest:
digest_pullspec = image.to_str(tag=False) + "@" + digest
output_images.append(digest_pullspec)
return output_images
def get_output(self, buildroot_id):
"""
Build the 'output' section of the metadata.
:return: list, Output instances
"""
def add_buildroot_id(output):
logfile, metadata = output
metadata.update({'buildroot_id': buildroot_id})
return Output(file=logfile, metadata=metadata)
def add_log_type(output):
logfile, metadata = output
metadata.update({'type': 'log', 'arch': 'noarch'})
return Output(file=logfile, metadata=metadata)
output_files = [add_log_type(add_buildroot_id(metadata))
for metadata in self.get_logs()]
# Parent of squashed built image is base image
image_id = self.workflow.builder.image_id
parent_id = self.workflow.base_image_inspect['Id']
digests = self.get_digests()
repositories = self.get_repositories(digests)
arch = os.uname()[4]
metadata, output = self.get_image_output(arch)
metadata.update({
'arch': arch,
'type': 'docker-image',
'components': self.get_image_components(),
'extra': {
'image': {
'arch': arch,
},
'docker': {
'id': image_id,
'parent_id': parent_id,
'repositories': repositories,
},
},
})
# Add the 'docker save' image to the output
image = add_buildroot_id(output)
output_files.append(image)
return output_files
def get_build(self, metadata):
start_time = int(atomic_reactor_start_time)
labels = DockerfileParser(self.workflow.builder.df_path).labels
component = get_preferred_label(labels, 'com.redhat.component')
version = get_preferred_label(labels, 'version')
release = get_preferred_label(labels, 'release')
source = self.workflow.source
if not isinstance(source, GitSource):
raise RuntimeError('git source required')
extra = {'image': {}}
koji_task_id = metadata.get('labels', {}).get('koji-task-id')
if koji_task_id is not None:
self.log.info("build configuration created by Koji Task ID %s",
koji_task_id)
extra['container_koji_task_id'] = koji_task_id
fs_result = self.workflow.prebuild_results.get(AddFilesystemPlugin.key)
if fs_result is not None:
try:
task_id = fs_result['filesystem-koji-task-id']
except KeyError:
self.log.error("%s: expected filesystem-koji-task-id in result",
AddFilesystemPlugin.key)
else:
extra['filesystem_koji_task_id'] = str(task_id)
build = {
'name': component,
'version': version,
'release': release,
'source': "{0}#{1}".format(source.uri, source.commit_id),
'start_time': start_time,
'end_time': int(time.time()),
'extra': extra,
}
if self.metadata_only:
build['metadata_only'] = True
return build
def get_metadata(self):
"""
Build the metadata needed for importing the build
:return: tuple, the metadata and the list of Output instances
"""
try:
metadata = get_build_json()["metadata"]
self.build_id = metadata["name"]
except KeyError:
self.log.error("No build metadata")
raise
for image in self.workflow.tag_conf.primary_images:
# dash at first/last position does not count
if '-' in image.tag[1:-1]:
self.nvr_image = image
break
else:
raise RuntimeError('Unable to determine name:version-release')
metadata_version = 0
build = self.get_build(metadata)
buildroot = self.get_buildroot(build_id=self.build_id)
output_files = self.get_output(buildroot['id'])
koji_metadata = {
'metadata_version': metadata_version,
'build': build,
'buildroots': [buildroot],
'output': [output.metadata for output in output_files],
}
return koji_metadata, output_files
def upload_file(self, session, output, serverdir):
"""
Upload a file to koji
:return: str, pathname on server
"""
name = output.metadata['filename']
self.log.debug("uploading %r to %r as %r",
output.file.name, serverdir, name)
kwargs = {}
if self.blocksize is not None:
kwargs['blocksize'] = self.blocksize
self.log.debug("using blocksize %d", self.blocksize)
upload_logger = KojiUploadLogger(self.log)
session.uploadWrapper(output.file.name, serverdir, name=name,
callback=upload_logger.callback, **kwargs)
path = os.path.join(serverdir, name)
self.log.debug("uploaded %r", path)
return path
@staticmethod
def get_upload_server_dir():
"""
Create a path name for uploading files to
:return: str, path name expected to be unique
"""
dir_prefix = 'koji-promote'
random_chars = ''.join([random.choice(ascii_letters)
for _ in range(8)])
unique_fragment = '%r.%s' % (time.time(), random_chars)
return os.path.join(dir_prefix, unique_fragment)
def login(self):
"""
Log in to koji
:return: koji.ClientSession instance, logged in
"""
auth_info = {
"proxyuser": self.koji_proxy_user,
"ssl_certs_dir": self.koji_ssl_certs,
"krb_principal": self.koji_principal,
"krb_keytab": self.koji_keytab
}
return create_koji_session(self.kojihub, auth_info)
def run(self):
"""
Run the plugin.
"""
if ((self.koji_principal and not self.koji_keytab) or
(self.koji_keytab and not self.koji_principal)):
raise RuntimeError("specify both koji_principal and koji_keytab "
"or neither")
# Only run if the build was successful
if self.workflow.build_process_failed:
self.log.info("Not promoting failed build to koji")
return
koji_metadata, output_files = self.get_metadata()
try:
session = self.login()
server_dir = self.get_upload_server_dir()
for output in output_files:
if output.file:
self.upload_file(session, output, server_dir)
finally:
for output in output_files:
if output.file:
output.file.close()
try:
build_info = session.CGImport(koji_metadata, server_dir)
except Exception:
self.log.debug("metadata: %r", koji_metadata)
raise
# Older versions of CGImport do not return a value.
build_id = build_info.get("id") if build_info else None
self.log.debug("Build information: %s",
json.dumps(build_info, sort_keys=True, indent=4))
# Tag the build
if build_id is not None and self.target is not None:
self.log.debug("Finding build tag for target %s", self.target)
target_info = session.getBuildTarget(self.target)
build_tag = target_info['dest_tag_name']
self.log.info("Tagging build with %s", build_tag)
task_id = session.tagBuild(build_tag, build_id)
task = TaskWatcher(session, task_id,
poll_interval=self.poll_interval)
task.wait()
if task.failed():
raise RuntimeError("Task %s failed to tag koji build" % task_id)
return build_id
```
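The constructor documented above doubles as the plugin's configuration surface. As a hedged sketch (the values are placeholders, not defaults), an exit-plugin entry for koji_promote might look like this:

```python
# Hypothetical configuration entry for the koji_promote exit plugin; the arg
# names mirror KojiPromotePlugin.__init__, the values are made-up placeholders.
exit_plugins = [
    {
        "name": "koji_promote",
        "args": {
            "kojihub": "https://koji.example.com/kojihub",  # koji hub (xmlrpc)
            "url": "https://openshift.example.com/",        # OSv3 instance
            "verify_ssl": True,
            "use_auth": True,
            "koji_ssl_certs": "/var/run/secrets/koji",      # dir holding cert/ca/serverca
            "target": "example-candidate",                  # optional koji target to tag into
            "poll_interval": 5,
        },
    },
]
```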
#### File: atomic_reactor/plugins/input_osv3.py
```python
import json
import os
from atomic_reactor.plugin import InputPlugin
from atomic_reactor.util import get_build_json
class OSv3InputPlugin(InputPlugin):
key = "osv3"
def __init__(self, **kwargs):
"""
constructor
"""
# call parent constructor
super(OSv3InputPlugin, self).__init__(**kwargs)
def run(self):
"""
each plugin has to implement this method -- it is what actually runs the plugin
the response from the plugin is kept and used in the json result response
"""
build_json = get_build_json()
git_url = os.environ['SOURCE_URI']
git_ref = os.environ.get('SOURCE_REF', None)
image = os.environ['OUTPUT_IMAGE']
self.target_registry = os.environ.get('OUTPUT_REGISTRY', None)
try:
self.plugins_json = os.environ['ATOMIC_REACTOR_PLUGINS']
except KeyError:
try:
self.plugins_json = os.environ['DOCK_PLUGINS']
except KeyError:
raise RuntimeError("No plugin configuration found!")
else:
self.log.warning("DOCK_PLUGINS is deprecated - please update your osbs-client!")
self.plugins_json = json.loads(self.plugins_json)
input_json = {
'source': {
'provider': 'git',
'uri': git_url,
'provider_params': {'git_commit': git_ref}
},
'image': image,
'openshift_build_selflink': build_json.get('metadata', {}).get('selfLink', None)
}
input_json.update(self.plugins_json)
self.log.debug("build json: %s", input_json)
return input_json
@classmethod
def is_autousable(cls):
return 'BUILD' in os.environ and 'SOURCE_URI' in os.environ and 'OUTPUT_IMAGE' in os.environ
```
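OSv3InputPlugin.run() is driven entirely by environment variables. A minimal sketch of the environment it expects, with hypothetical values (is_autousable additionally requires BUILD to be set):

```python
import json
import os

# Hypothetical environment for the osv3 input plugin; the variable names come
# from input_osv3.py above, the values are placeholders.
os.environ["BUILD"] = json.dumps({"metadata": {"selfLink": "example-selflink"}})
os.environ["SOURCE_URI"] = "git://example.com/repo.git"            # required
os.environ["SOURCE_REF"] = "master"                                # optional
os.environ["OUTPUT_IMAGE"] = "example/image:latest"                # required
os.environ["OUTPUT_REGISTRY"] = "registry.example.com"             # optional
os.environ["ATOMIC_REACTOR_PLUGINS"] = json.dumps({"prebuild_plugins": []})
```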
#### File: atomic_reactor/plugins/input_path.py
```python
import json
import os
from atomic_reactor.constants import CONTAINER_BUILD_JSON_PATH
from atomic_reactor.plugin import InputPlugin
class PathInputPlugin(InputPlugin):
key = "path"
def __init__(self, path=None, **kwargs):
"""
constructor
"""
# call parent constructor
super(PathInputPlugin, self).__init__(**kwargs)
self.path = path
def run(self):
"""
get json with build config from path
"""
path = self.path or CONTAINER_BUILD_JSON_PATH
try:
with open(path, 'r') as build_cfg_fd:
build_cfg_json = json.load(build_cfg_fd)
except ValueError:
self.log.error("couldn't decode json from file '%s'", path)
return None
except IOError:
self.log.error("couldn't read json from file '%s'", path)
return None
else:
return self.substitute_configuration(build_cfg_json)
@classmethod
def is_autousable(cls):
return os.path.exists(CONTAINER_BUILD_JSON_PATH)
```
#### File: atomic_reactor/plugins/post_cp_built_image_to_nfs.py
```python
from __future__ import unicode_literals
import os
import shutil
import subprocess
import errno
from atomic_reactor.plugin import PostBuildPlugin
__all__ = ('CopyBuiltImageToNFSPlugin', )
DEFAULT_MOUNTPOINT = "/atomic-reactor-nfs-mountpoint/"
def mount(server_path, mountpoint, args=None, mount_type="nfs"):
args = args or ["nolock"]
rendered_args = ",".join(args)
cmd = [
"mount",
"-t", mount_type,
"-o", rendered_args,
server_path,
mountpoint
]
subprocess.check_call(cmd)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class CopyBuiltImageToNFSPlugin(PostBuildPlugin):
"""
Workflow of this plugin:
1. mount NFS
2. create subdir (`dest_dir`)
3. copy squashed image to $NFS/$dest_dir/
"""
key = "cp_built_image_to_nfs"
is_allowed_to_fail = False
def __init__(self, tasker, workflow, nfs_server_path, dest_dir=None,
mountpoint=DEFAULT_MOUNTPOINT):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param nfs_server_path: str, $server:$path of NFS share
:param dest_dir: this directory will be created on the NFS share and the built image
will be copied into it; if not specified, copy to the root of the share
:param mountpoint: str, path where NFS share will be mounted
"""
# call parent constructor
super(CopyBuiltImageToNFSPlugin, self).__init__(tasker, workflow)
self.nfs_server_path = nfs_server_path
self.dest_dir = dest_dir
self.mountpoint = mountpoint
self.absolute_dest_dir = self.mountpoint
if self.dest_dir:
self.absolute_dest_dir = os.path.join(self.mountpoint, self.dest_dir)
self.log.debug("destination dir = %s", self.absolute_dest_dir)
def mount_nfs(self):
self.log.debug("create mountpoint %s", self.mountpoint)
mkdir_p(self.mountpoint)
self.log.debug("mount NFS %r at %s", self.nfs_server_path, self.mountpoint)
mount(self.nfs_server_path, self.mountpoint)
def run(self):
if len(self.workflow.exported_image_sequence) == 0:
raise RuntimeError('no exported image to upload to nfs')
source_path = self.workflow.exported_image_sequence[-1].get("path")
if not source_path or not os.path.isfile(source_path):
raise RuntimeError("squashed image does not exist: %s", source_path)
self.mount_nfs()
if self.dest_dir:
try:
mkdir_p(self.absolute_dest_dir)
except (IOError, OSError) as ex:
self.log.error("couldn't create %s: %r", self.dest_dir, ex)
raise
fname = os.path.basename(source_path)
expected_image_path = os.path.join(self.absolute_dest_dir, fname)
if os.path.isfile(expected_image_path):
raise RuntimeError("%s already exists!" % expected_image_path)
self.log.info("starting copying the image; this may take a while")
try:
shutil.copy2(source_path, self.absolute_dest_dir)
except (IOError, OSError) as ex:
self.log.error("couldn't copy %s into %s: %r", source_path, self.dest_dir, ex)
raise
if os.path.isfile(os.path.join(self.absolute_dest_dir, fname)):
self.log.debug("CopyBuiltImagePlugin.run() success")
else:
self.log.error("CopyBuiltImagePlugin.run() unknown error")
```
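For reference, the mount() helper above just shells out to mount(8); with the defaults it is equivalent to running something like this (the server path is hypothetical):

```python
import subprocess

# Equivalent of mount("nfs.example.com:/exports/images", DEFAULT_MOUNTPOINT)
# with the default args=["nolock"] and mount_type="nfs".
subprocess.check_call([
    "mount",
    "-t", "nfs",
    "-o", "nolock",
    "nfs.example.com:/exports/images",      # hypothetical $server:$path
    "/atomic-reactor-nfs-mountpoint/",      # DEFAULT_MOUNTPOINT
])
```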
#### File: atomic_reactor/plugins/pre_pull_base_image.py
```python
from __future__ import unicode_literals
from atomic_reactor.plugin import PreBuildPlugin
class PullBaseImagePlugin(PreBuildPlugin):
key = "pull_base_image"
is_allowed_to_fail = False
def __init__(self, tasker, workflow, parent_registry=None, parent_registry_insecure=False):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param parent_registry: registry to enforce pulling from
:param parent_registry_insecure: allow connecting to the registry over plain http
"""
# call parent constructor
super(PullBaseImagePlugin, self).__init__(tasker, workflow)
self.parent_registry = parent_registry
self.parent_registry_insecure = parent_registry_insecure
def run(self):
"""
pull base image
"""
base_image = self.workflow.builder.base_image
if self.parent_registry is not None:
self.log.info("pulling base image '%s' from registry '%s'",
base_image, self.parent_registry)
else:
self.log.info("pulling base image '%s'", base_image)
base_image_with_registry = base_image.copy()
if self.parent_registry:
# registry in dockerfile doesn't match provided source registry
if base_image.registry and base_image.registry != self.parent_registry:
self.log.error("registry in dockerfile doesn't match provided source registry, "
"dockerfile = '%s', provided = '%s'",
base_image.registry, self.parent_registry)
raise RuntimeError(
"Registry specified in dockerfile doesn't match provided one. "
"Dockerfile: '%s', Provided: '%s'"
% (base_image.registry, self.parent_registry))
base_image_with_registry.registry = self.parent_registry
pulled_base = self.tasker.pull_image(base_image_with_registry,
insecure=self.parent_registry_insecure)
if (base_image_with_registry.namespace != 'library' and
not self.tasker.image_exists(base_image_with_registry.to_str())):
self.log.info("'%s' not found", base_image_with_registry.to_str())
base_image_with_registry.namespace = 'library'
self.log.info("trying '%s'", base_image_with_registry.to_str())
pulled_base = self.tasker.pull_image(base_image_with_registry,
insecure=self.parent_registry_insecure)
self.workflow.pulled_base_images.add(pulled_base)
if not base_image.registry:
response = self.tasker.tag_image(base_image_with_registry, base_image, force=True)
self.workflow.pulled_base_images.add(response)
pulled_base = response
self.log.debug("image '%s' is available", pulled_base)
```
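To make the registry and namespace handling above concrete, here is a small hedged sketch (the registry name is hypothetical) of the image names the plugin tries, using the same ImageName helper the tests import from atomic_reactor.util:

```python
from atomic_reactor.util import ImageName

# Dockerfile says "FROM fedora:22", parent_registry is enforced, and if the
# pulled name is missing the plugin falls back to the 'library' namespace.
base_image = ImageName(repo='fedora', tag='22')
with_registry = base_image.copy()
with_registry.registry = 'registry.example.com'   # hypothetical parent_registry
first_attempt = with_registry.to_str()            # roughly registry.example.com/fedora:22
with_registry.namespace = 'library'
fallback = with_registry.to_str()                 # roughly registry.example.com/library/fedora:22
print(first_attempt, fallback)
```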
#### File: atomic_reactor/plugins/pre_return_dockerfile.py
```python
from dockerfile_parse import DockerfileParser
from atomic_reactor.plugin import PreBuildPlugin
class CpDockerfilePlugin(PreBuildPlugin):
key = "dockerfile_content"
def __init__(self, tasker, workflow):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:return:
"""
# call parent constructor
super(CpDockerfilePlugin, self).__init__(tasker, workflow)
def run(self):
"""
try to open the dockerfile and return its content, or an error message if that fails
"""
try:
return DockerfileParser(self.workflow.builder.df_path).content
except (IOError, OSError) as ex:
return "Couldn't retrieve dockerfile: %r" % ex
```
#### File: atomic-reactor/atomic_reactor/source.py
```python
import logging
import copy
import os
import shutil
import tempfile
import collections
from atomic_reactor import util
from atomic_reactor.constants import SOURCE_DIRECTORY_NAME
logger = logging.getLogger(__name__)
# Intended for use as vcs-type, vcs-url and vcs-ref docker labels as defined
# in https://github.com/projectatomic/ContainerApplicationGenericLabels
VcsInfo = collections.namedtuple('VcsInfo', ['vcs_type', 'vcs_url', 'vcs_ref'])
class Source(object):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
self.provider = provider
self.uri = uri
self.dockerfile_path = dockerfile_path
self.provider_params = provider_params or {}
# TODO: do we want to delete tmpdir when destroying the object?
self.tmpdir = tmpdir or tempfile.mkdtemp()
logger.debug("workdir is %r", self.tmpdir)
self.source_path = os.path.join(self.tmpdir, SOURCE_DIRECTORY_NAME)
logger.debug("source path is %r", self.source_path)
@property
def path(self):
return self.get()
@property
def workdir(self):
return self.tmpdir
def get(self):
"""Run this to get source and save it to `tmpdir` or a newly created tmpdir."""
raise NotImplementedError('Must override in subclasses!')
def get_dockerfile_path(self):
# TODO: will we need figure_out_dockerfile as a separate method?
return util.figure_out_dockerfile(self.path, self.dockerfile_path)
def remove_tmpdir(self):
shutil.rmtree(self.tmpdir)
def get_vcs_info(self):
"""Returns VcsInfo namedtuple or None if not applicable."""
return None
class GitSource(Source):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
super(GitSource, self).__init__(provider, uri, dockerfile_path,
provider_params, tmpdir)
self.git_commit = self.provider_params.get('git_commit', None)
self.lg = util.LazyGit(self.uri, self.git_commit, self.source_path)
@property
def commit_id(self):
return self.lg.commit_id
def get(self):
return self.lg.git_path
def get_vcs_info(self):
return VcsInfo(
vcs_type='git',
vcs_url=self.lg.git_url,
vcs_ref=self.lg.commit_id
)
class PathSource(Source):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
super(PathSource, self).__init__(provider, uri, dockerfile_path,
provider_params, tmpdir)
# make sure we have canonical URI representation even if we got path without "file://"
if not self.uri.startswith('file://'):
self.uri = 'file://' + self.uri
self.schemeless_path = self.uri[len('file://'):]
os.makedirs(self.source_path)
def get(self):
# work around the weird behaviour of copytree, which requires the top dir
# to *not* exist
for f in os.listdir(self.schemeless_path):
old = os.path.join(self.schemeless_path, f)
new = os.path.join(self.source_path, f)
if os.path.exists(new):
# this is the second invocation of this method; just break the loop
break
else:
if os.path.isdir(old):
shutil.copytree(old, new)
else:
shutil.copy2(old, new)
return self.source_path
def get_source_instance_for(source, tmpdir=None):
validate_source_dict_schema(source)
klass = None
provider = source['provider'].lower()
if provider == 'git':
klass = GitSource
elif provider == 'path':
klass = PathSource
else:
raise ValueError('unknown source provider "{0}"'.format(provider))
# don't modify original source
args = copy.deepcopy(source)
args['tmpdir'] = tmpdir
return klass(**args)
def validate_source_dict_schema(sd):
if not isinstance(sd, dict):
raise ValueError('"source" must be a dict')
for k in ['provider', 'uri']:
if k not in sd:
raise ValueError('"source" must contain "{0}" key'.format(k))
```
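As a small usage sketch, the 'source' dict that get_source_instance_for() validates looks like this (the URIs and commit are hypothetical):

```python
from atomic_reactor.source import get_source_instance_for

# Hypothetical source dicts; 'provider' and 'uri' are the required keys checked
# by validate_source_dict_schema(), the rest are optional constructor params.
git_source = get_source_instance_for({
    'provider': 'git',
    'uri': 'git://example.com/repo.git',
    'provider_params': {'git_commit': 'master'},
})
path_source = get_source_instance_for({
    'provider': 'path',
    'uri': 'file:///tmp/some-checkout',
    'dockerfile_path': None,
})
print(git_source.source_path, path_source.source_path)
```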
#### File: tests/koji/__init__.py
```python
TASK_STATES = {
'FREE': 0,
'OPEN': 1,
'CLOSED': 2,
'CANCELED': 3,
'ASSIGNED': 4,
'FAILED': 5,
}
TASK_STATES.update({value: name for name, value in TASK_STATES.items()})
class ClientSession(object):
def __init__(self, hub):
raise ImportError("No module named koji")
class PathInfo(object):
def __init__(self, topdir=None):
raise ImportError("No module named koji")
```
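The update() call above makes TASK_STATES a two-way map, so mocked session code can translate between state names and numeric codes:

```python
from tests.koji import TASK_STATES

# Names map to codes and codes map back to names in the same dict.
assert TASK_STATES['CLOSED'] == 2
assert TASK_STATES[2] == 'CLOSED'
```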
#### File: tests/plugins/test_check_and_set_rebuild.py
```python
from __future__ import unicode_literals
import pytest
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner, PluginFailedException
from atomic_reactor.plugins.pre_check_and_set_rebuild import (is_rebuild,
CheckAndSetRebuildPlugin)
from atomic_reactor.util import ImageName
import json
from osbs.api import OSBS
import osbs.conf
from osbs.exceptions import OsbsResponseException
from flexmock import flexmock
from tests.constants import SOURCE, MOCK
if MOCK:
from tests.docker_mock import mock_docker
class X(object):
pass
class TestCheckRebuild(object):
def prepare(self, key, value, set_labels_args=None, set_labels_kwargs=None):
if MOCK:
mock_docker()
tasker = DockerTasker()
workflow = DockerBuildWorkflow(SOURCE, "test-image")
setattr(workflow, 'builder', X())
setattr(workflow.builder, 'image_id', 'asd123')
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora',
tag='22'))
setattr(workflow.builder, 'source', X())
setattr(workflow.builder.source, 'path', '/tmp')
setattr(workflow.builder.source, 'dockerfile_path', None)
expectation = (flexmock(OSBS)
.should_receive('set_labels_on_build_config'))
if set_labels_args is not None:
if set_labels_kwargs is None:
set_labels_kwargs = {}
expectation.with_args(*set_labels_args)
namespace = None
if set_labels_kwargs is not None:
namespace = set_labels_kwargs.get('namespace')
(flexmock(osbs.conf).should_call('Configuration')
.with_args(namespace=namespace, conf_file=None, verify_ssl=True, openshift_url="",
openshift_uri="", use_auth=True))
runner = PreBuildPluginsRunner(tasker, workflow, [
{
'name': CheckAndSetRebuildPlugin.key,
'args': {
'label_key': key,
'label_value': value,
'url': '',
},
}
])
return workflow, runner
def test_check_rebuild_no_build_json(self, monkeypatch):
workflow, runner = self.prepare('is_autorebuild', 'true')
monkeypatch.delenv('BUILD', raising=False)
with pytest.raises(PluginFailedException):
runner.run()
def test_check_no_buildconfig(self, monkeypatch):
key = 'is_autorebuild'
value = 'true'
workflow, runner = self.prepare(key, value)
monkeypatch.setenv("BUILD", json.dumps({
"metadata": {
"labels": {
key: value,
}
}
}))
# No buildconfig in metadata
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize(('namespace'), [None, 'my_namespace'])
def test_check_is_not_rebuild(self, namespace, monkeypatch):
key = 'is_autorebuild'
value = 'true'
buildconfig = "buildconfig1"
namespace_dict = {}
if namespace is not None:
namespace_dict["namespace"] = namespace
workflow, runner = self.prepare(key, value,
set_labels_args=(buildconfig,
{key: value}),
set_labels_kwargs=namespace_dict)
build_json = {
"metadata": {
"labels": {
"buildconfig": buildconfig,
key: "false",
}
}
}
build_json["metadata"].update(namespace_dict)
monkeypatch.setenv("BUILD", json.dumps(build_json))
runner.run()
assert workflow.prebuild_results[CheckAndSetRebuildPlugin.key] == False
assert not is_rebuild(workflow)
def test_check_is_rebuild(self, monkeypatch):
key = 'is_autorebuild'
value = 'true'
workflow, runner = self.prepare(key, value)
monkeypatch.setenv("BUILD", json.dumps({
"metadata": {
"labels": {
"buildconfig": "buildconfig1",
key: value,
}
}
}))
runner.run()
assert workflow.prebuild_results[CheckAndSetRebuildPlugin.key] == True
assert is_rebuild(workflow)
def test_409_response(self, monkeypatch):
key = 'is_autorebuild'
workflow, runner = self.prepare(key, 'true')
(flexmock(OSBS)
.should_receive('set_labels_on_build_config')
.twice()
.and_raise(OsbsResponseException('conflict', 409))
.and_return(None))
monkeypatch.setenv("BUILD", json.dumps({
"metadata": {
"labels": {
"buildconfig": "buildconfig1",
key: 'false',
}
}
}))
runner.run()
```
#### File: tests/plugins/test_koji_promote.py
```python
from __future__ import unicode_literals
import json
import os
try:
import koji
except ImportError:
import inspect
import sys
# Find our mocked koji module
import tests.koji as koji
mock_koji_path = os.path.dirname(inspect.getfile(koji.ClientSession))
if mock_koji_path not in sys.path:
sys.path.append(os.path.dirname(mock_koji_path))
# Now load it properly, the same way the plugin will
del koji
import koji
from atomic_reactor.core import DockerTasker
from atomic_reactor.plugins.exit_koji_promote import (KojiUploadLogger,
KojiPromotePlugin)
from atomic_reactor.plugins.post_rpmqa import PostBuildRPMqaPlugin
from atomic_reactor.plugins.pre_check_and_set_rebuild import CheckAndSetRebuildPlugin
from atomic_reactor.plugins.pre_add_filesystem import AddFilesystemPlugin
from atomic_reactor.plugin import ExitPluginsRunner, PluginFailedException
from atomic_reactor.inner import DockerBuildWorkflow, TagConf, PushConf
from atomic_reactor.util import ImageName
from atomic_reactor.source import GitSource, PathSource
from tests.constants import SOURCE, MOCK
from flexmock import flexmock
import pytest
from tests.docker_mock import mock_docker
import subprocess
from osbs.api import OSBS
from osbs.exceptions import OsbsException
from six import string_types
NAMESPACE = 'mynamespace'
BUILD_ID = 'build-1'
class X(object):
pass
class MockedPodResponse(object):
def get_container_image_ids(self):
return {'buildroot:latest': '0123456'}
class MockedClientSession(object):
TAG_TASK_ID = 1234
DEST_TAG = 'images-candidate'
def __init__(self, hub, task_states=None):
self.uploaded_files = []
self.build_tags = {}
self.task_states = task_states or ['FREE', 'ASSIGNED', 'CLOSED']
self.task_states = list(self.task_states)
self.task_states.reverse()
self.tag_task_state = self.task_states.pop()
def krb_login(self, principal=None, keytab=None, proxyuser=None):
return True
def ssl_login(self, cert, ca, serverca, proxyuser=None):
return True
def logout(self):
pass
def uploadWrapper(self, localfile, path, name=None, callback=None,
blocksize=1048576, overwrite=True):
self.uploaded_files.append(path)
self.blocksize = blocksize
def CGImport(self, metadata, server_dir):
self.metadata = metadata
self.server_dir = server_dir
return {"id": "123"}
def getBuildTarget(self, target):
return {'dest_tag_name': self.DEST_TAG}
def tagBuild(self, tag, build, force=False, fromtag=None):
self.build_tags[build] = tag
return self.TAG_TASK_ID
def getTaskInfo(self, task_id, request=False):
assert task_id == self.TAG_TASK_ID
# For extra code coverage, imagine Koji denies the task ever
# existed.
if self.tag_task_state is None:
return None
return {'state': koji.TASK_STATES[self.tag_task_state]}
def taskFinished(self, task_id):
try:
self.tag_task_state = self.task_states.pop()
except IndexError:
# No more state changes
pass
return self.tag_task_state in ['CLOSED', 'FAILED', 'CANCELED', None]
FAKE_SIGMD5 = b'0' * 32
FAKE_RPM_OUTPUT = (
b'name1;1.0;1;x86_64;0;' + FAKE_SIGMD5 + b';(none);'
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID <KEY>'
b'gpg-pubkey;01234567;01234567;(none);(none);(none);(none);(none)\n'
b'gpg-pubkey-doc;01234567;01234567;noarch;(none);' + FAKE_SIGMD5 +
b';(none);(none)\n'
b'name2;2.0;2;x86_64;0;' + FAKE_SIGMD5 + b';' +
b'RSA/SHA256, Mon 29 Jun 2015 13:58:22 BST, Key ID <KEY>;(none)\n'
b'\n')
FAKE_OS_OUTPUT = 'fedora-22'
def fake_subprocess_output(cmd):
if cmd.startswith('/bin/rpm'):
return FAKE_RPM_OUTPUT
elif 'os-release' in cmd:
return FAKE_OS_OUTPUT
else:
raise RuntimeError
class MockedPopen(object):
def __init__(self, cmd, *args, **kwargs):
self.cmd = cmd
def wait(self):
return 0
def communicate(self):
return (fake_subprocess_output(self.cmd), '')
def fake_Popen(cmd, *args, **kwargs):
return MockedPopen(cmd, *args, **kwargs)
def fake_digest(image):
tag = image.to_str(registry=False)
return 'sha256:{0:032x}'.format(len(tag))
def is_string_type(obj):
return any(isinstance(obj, strtype)
for strtype in string_types)
def mock_environment(tmpdir, session=None, name=None,
component=None, version=None, release=None,
source=None, build_process_failed=False,
is_rebuild=True, pulp_registries=0, blocksize=None,
task_states=None):
if session is None:
session = MockedClientSession('', task_states=None)
if source is None:
source = GitSource('git', 'git://hostname/path')
if MOCK:
mock_docker()
tasker = DockerTasker()
workflow = DockerBuildWorkflow(SOURCE, "test-image")
base_image_id = '123456parent-id'
setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
setattr(workflow, 'builder', X())
setattr(workflow.builder, 'image_id', '123456imageid')
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
setattr(workflow.builder, 'source', X())
setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
setattr(workflow.builder.source, 'dockerfile_path', None)
setattr(workflow.builder.source, 'path', None)
setattr(workflow, 'tag_conf', TagConf())
with open(os.path.join(str(tmpdir), 'Dockerfile'), 'wt') as df:
df.write('FROM base\n'
'LABEL BZComponent={component} com.redhat.component={component}\n'
'LABEL Version={version} version={version}\n'
'LABEL Release={release} release={release}\n'
.format(component=component, version=version, release=release))
setattr(workflow.builder, 'df_path', df.name)
if name and version:
workflow.tag_conf.add_unique_image('user/test-image:{v}-timestamp'
.format(v=version))
if name and version and release:
workflow.tag_conf.add_primary_images(["{0}:{1}-{2}".format(name,
version,
release),
"{0}:{1}".format(name, version),
"{0}:latest".format(name)])
flexmock(subprocess, Popen=fake_Popen)
flexmock(koji, ClientSession=lambda hub: session)
flexmock(GitSource)
(flexmock(OSBS)
.should_receive('get_build_logs')
.with_args(BUILD_ID)
.and_return('build logs'))
(flexmock(OSBS)
.should_receive('get_pod_for_build')
.with_args(BUILD_ID)
.and_return(MockedPodResponse()))
setattr(workflow, 'source', source)
setattr(workflow.source, 'lg', X())
setattr(workflow.source.lg, 'commit_id', '123456')
setattr(workflow, 'build_logs', ['docker build log\n'])
setattr(workflow, 'push_conf', PushConf())
docker_reg = workflow.push_conf.add_docker_registry('docker.example.com')
for image in workflow.tag_conf.images:
tag = image.to_str(registry=False)
docker_reg.digests[tag] = fake_digest(image)
for pulp_registry in range(pulp_registries):
workflow.push_conf.add_pulp_registry('env', 'pulp.example.com')
with open(os.path.join(str(tmpdir), 'image.tar.xz'), 'wt') as fp:
fp.write('x' * 2**12)
setattr(workflow, 'exported_image_sequence', [{'path': fp.name}])
setattr(workflow, 'build_failed', build_process_failed)
workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = is_rebuild
workflow.postbuild_results[PostBuildRPMqaPlugin.key] = [
"name1,1.0,1,x86_64,0,2000," + FAKE_SIGMD5.decode() + ",23000",
"name2,2.0,1,x86_64,0,3000," + FAKE_SIGMD5.decode() + ",24000",
]
return tasker, workflow
@pytest.fixture
def os_env(monkeypatch):
monkeypatch.setenv('BUILD', json.dumps({
"metadata": {
"creationTimestamp": "2015-07-27T09:24:00Z",
"namespace": NAMESPACE,
"name": BUILD_ID,
}
}))
monkeypatch.setenv('OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE', 'buildroot:latest')
def create_runner(tasker, workflow, ssl_certs=False, principal=None,
keytab=None, metadata_only=False, blocksize=None,
target=None):
args = {
'kojihub': '',
'url': '/',
}
if ssl_certs:
args['koji_ssl_certs'] = '/'
if principal:
args['koji_principal'] = principal
if keytab:
args['koji_keytab'] = keytab
if metadata_only:
args['metadata_only'] = True
if blocksize:
args['blocksize'] = blocksize
if target:
args['target'] = target
args['poll_interval'] = 0
runner = ExitPluginsRunner(tasker, workflow,
[
{
'name': KojiPromotePlugin.key,
'args': args,
},
])
return runner
class TestKojiUploadLogger(object):
@pytest.mark.parametrize('totalsize', [0, 1024])
def test_with_zero(self, totalsize):
logger = flexmock()
logger.should_receive('debug').once()
upload_logger = KojiUploadLogger(logger)
upload_logger.callback(0, totalsize, 0, 0, 0)
@pytest.mark.parametrize(('totalsize', 'step', 'expected_times'), [
(10, 1, 11),
(12, 1, 7),
(12, 3, 5),
])
def test_with_defaults(self, totalsize, step, expected_times):
logger = flexmock()
logger.should_receive('debug').times(expected_times)
upload_logger = KojiUploadLogger(logger)
upload_logger.callback(0, totalsize, 0, 0, 0)
for offset in range(step, totalsize + step, step):
upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
@pytest.mark.parametrize(('totalsize', 'step', 'notable', 'expected_times'), [
(10, 1, 10, 11),
(10, 1, 20, 6),
(10, 1, 25, 5),
(12, 3, 25, 5),
])
def test_with_notable(self, totalsize, step, notable, expected_times):
logger = flexmock()
logger.should_receive('debug').times(expected_times)
upload_logger = KojiUploadLogger(logger, notable_percent=notable)
for offset in range(0, totalsize + step, step):
upload_logger.callback(offset, totalsize, step, 1.0, 1.0)
class TestKojiPromote(object):
def test_koji_promote_failed_build(self, tmpdir, os_env):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
session=session,
build_process_failed=True,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
runner.run()
# Must not have promoted this build
assert not hasattr(session, 'metadata')
def test_koji_promote_no_tagconf(self, tmpdir, os_env):
tasker, workflow = mock_environment(tmpdir)
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_no_build_env(self, tmpdir, monkeypatch, os_env):
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
# No BUILD environment variable
monkeypatch.delenv("BUILD", raising=False)
with pytest.raises(PluginFailedException) as exc:
runner.run()
assert "plugin 'koji_promote' raised an exception: KeyError" in str(exc)
def test_koji_promote_no_build_metadata(self, tmpdir, monkeypatch, os_env):
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
# No BUILD metadata
monkeypatch.setenv("BUILD", json.dumps({}))
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_wrong_source_type(self, tmpdir, os_env):
source = PathSource('path', 'file:///dev/null')
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1',
source=source)
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException) as exc:
runner.run()
assert "plugin 'koji_promote' raised an exception: RuntimeError" in str(exc)
def test_koji_promote_log_task_id(self, tmpdir, monkeypatch, os_env,
caplog):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
koji_task_id = '12345'
monkeypatch.setenv("BUILD", json.dumps({
'metadata': {
'creationTimestamp': '2015-07-27T09:24:00Z',
'namespace': NAMESPACE,
'name': BUILD_ID,
'labels': {
'koji-task-id': koji_task_id,
},
}
}))
runner.run()
assert "Koji Task ID {}".format(koji_task_id) in caplog.text()
metadata = session.metadata
assert 'build' in metadata
build = metadata['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'container_koji_task_id' in extra
extra_koji_task_id = extra['container_koji_task_id']
assert is_string_type(extra_koji_task_id)
assert extra_koji_task_id == koji_task_id
@pytest.mark.parametrize('params', [
{
'should_raise': False,
'principal': None,
'keytab': None,
},
{
'should_raise': False,
'principal': '<EMAIL>',
'keytab': 'FILE:/var/run/secrets/mysecret',
},
{
'should_raise': True,
'principal': '<EMAIL>',
'keytab': None,
},
{
'should_raise': True,
'principal': None,
'keytab': 'FILE:/var/run/secrets/mysecret',
},
])
def test_koji_promote_krb_args(self, tmpdir, params, os_env):
session = MockedClientSession('')
expectation = flexmock(session).should_receive('krb_login').and_return(True)
name = 'name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
version=version,
release=release)
runner = create_runner(tasker, workflow,
principal=params['principal'],
keytab=params['keytab'])
if params['should_raise']:
expectation.never()
with pytest.raises(PluginFailedException):
runner.run()
else:
expectation.once()
runner.run()
def test_koji_promote_krb_fail(self, tmpdir, os_env):
session = MockedClientSession('')
(flexmock(session)
.should_receive('krb_login')
.and_raise(RuntimeError)
.once())
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_ssl_fail(self, tmpdir, os_env):
session = MockedClientSession('')
(flexmock(session)
.should_receive('ssl_login')
.and_raise(RuntimeError)
.once())
tasker, workflow = mock_environment(tmpdir,
session=session,
name='ns/name',
version='1.0',
release='1')
runner = create_runner(tasker, workflow, ssl_certs=True)
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize('fail_method', [
'get_build_logs',
'get_pod_for_build',
])
def test_koji_promote_osbs_fail(self, tmpdir, os_env, fail_method):
tasker, workflow = mock_environment(tmpdir,
name='name',
version='1.0',
release='1')
(flexmock(OSBS)
.should_receive(fail_method)
.and_raise(OsbsException))
runner = create_runner(tasker, workflow)
runner.run()
@staticmethod
def check_components(components):
assert isinstance(components, list)
assert len(components) > 0
for component_rpm in components:
assert isinstance(component_rpm, dict)
assert set(component_rpm.keys()) == set([
'type',
'name',
'version',
'release',
'epoch',
'arch',
'sigmd5',
'signature',
])
assert component_rpm['type'] == 'rpm'
assert component_rpm['name']
assert is_string_type(component_rpm['name'])
assert component_rpm['name'] != 'gpg-pubkey'
assert component_rpm['version']
assert is_string_type(component_rpm['version'])
assert component_rpm['release']
epoch = component_rpm['epoch']
assert epoch is None or isinstance(epoch, int)
assert is_string_type(component_rpm['arch'])
assert component_rpm['signature'] != '(none)'
def validate_buildroot(self, buildroot):
assert isinstance(buildroot, dict)
assert set(buildroot.keys()) == set([
'id',
'host',
'content_generator',
'container',
'tools',
'components',
'extra',
])
host = buildroot['host']
assert isinstance(host, dict)
assert set(host.keys()) == set([
'os',
'arch',
])
assert host['os']
assert is_string_type(host['os'])
assert host['arch']
assert is_string_type(host['arch'])
assert host['arch'] != 'amd64'
content_generator = buildroot['content_generator']
assert isinstance(content_generator, dict)
assert set(content_generator.keys()) == set([
'name',
'version',
])
assert content_generator['name']
assert is_string_type(content_generator['name'])
assert content_generator['version']
assert is_string_type(content_generator['version'])
container = buildroot['container']
assert isinstance(container, dict)
assert set(container.keys()) == set([
'type',
'arch',
])
assert container['type'] == 'docker'
assert container['arch']
assert is_string_type(container['arch'])
assert isinstance(buildroot['tools'], list)
assert len(buildroot['tools']) > 0
for tool in buildroot['tools']:
assert isinstance(tool, dict)
assert set(tool.keys()) == set([
'name',
'version',
])
assert tool['name']
assert is_string_type(tool['name'])
assert tool['version']
assert is_string_type(tool['version'])
self.check_components(buildroot['components'])
extra = buildroot['extra']
assert isinstance(extra, dict)
assert set(extra.keys()) == set([
'osbs',
])
assert 'osbs' in extra
osbs = extra['osbs']
assert isinstance(osbs, dict)
assert set(osbs.keys()) == set([
'build_id',
'builder_image_id',
])
assert is_string_type(osbs['build_id'])
assert is_string_type(osbs['builder_image_id'])
def validate_output(self, output, metadata_only):
if metadata_only:
mdonly = set()
else:
mdonly = set(['metadata_only'])
assert isinstance(output, dict)
assert 'type' in output
assert 'buildroot_id' in output
assert 'filename' in output
assert output['filename']
assert is_string_type(output['filename'])
assert 'filesize' in output
assert int(output['filesize']) > 0 or metadata_only
assert 'arch' in output
assert output['arch']
assert is_string_type(output['arch'])
assert 'checksum' in output
assert output['checksum']
assert is_string_type(output['checksum'])
assert 'checksum_type' in output
assert output['checksum_type'] == 'md5'
assert is_string_type(output['checksum_type'])
assert 'type' in output
if output['type'] == 'log':
assert set(output.keys()) == set([
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
'metadata_only', # only when True
]) - mdonly
assert output['arch'] == 'noarch'
else:
assert set(output.keys()) == set([
'buildroot_id',
'filename',
'filesize',
'arch',
'checksum',
'checksum_type',
'type',
'components',
'extra',
'metadata_only', # only when True
]) - mdonly
assert output['type'] == 'docker-image'
assert is_string_type(output['arch'])
assert output['arch'] != 'noarch'
assert output['arch'] in output['filename']
self.check_components(output['components'])
extra = output['extra']
assert isinstance(extra, dict)
assert set(extra.keys()) == set([
'image',
'docker',
])
image = extra['image']
assert isinstance(image, dict)
assert set(image.keys()) == set([
'arch',
])
assert image['arch'] == output['arch'] # what else?
assert 'docker' in extra
docker = extra['docker']
assert isinstance(docker, dict)
assert set(docker.keys()) == set([
'parent_id',
'id',
'repositories',
])
assert is_string_type(docker['parent_id'])
assert is_string_type(docker['id'])
repositories = docker['repositories']
assert isinstance(repositories, list)
repositories_digest = list(filter(lambda repo: '@sha256' in repo, repositories))
repositories_tag = list(filter(lambda repo: '@sha256' not in repo, repositories))
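            # Illustrative (made-up) examples of the two pullspec forms split here:
            #   tag pullspec:    registry.example.com/namespace/repo:1.0-1
            #   digest pullspec: registry.example.com/namespace/repo@sha256:<digest>
            # The checks below expect each tag pullspec to be paired with exactly
            # one digest pullspec for the same image.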
assert len(repositories_tag) == len(repositories_digest)
# check for duplicates
assert sorted(repositories_tag) == sorted(set(repositories_tag))
assert sorted(repositories_digest) == sorted(set(repositories_digest))
for repository in repositories_tag:
assert is_string_type(repository)
image = ImageName.parse(repository)
assert image.registry
assert image.namespace
assert image.repo
assert image.tag and image.tag != 'latest'
digest_pullspec = image.to_str(tag=False) + '@' + fake_digest(image)
assert digest_pullspec in repositories_digest
def test_koji_promote_import_fail(self, tmpdir, os_env, caplog):
session = MockedClientSession('')
(flexmock(session)
.should_receive('CGImport')
.and_raise(RuntimeError))
name = 'ns/name'
version = '1.0'
release = '1'
target = 'images-docker-candidate'
tasker, workflow = mock_environment(tmpdir,
name=name,
version=version,
release=release,
session=session)
runner = create_runner(tasker, workflow, target=target)
with pytest.raises(PluginFailedException):
runner.run()
assert 'metadata:' in caplog.text()
@pytest.mark.parametrize('task_states', [
['FREE', 'ASSIGNED', 'FAILED'],
['CANCELED'],
[None],
])
def test_koji_promote_tag_fail(self, tmpdir, task_states, os_env):
session = MockedClientSession('', task_states=task_states)
name = 'ns/name'
version = '1.0'
release = '1'
target = 'images-docker-candidate'
tasker, workflow = mock_environment(tmpdir,
name=name,
version=version,
release=release,
session=session)
runner = create_runner(tasker, workflow, target=target)
with pytest.raises(PluginFailedException):
runner.run()
def test_koji_promote_filesystem_koji_task_id(self, tmpdir, os_env):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1',
session=session)
task_id = 1234
workflow.prebuild_results[AddFilesystemPlugin.key] = {
'base-image-id': 'abcd',
'filesystem-koji-task-id': task_id,
}
runner = create_runner(tasker, workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'filesystem_koji_task_id' in extra
filesystem_koji_task_id = extra['filesystem_koji_task_id']
assert is_string_type(filesystem_koji_task_id)
assert filesystem_koji_task_id == str(task_id)
def test_koji_promote_filesystem_koji_task_id_missing(self, tmpdir, os_env,
caplog):
session = MockedClientSession('')
tasker, workflow = mock_environment(tmpdir,
name='ns/name',
version='1.0',
release='1',
session=session)
task_id = 1234
workflow.prebuild_results[AddFilesystemPlugin.key] = {
'base-image-id': 'abcd',
}
runner = create_runner(tasker, workflow)
runner.run()
data = session.metadata
assert 'build' in data
build = data['build']
assert isinstance(build, dict)
assert 'extra' in build
extra = build['extra']
assert isinstance(extra, dict)
assert 'filesystem_koji_task_id' not in extra
assert AddFilesystemPlugin.key in caplog.text()
@pytest.mark.parametrize(('apis',
'pulp_registries',
'metadata_only',
'blocksize',
'target'), [
('v1-only',
1,
False,
None,
'images-docker-candidate'),
('v1+v2',
2,
False,
10485760,
None),
('v2-only',
1,
True,
None,
None),
])
def test_koji_promote_success(self, tmpdir, apis, pulp_registries,
metadata_only, blocksize, target, os_env):
session = MockedClientSession('')
component = 'component'
name = 'ns/name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
component=component,
version=version,
release=release,
pulp_registries=pulp_registries,
blocksize=blocksize)
runner = create_runner(tasker, workflow, metadata_only=metadata_only,
blocksize=blocksize, target=target)
runner.run()
data = session.metadata
if metadata_only:
mdonly = set()
else:
mdonly = set(['metadata_only'])
output_filename = 'koji_promote-{0}.json'.format(apis)
with open(output_filename, 'w') as out:
json.dump(data, out, sort_keys=True, indent=4)
assert set(data.keys()) == set([
'metadata_version',
'build',
'buildroots',
'output',
])
assert data['metadata_version'] in ['0', 0]
build = data['build']
assert isinstance(build, dict)
buildroots = data['buildroots']
assert isinstance(buildroots, list)
assert len(buildroots) > 0
output_files = data['output']
assert isinstance(output_files, list)
assert set(build.keys()) == set([
'name',
'version',
'release',
'source',
'start_time',
'end_time',
'extra', # optional but always supplied
'metadata_only', # only when True
]) - mdonly
assert build['name'] == component
assert build['version'] == version
assert build['release'] == release
assert build['source'] == 'git://hostname/path#123456'
start_time = build['start_time']
assert isinstance(start_time, int) and start_time
end_time = build['end_time']
assert isinstance(end_time, int) and end_time
if metadata_only:
assert isinstance(build['metadata_only'], bool)
assert build['metadata_only']
extra = build['extra']
assert isinstance(extra, dict)
for buildroot in buildroots:
self.validate_buildroot(buildroot)
# Unique within buildroots in this metadata
assert len([b for b in buildroots
if b['id'] == buildroot['id']]) == 1
for output in output_files:
self.validate_output(output, metadata_only)
buildroot_id = output['buildroot_id']
# References one of the buildroots
assert len([buildroot for buildroot in buildroots
if buildroot['id'] == buildroot_id]) == 1
if metadata_only:
assert isinstance(output['metadata_only'], bool)
assert output['metadata_only']
files = session.uploaded_files
# There should be a file in the list for each output
# except for metadata-only imports, in which case there
# will be no upload for the image itself
assert isinstance(files, list)
expected_uploads = len(output_files)
if metadata_only:
expected_uploads -= 1
assert len(files) == expected_uploads
# The correct blocksize argument should have been used
if blocksize is not None:
assert blocksize == session.blocksize
build_id = runner.plugins_results[KojiPromotePlugin.key]
assert build_id == "123"
if target is not None:
assert session.build_tags[build_id] == session.DEST_TAG
assert session.tag_task_state == 'CLOSED'
def test_koji_promote_without_build_info(self, tmpdir, os_env):
class LegacyCGImport(MockedClientSession):
def CGImport(self, *args, **kwargs):
super(LegacyCGImport, self).CGImport(*args, **kwargs)
return
session = LegacyCGImport('')
name = 'ns/name'
version = '1.0'
release = '1'
tasker, workflow = mock_environment(tmpdir,
session=session,
name=name,
version=version,
release=release)
runner = create_runner(tasker, workflow)
runner.run()
assert runner.plugins_results[KojiPromotePlugin.key] is None
```
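The assertions in `test_koji_promote_success` spell out, key by key, the shape of the content-generator metadata the plugin submits. As a reading aid, here is a minimal sketch of that top-level document with invented values; it is inferred from the assertions above rather than taken from the plugin itself.
```python
# Minimal sketch of the metadata layout checked above (values are invented).
example_metadata = {
    "metadata_version": 0,
    "build": {
        "name": "component",
        "version": "1.0",
        "release": "1",
        "source": "git://hostname/path#123456",
        "start_time": 1400000000,
        "end_time": 1400000100,
        "extra": {},
    },
    "buildroots": [{"id": 1}],        # each entry is checked by validate_buildroot()
    "output": [{"buildroot_id": 1}],  # each entry is checked by validate_output()
}
assert set(example_metadata.keys()) == {
    "metadata_version", "build", "buildroots", "output"}
```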
#### File: tests/plugins/test_stop_autorebuild_if_disabled.py
```python
try:
import configparser
except ImportError:
import ConfigParser as configparser
from contextlib import contextmanager
from flexmock import flexmock
import os
import pytest
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner, AutoRebuildCanceledException
from atomic_reactor.plugins.pre_check_and_set_rebuild import CheckAndSetRebuildPlugin
from atomic_reactor.plugins.pre_stop_autorebuild_if_disabled import StopAutorebuildIfDisabledPlugin
from atomic_reactor.util import ImageName
from tests.constants import INPUT_IMAGE, MOCK, MOCK_SOURCE
if MOCK:
from tests.docker_mock import mock_docker
@contextmanager
def mocked_configparser_getboolean(ret):
# configparser.SafeConfigParser.getboolean can't be mocked in Py3.4 due to
# https://github.com/has207/flexmock/pull/100
def getboolean(self, a, b):
if isinstance(ret, bool):
return ret
else:
raise ret
old_gb = configparser.SafeConfigParser.getboolean
configparser.SafeConfigParser.getboolean = getboolean
yield
configparser.SafeConfigParser.getboolean = old_gb
class Y(object):
path = ''
dockerfile_path = ''
class X(object):
image_id = INPUT_IMAGE
source = Y()
base_image = ImageName.parse('asd')
class TestStopAutorebuildIfDisabledPlugin(object):
prebuild_plugins = [{
'name': StopAutorebuildIfDisabledPlugin.key,
'args': {
'config_file': '.osbs-repo-config'
}
}]
def assert_message_logged(self, msg, cplog):
assert any([msg in l.getMessage() for l in cplog.records()])
def setup_method(self, method):
if MOCK:
mock_docker()
self.tasker = DockerTasker()
self.workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
self.workflow.builder = X()
def get():
return 'path'
self.workflow.source.get = get
self.workflow.prebuild_results[CheckAndSetRebuildPlugin.key] = True
self.runner = PreBuildPluginsRunner(self.tasker, self.workflow, self.prebuild_plugins)
def test_disabled_in_config(self, caplog):
if MOCK:
mock_docker()
flexmock(os.path).should_receive('exists').with_args('path/.osbs-repo-config').\
and_return(True)
flexmock(configparser.SafeConfigParser).should_receive('read').and_return(None)
# flexmock(configparser.SafeConfigParser).should_receive('getboolean').\
# with_args('autorebuild', 'enabled').and_return(False)
with mocked_configparser_getboolean(False):
with pytest.raises(AutoRebuildCanceledException):
self.runner.run()
self.assert_message_logged('autorebuild is disabled in .osbs-repo-config', caplog)
def test_enabled_in_config(self, caplog):
if MOCK:
mock_docker()
flexmock(os.path).should_receive('exists').with_args('path/.osbs-repo-config').\
and_return(True)
flexmock(configparser.SafeConfigParser).should_receive('read').and_return(None)
# flexmock(configparser.SafeConfigParser).should_receive('getboolean').\
# with_args('autorebuild', 'enabled').and_return(True)
# assert this doesn't raise
with mocked_configparser_getboolean(True):
self.runner.run()
self.assert_message_logged('autorebuild is enabled in .osbs-repo-config', caplog)
def test_malformed_config(self, caplog):
if MOCK:
mock_docker()
flexmock(os.path).should_receive('exists').with_args('path/.osbs-repo-config').\
and_return(True)
flexmock(configparser.SafeConfigParser).should_receive('read').and_return(None)
# flexmock(configparser.SafeConfigParser).should_receive('getboolean').\
# with_args('autorebuild', 'enabled').and_raise(configparser.Error)
# assert this doesn't raise
with mocked_configparser_getboolean(configparser.Error):
self.runner.run()
self.assert_message_logged(
'can\'t parse ".osbs-repo-config", assuming autorebuild is enabled',
caplog)
def test_no_config(self, caplog):
if MOCK:
mock_docker()
# assert this doesn't raise
self.runner.run()
self.assert_message_logged('no ".osbs-repo-config", assuming autorebuild is enabled',
caplog)
```
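The tests above only ever mock `configparser.SafeConfigParser.getboolean('autorebuild', 'enabled')`, which suggests a repo configuration file of roughly the following shape. This is a guess based on the mocked calls and log messages, not taken from the plugin source.
```python
import configparser

# Hypothetical .osbs-repo-config content implied by the mocked
# getboolean('autorebuild', 'enabled') calls above.
SAMPLE_REPO_CONFIG = """
[autorebuild]
enabled = false
"""
parser = configparser.ConfigParser()
parser.read_string(SAMPLE_REPO_CONFIG)
assert parser.getboolean('autorebuild', 'enabled') is False
```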
#### File: atomic-reactor/tests/test_cli.py
```python
from __future__ import print_function, unicode_literals
import logging
import os
import sys
import pytest
from atomic_reactor.buildimage import BuildImageBuilder
from atomic_reactor.core import DockerTasker
import atomic_reactor.cli.main
from tests.fixtures import is_registry_running, temp_image_name, get_uuid
from tests.constants import LOCALHOST_REGISTRY, DOCKERFILE_GIT, DOCKERFILE_OK_PATH, FILES, MOCK
if MOCK:
from tests.docker_mock import mock_docker
PRIV_BUILD_IMAGE = None
DH_BUILD_IMAGE = None
logger = logging.getLogger('atomic_reactor.tests')
if MOCK:
mock_docker()
dt = DockerTasker()
reactor_root = os.path.dirname(os.path.dirname(__file__))
with_all_sources = pytest.mark.parametrize('source_provider, uri', [
('git', DOCKERFILE_GIT),
('path', DOCKERFILE_OK_PATH),
])
# TEST-SUITE SETUP
def setup_module(module):
global PRIV_BUILD_IMAGE, DH_BUILD_IMAGE
PRIV_BUILD_IMAGE = get_uuid()
DH_BUILD_IMAGE = get_uuid()
if MOCK:
return
b = BuildImageBuilder(reactor_local_path=reactor_root)
b.create_image(os.path.join(reactor_root, 'images', 'privileged-builder'),
PRIV_BUILD_IMAGE, use_cache=True)
b2 = BuildImageBuilder(reactor_local_path=reactor_root)
b2.create_image(os.path.join(reactor_root, 'images', 'dockerhost-builder'),
DH_BUILD_IMAGE, use_cache=True)
def teardown_module(module):
if MOCK:
return
dt.remove_image(PRIV_BUILD_IMAGE, force=True)
dt.remove_image(DH_BUILD_IMAGE, force=True)
# TESTS
class TestCLISuite(object):
def exec_cli(self, command):
saved_args = sys.argv
sys.argv = command
atomic_reactor.cli.main.run()
sys.argv = saved_args
@with_all_sources
def test_simple_privileged_build(self, is_registry_running, temp_image_name,
source_provider, uri):
if MOCK:
mock_docker()
temp_image = temp_image_name
command = [
"main.py",
"--verbose",
"build",
source_provider,
"--method", "privileged",
"--build-image", PRIV_BUILD_IMAGE,
"--image", temp_image.to_str(),
"--uri", uri,
]
if is_registry_running:
logger.info("registry is running")
command += ["--source-registry", LOCALHOST_REGISTRY]
else:
logger.info("registry is NOT running")
with pytest.raises(SystemExit) as excinfo:
self.exec_cli(command)
assert excinfo.value.code == 0
@with_all_sources
def test_simple_dh_build(self, is_registry_running, temp_image_name, source_provider, uri):
if MOCK:
mock_docker()
temp_image = temp_image_name
command = [
"main.py",
"--verbose",
"build",
source_provider,
"--method", "hostdocker",
"--build-image", DH_BUILD_IMAGE,
"--image", temp_image.to_str(),
"--uri", uri,
]
if is_registry_running:
logger.info("registry is running")
command += ["--source-registry", LOCALHOST_REGISTRY]
else:
logger.info("registry is NOT running")
with pytest.raises(SystemExit) as excinfo:
self.exec_cli(command)
assert excinfo.value.code == 0
dt.remove_image(temp_image, noprune=True)
def test_building_from_json_source_provider(self, is_registry_running, temp_image_name):
if MOCK:
mock_docker()
temp_image = temp_image_name
command = [
"main.py",
"--verbose",
"build",
"json",
"--method", "hostdocker",
"--build-image", DH_BUILD_IMAGE,
os.path.join(FILES, 'example-build.json'),
"--substitute", "image={0}".format(temp_image),
"source.uri={0}".format(DOCKERFILE_OK_PATH)
]
if is_registry_running:
logger.info("registry is running")
command += ["--source-registry", LOCALHOST_REGISTRY]
else:
logger.info("registry is NOT running")
with pytest.raises(SystemExit) as excinfo:
self.exec_cli(command)
assert excinfo.value.code == 0
dt.remove_image(temp_image, noprune=True)
def test_create_build_image(self, temp_image_name):
if MOCK:
mock_docker()
temp_image = temp_image_name
priv_builder_path = os.path.join(reactor_root, 'images', 'privileged-builder')
command = [
"main.py",
"--verbose",
"create-build-image",
"--reactor-local-path", reactor_root,
priv_builder_path,
temp_image.to_str(),
]
with pytest.raises(SystemExit) as excinfo:
self.exec_cli(command)
assert excinfo.value.code == 0
dt.remove_image(temp_image, noprune=True)
```
#### File: atomic-reactor/tests/test_util.py
```python
from __future__ import unicode_literals
import os
import tempfile
import pytest
import six
try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from ordereddict import OrderedDict
import docker
from atomic_reactor.util import (ImageName, wait_for_command, clone_git_repo,
LazyGit, figure_out_dockerfile,
render_yum_repo, process_substitutions,
get_checksums, print_version_of_tools,
get_version_of_tools, get_preferred_label_key,
human_size, CommandResult)
from tests.constants import DOCKERFILE_GIT, INPUT_IMAGE, MOCK, DOCKERFILE_SHA1
from tests.util import requires_internet
if MOCK:
from tests.docker_mock import mock_docker
TEST_DATA = {
"repository.com/image-name": ImageName(registry="repository.com", repo="image-name"),
"repository.com/prefix/image-name:1": ImageName(registry="repository.com",
namespace="prefix",
repo="image-name", tag="1"),
"repository.com/prefix/image-name": ImageName(registry="repository.com",
namespace="prefix",
repo="image-name"),
"image-name": ImageName(repo="image-name"),
"registry:5000/image-name:latest": ImageName(registry="registry:5000",
repo="image-name", tag="latest"),
"registry:5000/image-name": ImageName(registry="registry:5000", repo="image-name"),
"fedora:20": ImageName(repo="fedora", tag="20"),
"prefix/image-name:1": ImageName(namespace="prefix", repo="image-name", tag="1"),
"library/fedora:20": ImageName(namespace="library", repo="fedora", tag="20"),
}
def test_image_name_parse():
for inp, parsed in TEST_DATA.items():
assert ImageName.parse(inp) == parsed
def test_image_name_format():
for expected, image_name in TEST_DATA.items():
assert image_name.to_str() == expected
def test_image_name_comparison():
# make sure that both "==" and "!=" are implemented right on both Python major releases
i1 = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='1')
i2 = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='1')
assert i1 == i2
assert not i1 != i2
i2 = ImageName(registry='foo.com', namespace='spam', repo='bar', tag='2')
assert not i1 == i2
assert i1 != i2
def test_wait_for_command():
if MOCK:
mock_docker()
d = docker.Client()
logs_gen = d.pull(INPUT_IMAGE, stream=True)
assert wait_for_command(logs_gen) is not None
@requires_internet
def test_clone_git_repo(tmpdir):
tmpdir_path = str(tmpdir.realpath())
commit_id = clone_git_repo(DOCKERFILE_GIT, tmpdir_path)
assert commit_id is not None
assert len(commit_id) == 40 # current git hashes are this long
assert os.path.isdir(os.path.join(tmpdir_path, '.git'))
class TestCommandResult(object):
@pytest.mark.parametrize(('item', 'expected'), [
(b'{"stream":"Step 0 : FROM ebbc51b7dfa5bcd993a[...]\\n"}\n',
"Step 0 : FROM ebbc51b7dfa5bcd993a[...]"),
(b'this is not valid JSON\n',
'this is not valid JSON'),
])
def test_parse_item(self, item, expected):
cr = CommandResult()
cr.parse_item(item)
assert cr.logs == [expected]
@requires_internet
def test_clone_git_repo_by_sha1(tmpdir):
tmpdir_path = str(tmpdir.realpath())
commit_id = clone_git_repo(DOCKERFILE_GIT, tmpdir_path, commit=DOCKERFILE_SHA1)
assert commit_id is not None
print(six.text_type(commit_id))
print(commit_id)
assert six.text_type(commit_id, encoding="ascii") == six.text_type(DOCKERFILE_SHA1)
assert len(commit_id) == 40 # current git hashes are this long
assert os.path.isdir(os.path.join(tmpdir_path, '.git'))
@requires_internet
def test_figure_out_dockerfile(tmpdir):
tmpdir_path = str(tmpdir.realpath())
clone_git_repo(DOCKERFILE_GIT, tmpdir_path)
path, dir = figure_out_dockerfile(tmpdir_path)
assert os.path.isfile(path)
assert os.path.isdir(dir)
@requires_internet
def test_lazy_git():
lazy_git = LazyGit(git_url=DOCKERFILE_GIT)
with lazy_git:
assert lazy_git.git_path is not None
assert lazy_git.commit_id is not None
assert len(lazy_git.commit_id) == 40 # current git hashes are this long
@requires_internet
def test_lazy_git_with_tmpdir(tmpdir):
t = str(tmpdir.realpath())
lazy_git = LazyGit(git_url=DOCKERFILE_GIT, tmpdir=t)
assert lazy_git._tmpdir == t
assert lazy_git.git_path is not None
assert lazy_git.commit_id is not None
assert len(lazy_git.commit_id) == 40 # current git hashes are this long
def test_render_yum_repo_unicode():
yum_repo = OrderedDict((
("name", "asd"),
("baseurl", "http://example.com/$basearch/test.repo"),
("enabled", "1"),
("gpgcheck", "0"),
))
rendered_repo = render_yum_repo(yum_repo)
assert rendered_repo == """\
[asd]
name=asd
baseurl=http://example.com/\$basearch/test.repo
enabled=1
gpgcheck=0
"""
@pytest.mark.parametrize('dct, subst, expected', [
({'foo': 'bar'}, ['foo=spam'], {'foo': 'spam'}),
({'foo': 'bar'}, ['baz=spam'], {'foo': 'bar', 'baz': 'spam'}),
({'foo': 'bar'}, ['foo.bar=spam'], {'foo': {'bar': 'spam'}}),
({'foo': 'bar'}, ['spam.spam=spam'], {'foo': 'bar', 'spam': {'spam': 'spam'}}),
({'x_plugins': [{'name': 'a', 'args': {'b': 'c'}}]}, {'x_plugins.a.b': 'd'},
{'x_plugins': [{'name': 'a', 'args': {'b': 'd'}}]}),
# substituting plugins doesn't add new params
({'x_plugins': [{'name': 'a', 'args': {'b': 'c'}}]}, {'x_plugins.a.c': 'd'},
{'x_plugins': [{'name': 'a', 'args': {'b': 'c'}}]}),
({'x_plugins': [{'name': 'a', 'args': {'b': 'c'}}]}, {'x_plugins.X': 'd'},
ValueError()),
])
def test_process_substitutions(dct, subst, expected):
if isinstance(expected, Exception):
with pytest.raises(type(expected)):
process_substitutions(dct, subst)
else:
process_substitutions(dct, subst)
assert dct == expected
@pytest.mark.parametrize('content, algorithms, expected', [
(b'abc', ['md5', 'sha256'],
{'md5sum': '900150983cd24fb0d6963f7d28e17f72',
'sha256sum': 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'}),
(b'abc', ['md5'], {'md5sum': '900150983cd24fb0d6963f7d28e17f72'}),
(b'abc', [], {})
])
def test_get_hexdigests(tmpdir, content, algorithms, expected):
with tempfile.NamedTemporaryFile(dir=str(tmpdir)) as tmpfile:
tmpfile.write(content)
tmpfile.flush()
checksums = get_checksums(tmpfile.name, algorithms)
assert checksums == expected
def test_get_versions_of_tools():
response = get_version_of_tools()
assert isinstance(response, list)
for t in response:
assert t["name"]
assert t["version"]
def test_print_versions_of_tools():
print_version_of_tools()
@pytest.mark.parametrize('labels, name, expected', [
({'name': 'foo', 'Name': 'foo'}, 'name', 'name'),
({'name': 'foo', 'Name': 'foo'}, 'Name', 'name'),
({'name': 'foo'}, 'Name', 'name'),
({'Name': 'foo'}, 'name', 'Name'),
({}, 'Name', 'name'),
({}, 'foobar', 'foobar')
])
def test_preferred_labels(labels, name, expected):
result = get_preferred_label_key(labels, name)
assert result == expected
@pytest.mark.parametrize('size_input,expected', [
(0, "0.00 B"),
(1, "1.00 B"),
(-1, "-1.00 B"),
(1536, "1.50 KiB"),
(-1024, "-1.00 KiB"),
(204800, "200.00 KiB"),
(6983516, "6.66 MiB"),
(14355928186, "13.37 GiB"),
(135734710448947, "123.45 TiB"),
(1180579814801204129310965, "999.99 ZiB"),
(1074589982539051580812825722, "888.88 YiB"),
(4223769947617154742438477168, "3493.82 YiB"),
(-4223769947617154742438477168, "-3493.82 YiB"),
])
def test_human_size(size_input, expected):
assert human_size(size_input) == expected
``` |
{
"source": "jpopelka/fabric8-analytics-common",
"score": 3
} |
#### File: dashboard/src/config.py
```python
import configparser
from urllib.parse import urljoin
class Config:
"""Class representing common configuration."""
CONFIG_FILE_NAME = 'config.ini'
def __init__(self):
"""Read and parse the configuration file."""
self.config = configparser.ConfigParser()
self.config.read(Config.CONFIG_FILE_NAME)
def get_sprint(self):
"""Return name of current sprint."""
return self.config.get('sprint', 'number')
def get_project_url(self):
"""Return URL to a project page on GitHub."""
try:
url_prefix = self.config.get('issue_tracker', 'url')
project_group = self.config.get('issue_tracker', 'group') + "/"
project_name = self.config.get('issue_tracker', 'project_name')
return urljoin(urljoin(url_prefix, project_group), project_name)
except (configparser.NoSectionError, configparser.NoOptionError):
return None
def get_list_of_issues_url(self, team):
"""Return URL to list of issues for selected team."""
try:
sprint = "Sprint+" + self.config.get('sprint', 'number')
team_label = self.config.get(team, 'label')
project_url = self.get_project_url()
url = '{project_url}/issues?q=is:open+is:issue+milestone:"{sprint}"+label:{label}'.\
format(project_url=project_url, sprint=sprint, label=team_label)
return url
except (configparser.NoSectionError, configparser.NoOptionError):
return None
if __name__ == "__main__":
# execute simple checks, but only if run this module as a script
config = Config()
print(config.get_project_url())
print(config.get_list_of_issues_url('core'))
print(config.get_list_of_issues_url('integration'))
```
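The Config class above reads only a handful of options. A rough illustration of a matching config.ini follows; the section contents and values are hypothetical, chosen to exercise the getters shown above.
```python
import configparser

# Hypothetical config.ini layout matching the options read by Config above.
SAMPLE_CONFIG = """
[sprint]
number = 42

[issue_tracker]
url = https://github.com/
group = example-org
project_name = example-project

[core]
label = core
"""
parser = configparser.ConfigParser()
parser.read_string(SAMPLE_CONFIG)
assert parser.get('sprint', 'number') == '42'
assert parser.get('core', 'label') == 'core'
```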
#### File: features/src/attribute_checks.py
```python
import datetime
import re
def check_attribute_presence(node, attribute_name):
"""Check the attribute presence in the given dictionary or list.
To be used to check the deserialized JSON data etc.
"""
found_attributes = node if type(node) is list else node.keys()
assert attribute_name in node, \
"'%s' attribute is expected in the node, " \
"found: %s attributes " % (attribute_name, ", ".join(found_attributes))
def check_attributes_presence(node, attribute_names):
"""Check the presence of all attributes in the dictionary or in the list.
To be used to check the deserialized JSON data etc.
"""
for attribute_name in attribute_names:
found_attributes = node if type(node) is list else node.keys()
assert attribute_name in node, \
"'%s' attribute is expected in the node, " \
"found: %s attributes " % (attribute_name, ", ".join(found_attributes))
def check_and_get_attribute(node, attribute_name):
"""Check the attribute presence and if the attribute is found, return its value."""
check_attribute_presence(node, attribute_name)
return node[attribute_name]
def check_uuid(uuid):
"""Check if the string contains a proper UUID.
Supported format: 71769af6-0a39-4242-94be-1f84f04c8a56
"""
    regex = re.compile(r'^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z',
                       re.I)
match = regex.match(uuid)
return bool(match)
def check_timestamp(timestamp):
"""Check if the string contains proper timestamp value.
The following four formats are supported:
2017-07-19 13:05:25.041688
2017-07-17T09:05:29.101780
2017-07-19 13:05:25
2017-07-17T09:05:29
"""
assert timestamp is not None
assert isinstance(timestamp, str)
# some attributes contains timestamp without the millisecond part
# so we need to take care of it
if len(timestamp) == len("YYYY-mm-dd HH:MM:SS") and '.' not in timestamp:
timestamp += '.0'
assert len(timestamp) >= len("YYYY-mm-dd HH:MM:SS.")
# we have to support the following formats:
# 2017-07-19 13:05:25.041688
# 2017-07-17T09:05:29.101780
# -> it is needed to distinguish the 'T' separator
#
# (please see https://www.tutorialspoint.com/python/time_strptime.htm for
# an explanation how timeformat should look like)
timeformat = "%Y-%m-%d %H:%M:%S.%f"
if timestamp[10] == "T":
timeformat = "%Y-%m-%dT%H:%M:%S.%f"
# just try to parse the string to check whether
# the ValueError exception is raised or not
datetime.datetime.strptime(timestamp, timeformat)
def check_job_token_attributes(token):
"""Check that the given JOB token contains all required attributes."""
attribs = ["limit", "remaining", "reset"]
for attr in attribs:
assert attr in token
assert int(token[attr]) >= 0
def check_status_attribute(data):
"""Check the value of the status attribute, that should contain just two allowed values."""
status = check_and_get_attribute(data, "status")
assert status in ["success", "error"]
def check_summary_attribute(data):
"""Check the summary attribute that can be found all generated metadata."""
summary = check_and_get_attribute(data, "summary")
assert type(summary) is list or type(summary) is dict
def release_string(ecosystem, package, version=None):
"""Construct a string with ecosystem:package or ecosystem:package:version tuple."""
return "{e}:{p}:{v}".format(e=ecosystem, p=package, v=version)
def check_release_attribute(data, ecosystem, package, version=None):
"""Check the content of _release attribute.
Check that the attribute _release contains proper release string for given ecosystem
and package.
"""
check_attribute_presence(data, "_release")
assert data["_release"] == release_string(ecosystem, package, version)
def check_schema_attribute(data, expected_schema_name, expected_schema_version):
"""Check the content of the schema attribute.
    This attribute should contain a dictionary with name and version, which are checked as well.
"""
# read the toplevel attribute 'schema'
schema = check_and_get_attribute(data, "schema")
# read attributes from the 'schema' node
name = check_and_get_attribute(schema, "name")
version = check_and_get_attribute(schema, "version")
# check the schema name
assert name == expected_schema_name, "Schema name '{n1}' is different from " \
"expected name '{n2}'".format(n1=name, n2=expected_schema_name)
# check the schema version (ATM we are able to check just one fixed version)
assert version == expected_schema_version, "Schema version {v1} is different from expected " \
"version {v2}".format(v1=version, v2=expected_schema_version)
def check_audit_metadata(data):
"""Check the metadata stored in the _audit attribute.
Check if all common attributes can be found in the audit node
in the component or package metadata.
"""
check_attribute_presence(data, "_audit")
audit = data["_audit"]
check_attribute_presence(audit, "version")
assert audit["version"] == "v1"
check_attribute_presence(audit, "started_at")
check_timestamp(audit["started_at"])
check_attribute_presence(audit, "ended_at")
check_timestamp(audit["ended_at"])
def get_details_node(context):
"""Get content of details node, given it exists."""
data = context.s3_data
return check_and_get_attribute(data, 'details')
def check_cve_value(cve, with_score=False):
"""Check CVE values in CVE records."""
if with_score:
# please note that in graph DB, the CVE entries have the following format:
# CVE-2012-1150:5.0
# don't ask me why, but the score is stored in one field together with ID itself
# the : character is used as a separator
pattern = "CVE-(\d{4})-\d{4,}:(\d+\.\d+)"
else:
pattern = "CVE-(\d{4})-\d{4,}"
match = re.fullmatch(pattern, cve)
assert match is not None, "Improper CVE number %s" % cve
year = int(match.group(1))
current_year = datetime.datetime.now().year
# well the lower limit is a bit arbitrary
# (according to SRT guys it should be 1999)
assert year >= 1999 and year <= current_year
if with_score:
score = float(match.group(2))
assert score >= 0.0 and score <= 10.0
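# Illustrative usage of the checks above (example values only):
if __name__ == "__main__":
    assert check_uuid("71769af6-0a39-4242-94be-1f84f04c8a56")
    check_timestamp("2017-07-19 13:05:25.041688")
    check_timestamp("2017-07-17T09:05:29")
    check_cve_value("CVE-2012-1150")
    check_cve_value("CVE-2012-1150:5.0", with_score=True)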
``` |
{
"source": "jpopelka/openhubinfo",
"score": 3
} |
#### File: jpopelka/openhubinfo/openhubinfo.py
```python
import argparse
import json
import os
import requests
import sys
import xmltodict
PROG = 'openhubinfo.py'
DESCRIPTION = 'Print info (json) about an OpenHub (Ohloh) project or account.'
API_KEY_WARNING = "Set OH_API_KEY environment variable to your Ohloh API key. If you don't have " \
"one, see https://www.openhub.net/accounts/<your_login>/api_keys/new"
class OpenHubInfo(object):
def __init__(self, api_key, indent=None):
"""
:param api_key: Your Ohloh API key.
See https://github.com/blackducksoftware/ohloh_api#api-key
:param indent: if not None then indent the output json
"""
self.api_key = api_key
self.indent = indent
@staticmethod
def _request(url):
"""
Connect to OpenHub website and retrieve the data.
:param url: Ohloh API url
:return: dict (json)
"""
r = requests.get(url)
if r.ok:
xml_string = r.text or r.content
# Ohloh API returns XML, convert it to dict (json)
d = xmltodict.parse(xml_string)
return d
def _info_url(self, info_type, info_id):
# see https://github.com/blackducksoftware/ohloh_api
if info_type == 'project':
return "https://www.openhub.net/p/{project_id}.xml?api_key={key}".\
format(project_id=info_id, key=self.api_key)
elif info_type == 'account':
return "https://www.openhub.net/accounts/{account_id}.xml?api_key={key}".\
format(account_id=info_id, key=self.api_key)
else:
raise NotImplementedError('Info type not implemented')
def _dump_info(self, info_type, info_id):
url = self._info_url(info_type, info_id)
info_json = self._request(url)
json.dump(info_json, sys.stdout, indent=self.indent)
def dump_project_info(self, project_id):
return self._dump_info('project', project_id)
def dump_account_info(self, account_id):
return self._dump_info('account', account_id)
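# Illustrative library use (project name and key handling are example values):
#   oh_info = OpenHubInfo(os.getenv('OH_API_KEY'), indent=1)
#   oh_info.dump_project_info('some-project')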
class CLI(object):
def __init__(self):
self.api_key = os.getenv('OH_API_KEY')
if not self.api_key:
raise ValueError(API_KEY_WARNING)
self.parser = argparse.ArgumentParser(prog=PROG,
description=DESCRIPTION,
formatter_class=argparse.HelpFormatter)
self.parser.add_argument("-i", "--indent", action="store_true",
help='pretty-print output json')
self.parser.add_argument("-d", "--debug", action="store_true")
subparsers = self.parser.add_subparsers(help='commands ')
self.project_parser = subparsers.add_parser(
'project',
usage="%s [OPTIONS] project ..." % PROG,
description='get info about a project'
)
self.project_parser.add_argument('project_id',
help="unique id (name) of a project to get info about")
self.project_parser.set_defaults(func=self.project_info)
self.account_parser = subparsers.add_parser(
'account',
usage="%s [OPTIONS] account ..." % PROG,
description='get info about an account (user)'
)
        self.account_parser.add_argument('account_id',
                                         help="unique id (name) of an account to get info about")
self.account_parser.set_defaults(func=self.account_info)
def project_info(self, args):
oh_info = OpenHubInfo(self.api_key, args.indent)
oh_info.dump_project_info(args.project_id)
def account_info(self, args):
oh_info = OpenHubInfo(self.api_key, args.indent)
oh_info.dump_account_info(args.account_id)
def run(self):
args = self.parser.parse_args()
args.indent = 1 if args.indent else None
try:
args.func(args)
except AttributeError:
if hasattr(args, 'func'):
raise
else:
self.parser.print_help()
except KeyboardInterrupt:
pass
except Exception as ex:
if args.debug:
raise
else:
print("exception caught: %r", ex)
if __name__ == '__main__':
cli = CLI()
sys.exit(cli.run())
``` |
{
"source": "jpopelka/osbs-client",
"score": 2
} |
#### File: osbs-client/osbs/api.py
```python
from __future__ import print_function, unicode_literals, absolute_import
import json
import logging
import os
import sys
import time
import warnings
from functools import wraps
from .constants import SIMPLE_BUILD_TYPE, PROD_WITHOUT_KOJI_BUILD_TYPE, PROD_WITH_SECRET_BUILD_TYPE
from osbs.build.build_request import BuildManager
from osbs.build.build_response import BuildResponse
from osbs.build.pod_response import PodResponse
from osbs.constants import PROD_BUILD_TYPE
from osbs.core import Openshift
from osbs.exceptions import OsbsException, OsbsValidationException
# import utils in this way, so that we can mock standalone functions with flexmock
from osbs import utils
# Decorator for API methods.
def osbsapi(func):
@wraps(func)
def catch_exceptions(*args, **kwargs):
# XXX: remove this in the future
if kwargs.pop("namespace", None):
warnings.warn("OSBS.%s: the 'namespace' argument is no longer supported" % func.__name__)
try:
return func(*args, **kwargs)
except OsbsException:
# Re-raise OsbsExceptions
raise
except Exception as ex:
# Convert anything else to OsbsException
# Python 3 has implicit exception chaining and enhanced
# reporting, so you get the original traceback as well as
# the one originating here.
# For Python 2, let's do that explicitly.
raise OsbsException(cause=ex, traceback=sys.exc_info()[2])
return catch_exceptions
logger = logging.getLogger(__name__)
class OSBS(object):
"""
    Note: all API methods return an osbs.http.Response object. This is, due to historical
    reasons, untrue for list_builds and get_user, which return a list of BuildResponse objects
    and a dict respectively.
"""
@osbsapi
def __init__(self, openshift_configuration, build_configuration):
""" """
self.os_conf = openshift_configuration
self.build_conf = build_configuration
self.os = Openshift(openshift_api_url=self.os_conf.get_openshift_api_uri(),
openshift_api_version=self.os_conf.get_openshift_api_version(),
openshift_oauth_url=self.os_conf.get_openshift_oauth_api_uri(),
k8s_api_url=self.os_conf.get_k8s_api_uri(),
verbose=self.os_conf.get_verbosity(),
username=self.os_conf.get_username(),
password=self.os_conf.get_password(),
use_kerberos=self.os_conf.get_use_kerberos(),
client_cert=self.os_conf.get_client_cert(),
client_key=self.os_conf.get_client_key(),
kerberos_keytab=self.os_conf.get_kerberos_keytab(),
kerberos_principal=self.os_conf.get_kerberos_principal(),
kerberos_ccache=self.os_conf.get_kerberos_ccache(),
use_auth=self.os_conf.get_use_auth(),
verify_ssl=self.os_conf.get_verify_ssl(),
namespace=self.os_conf.get_namespace())
self._bm = None
# some calls might not need build manager so let's make it lazy
@property
def bm(self):
if self._bm is None:
self._bm = BuildManager(build_json_store=self.os_conf.get_build_json_store())
return self._bm
@osbsapi
def list_builds(self):
response = self.os.list_builds()
serialized_response = response.json()
build_list = []
for build in serialized_response["items"]:
build_list.append(BuildResponse(build))
return build_list
@osbsapi
def get_build(self, build_id):
response = self.os.get_build(build_id)
build_response = BuildResponse(response.json())
return build_response
@osbsapi
def cancel_build(self, build_id):
response = self.os.cancel_build(build_id)
build_response = BuildResponse(response.json())
return build_response
@osbsapi
def get_pod_for_build(self, build_id):
"""
:return: PodResponse object for pod relating to the build
"""
pods = self.os.list_pods(label='openshift.io/build.name=%s' % build_id)
serialized_response = pods.json()
pod_list = [PodResponse(pod) for pod in serialized_response["items"]]
if not pod_list:
raise OsbsException("No pod for build")
elif len(pod_list) != 1:
raise OsbsException("Only one pod expected but %d returned",
len(pod_list))
return pod_list[0]
@osbsapi
def get_build_request(self, build_type=None):
"""
return instance of BuildRequest according to specified build type
:param build_type: str, name of build type
:return: instance of BuildRequest
"""
build_type = build_type or self.build_conf.get_build_type()
build_request = self.bm.get_build_request_by_type(build_type=build_type)
# Apply configured resource limits.
cpu_limit = self.build_conf.get_cpu_limit()
memory_limit = self.build_conf.get_memory_limit()
storage_limit = self.build_conf.get_storage_limit()
if (cpu_limit is not None or
memory_limit is not None or
storage_limit is not None):
build_request.set_resource_limits(cpu=cpu_limit,
memory=memory_limit,
storage=storage_limit)
return build_request
@osbsapi
def create_build_from_buildrequest(self, build_request):
"""
render provided build_request and submit build from it
:param build_request: instance of build.build_request.BuildRequest
:return: instance of build.build_response.BuildResponse
"""
build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())
build = build_request.render()
response = self.os.create_build(json.dumps(build))
build_response = BuildResponse(response.json())
return build_response
def _get_running_builds_for_build_config(self, build_config_id):
all_builds_for_bc = self.os.list_builds(build_config_id=build_config_id).json()['items']
running = []
for b in all_builds_for_bc:
br = BuildResponse(b)
if br.is_pending() or br.is_running():
running.append(br)
return running
    def _panic_msg_for_more_running_builds(self, build_config_name, builds):
# this should never happen, but if it does, we want to know all the builds
# that were running at the time
builds = ', '.join(['%s: %s' % (b.get_build_name(), b.status) for b in builds])
msg = 'Multiple builds for %s running, can\'t proceed: %s' % \
(build_config_name, builds)
return msg
def _create_build_config_and_build(self, build_request):
# TODO: test this method more thoroughly
build_json = build_request.render()
api_version = build_json['apiVersion']
if api_version != self.os_conf.get_openshift_api_version():
raise OsbsValidationException("BuildConfig template has incorrect apiVersion (%s)" %
api_version)
build_config_name = build_json['metadata']['name']
# check if a build already exists for this config; if so then raise
running_builds = self._get_running_builds_for_build_config(build_config_name)
rb_len = len(running_builds)
if rb_len > 0:
if rb_len == 1:
rb = running_builds[0]
msg = 'Build %s for %s in state %s, can\'t proceed.' % \
(rb.get_build_name(), build_config_name, rb.status)
else:
msg = self._panic_msg_for_more_running_builds(build_config_name, running_builds)
raise OsbsException(msg)
try:
# see if there's already a build config
existing_bc = self.os.get_build_config(build_config_name)
except OsbsException:
# doesn't exist
existing_bc = None
build = None
if existing_bc is not None:
utils.buildconfig_update(existing_bc, build_json)
logger.debug('build config for %s already exists, updating...', build_config_name)
self.os.update_build_config(build_config_name, json.dumps(existing_bc))
else:
# if it doesn't exist, then create it
logger.debug('build config for %s doesn\'t exist, creating...', build_config_name)
bc = self.os.create_build_config(json.dumps(build_json)).json()
# if there's an "ImageChangeTrigger" on the BuildConfig and "From" is of type
# "ImageStreamTag", the build will be scheduled automatically
# see https://github.com/projectatomic/osbs-client/issues/205
if build_request.is_auto_instantiated():
prev_version = bc['status']['lastVersion']
build_id = self.os.wait_for_new_build_config_instance(build_config_name,
prev_version)
build = BuildResponse(self.os.get_build(build_id).json())
if build is None:
response = self.os.start_build(build_config_name)
build = BuildResponse(response.json())
return build
@osbsapi
def create_prod_build(self, git_uri, git_ref,
git_branch, # may be None
user, component,
target, # may be None
architecture=None, yum_repourls=None,
**kwargs):
"""
Create a production build
:param git_uri: str, URI of git repository
:param git_ref: str, reference to commit
:param git_branch: str, branch name (may be None)
:param user: str, user name
:param component: str, component name
:param target: str, koji target (may be None)
:param architecture: str, build architecture
:param yum_repourls: list, URLs for yum repos
:return: BuildResponse instance
"""
df_parser = utils.get_df_parser(git_uri, git_ref, git_branch=git_branch)
build_request = self.get_build_request(PROD_BUILD_TYPE)
build_request.set_params(
git_uri=git_uri,
git_ref=git_ref,
git_branch=git_branch,
user=user,
component=component,
base_image=df_parser.baseimage,
name_label=df_parser.labels['Name'],
registry_uris=self.build_conf.get_registry_uris(),
source_registry_uri=self.build_conf.get_source_registry_uri(),
registry_api_versions=self.build_conf.get_registry_api_versions(),
openshift_uri=self.os_conf.get_openshift_base_uri(),
builder_openshift_url=self.os_conf.get_builder_openshift_url(),
kojiroot=self.build_conf.get_kojiroot(),
kojihub=self.build_conf.get_kojihub(),
sources_command=self.build_conf.get_sources_command(),
koji_target=target,
architecture=architecture,
vendor=self.build_conf.get_vendor(),
build_host=self.build_conf.get_build_host(),
authoritative_registry=self.build_conf.get_authoritative_registry(),
distribution_scope=self.build_conf.get_distribution_scope(),
yum_repourls=yum_repourls,
pulp_secret=self.build_conf.get_pulp_secret(),
pdc_secret=self.build_conf.get_pdc_secret(),
pdc_url=self.build_conf.get_pdc_url(),
smtp_uri=self.build_conf.get_smtp_uri(),
use_auth=self.build_conf.get_builder_use_auth(),
pulp_registry=self.os_conf.get_pulp_registry(),
nfs_server_path=self.os_conf.get_nfs_server_path(),
nfs_dest_dir=self.build_conf.get_nfs_destination_dir(),
git_push_url=self.build_conf.get_git_push_url(),
git_push_username=self.build_conf.get_git_push_username(),
builder_build_json_dir=self.build_conf.get_builder_build_json_store()
)
build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())
response = self._create_build_config_and_build(build_request)
logger.debug(response.json)
return response
@osbsapi
def create_prod_with_secret_build(self, git_uri, git_ref, git_branch, user, component,
target, architecture=None, yum_repourls=None, **kwargs):
return self.create_prod_build(git_uri, git_ref, git_branch, user, component, target,
architecture, yum_repourls=yum_repourls, **kwargs)
@osbsapi
def create_prod_without_koji_build(self, git_uri, git_ref, git_branch, user, component,
architecture=None, yum_repourls=None, **kwargs):
return self.create_prod_build(git_uri, git_ref, git_branch, user, component, None,
architecture, yum_repourls=yum_repourls, **kwargs)
@osbsapi
def create_simple_build(self, git_uri, git_ref, user, component, tag,
yum_repourls=None, **kwargs):
build_request = self.get_build_request(SIMPLE_BUILD_TYPE)
build_request.set_params(
git_uri=git_uri,
git_ref=git_ref,
user=user,
component=component,
tag=tag,
registry_uris=self.build_conf.get_registry_uris(),
source_registry_uri=self.build_conf.get_source_registry_uri(),
openshift_uri=self.os_conf.get_openshift_base_uri(),
builder_openshift_url=self.os_conf.get_builder_openshift_url(),
yum_repourls=yum_repourls,
use_auth=self.build_conf.get_builder_use_auth(),
)
build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())
response = self._create_build_config_and_build(build_request)
logger.debug(response.json)
return response
@osbsapi
def create_build(self, **kwargs):
"""
take input args, create build request from provided build type and submit the build
:param kwargs: keyword args for build
        :return: instance of BuildResponse
"""
build_type = self.build_conf.get_build_type()
if build_type in (PROD_BUILD_TYPE,
PROD_WITHOUT_KOJI_BUILD_TYPE,
PROD_WITH_SECRET_BUILD_TYPE):
kwargs.setdefault('git_branch', None)
kwargs.setdefault('target', None)
return self.create_prod_build(**kwargs)
elif build_type == SIMPLE_BUILD_TYPE:
return self.create_simple_build(**kwargs)
elif build_type == PROD_WITH_SECRET_BUILD_TYPE:
return self.create_prod_with_secret_build(**kwargs)
else:
raise OsbsException("Unknown build type: '%s'" % build_type)
@osbsapi
def get_build_logs(self, build_id, follow=False, build_json=None, wait_if_missing=False):
"""
provide logs from build
:param build_id: str
:param follow: bool, fetch logs as they come?
:param build_json: dict, to save one get-build query
:param wait_if_missing: bool, if build doesn't exist, wait
:return: None, str or iterator
"""
return self.os.logs(build_id, follow=follow, build_json=build_json,
wait_if_missing=wait_if_missing)
@osbsapi
def get_docker_build_logs(self, build_id, decode_logs=True, build_json=None):
"""
get logs provided by "docker build"
:param build_id: str
        :param decode_logs: bool, docker by default outputs logs in a simple json structure:
{ "stream": "line" }
if this arg is set to True, it decodes logs to human readable form
:param build_json: dict, to save one get-build query
:return: str
"""
if not build_json:
build = self.os.get_build(build_id)
build_response = BuildResponse(build.json())
else:
build_response = BuildResponse(build_json)
if build_response.is_finished():
logs = build_response.get_logs(decode_logs=decode_logs)
return logs
logger.warning("build haven't finished yet")
@osbsapi
def wait_for_build_to_finish(self, build_id):
response = self.os.wait_for_build_to_finish(build_id)
build_response = BuildResponse(response)
return build_response
@osbsapi
def wait_for_build_to_get_scheduled(self, build_id):
response = self.os.wait_for_build_to_get_scheduled(build_id)
build_response = BuildResponse(response)
return build_response
@osbsapi
def update_labels_on_build(self, build_id, labels):
response = self.os.update_labels_on_build(build_id, labels)
return response
@osbsapi
def set_labels_on_build(self, build_id, labels):
response = self.os.set_labels_on_build(build_id, labels)
return response
@osbsapi
def update_labels_on_build_config(self, build_config_id, labels):
response = self.os.update_labels_on_build_config(build_config_id, labels)
return response
@osbsapi
def set_labels_on_build_config(self, build_config_id, labels):
response = self.os.set_labels_on_build_config(build_config_id, labels)
return response
@osbsapi
def update_annotations_on_build(self, build_id, annotations):
return self.os.update_annotations_on_build(build_id, annotations)
@osbsapi
def set_annotations_on_build(self, build_id, annotations):
return self.os.set_annotations_on_build(build_id, annotations)
@osbsapi
def import_image(self, name):
"""
Import image tags from a Docker registry into an ImageStream
:return: bool, whether new tags were imported
"""
return self.os.import_image(name)
@osbsapi
def get_token(self):
return self.os.get_oauth_token()
@osbsapi
def get_user(self, username="~"):
return self.os.get_user(username).json()
@osbsapi
def get_image_stream(self, stream_id):
return self.os.get_image_stream(stream_id)
@osbsapi
def create_image_stream(self, name, docker_image_repository,
insecure_registry=False):
"""
Create an ImageStream object
Raises exception on error
:param name: str, name of ImageStream
:param docker_image_repository: str, pull spec for docker image
repository
:param insecure_registry: bool, whether plain HTTP should be used
:return: response
"""
img_stream_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream.json')
stream = json.load(open(img_stream_file))
stream['metadata']['name'] = name
stream['spec']['dockerImageRepository'] = docker_image_repository
if insecure_registry:
stream['metadata'].setdefault('annotations', {})
insecure_annotation = 'openshift.io/image.insecureRepository'
stream['metadata']['annotations'][insecure_annotation] = 'true'
return self.os.create_image_stream(json.dumps(stream))
@osbsapi
def pause_builds(self):
# First, set quota so 0 pods are allowed to be running
quota_file = os.path.join(self.os_conf.get_build_json_store(),
'pause_quota.json')
with open(quota_file) as fp:
quota_json = json.load(fp)
name = quota_json['metadata']['name']
self.os.create_resource_quota(name, quota_json)
# Now wait for running builds to finish
while True:
builds = self.list_builds()
running_builds = [build for build in builds if build.is_running()]
if not running_builds:
break
name = running_builds[0].get_build_name()
logger.info("waiting for build to finish: %s", name)
self.wait_for_build_to_finish(name)
@osbsapi
def resume_builds(self):
quota_file = os.path.join(self.os_conf.get_build_json_store(),
'pause_quota.json')
with open(quota_file) as fp:
quota_json = json.load(fp)
name = quota_json['metadata']['name']
self.os.delete_resource_quota(name)
# implements subset of OpenShift's export logic in pkg/cmd/cli/cmd/exporter.go
@staticmethod
def _prepare_resource(resource_type, resource):
utils.graceful_chain_del(resource, 'metadata', 'resourceVersion')
if resource_type == 'buildconfigs':
utils.graceful_chain_del(resource, 'status', 'lastVersion')
triggers = utils.graceful_chain_get(resource, 'spec', 'triggers') or ()
for t in triggers:
                utils.graceful_chain_del(t, 'imageChange', 'lastTriggeredImageID')
@osbsapi
def dump_resource(self, resource_type):
return self.os.dump_resource(resource_type).json()
@osbsapi
def restore_resource(self, resource_type, resources, continue_on_error=False):
nfailed = 0
for r in resources["items"]:
name = utils.graceful_chain_get(r, 'metadata', 'name') or '(no name)'
logger.debug("restoring %s/%s", resource_type, name)
try:
self._prepare_resource(resource_type, r)
self.os.restore_resource(resource_type, r)
except Exception:
if continue_on_error:
logger.exception("failed to restore %s/%s", resource_type, name)
nfailed += 1
else:
raise
if continue_on_error:
ntotal = len(resources["items"])
logger.info("restored %s/%s %s", ntotal - nfailed, ntotal, resource_type)
```
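For orientation, a rough sketch of how the API above is typically driven follows. The two configuration objects are assumed to come from osbs-client's configuration layer (not shown in this file); the values passed to `create_prod_build` are placeholders, and only methods defined above are used.
```python
# Sketch only: drive the OSBS API defined above with placeholder values.
from osbs.api import OSBS

def promote_example(os_conf, build_conf):
    osbs = OSBS(os_conf, build_conf)
    build = osbs.create_prod_build(git_uri='git://example.com/repo.git',
                                   git_ref='master',
                                   git_branch='master',
                                   user='someuser',
                                   component='somecomponent',
                                   target=None)
    name = build.get_build_name()
    osbs.wait_for_build_to_finish(name)
    return osbs.get_build_logs(name)
```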
#### File: osbs/build/pod_response.py
```python
from __future__ import print_function, absolute_import, unicode_literals
import logging
from osbs.utils import graceful_chain_get
logger = logging.getLogger(__name__)
class PodResponse(object):
"""
Wrapper for JSON describing build pod
"""
def __init__(self, pod):
"""
        :param pod: dict, JSON representation of the build pod
"""
self._json = pod
@property
def json(self):
return self._json
def get_container_image_ids(self):
"""
Find the image IDs the containers use.
:return: dict, image tag to docker ID
"""
statuses = graceful_chain_get(self.json, "status", "containerStatuses")
if statuses is None:
return {}
def remove_prefix(image_id, prefix):
if image_id.startswith(prefix):
return image_id[len(prefix):]
return image_id
return dict([(status['image'], remove_prefix(status['imageID'],
'docker://'))
for status in statuses])
```
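A short illustration of what `get_container_image_ids` does with a pod document: it keys the result by image tag and strips the `docker://` prefix from the image ID. The pod fragment below is invented for the example.
```python
from osbs.build.pod_response import PodResponse

# Invented pod fragment; only the fields read by get_container_image_ids().
pod_json = {
    "status": {
        "containerStatuses": [
            {"image": "registry.example.com/foo:latest",
             "imageID": "docker://0123456789abcdef"},
        ]
    }
}
assert PodResponse(pod_json).get_container_image_ids() == {
    "registry.example.com/foo:latest": "0123456789abcdef",
}
```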
#### File: tests/build/test_build_request.py
```python
import copy
import json
import os
from pkg_resources import parse_version
import shutil
from osbs.build.build_request import BuildManager, BuildRequest, ProductionBuild
from osbs.constants import (PROD_BUILD_TYPE, PROD_WITHOUT_KOJI_BUILD_TYPE,
PROD_WITH_SECRET_BUILD_TYPE)
from osbs.exceptions import OsbsValidationException
from flexmock import flexmock
import pytest
from tests.constants import (INPUTS_PATH, TEST_BUILD_CONFIG, TEST_BUILD_JSON, TEST_COMPONENT,
TEST_GIT_BRANCH, TEST_GIT_REF, TEST_GIT_URI)
class NoSuchPluginException(Exception):
pass
def get_plugin(plugins, plugin_type, plugin_name):
plugins = plugins[plugin_type]
for plugin in plugins:
if plugin["name"] == plugin_name:
return plugin
else:
raise NoSuchPluginException()
def plugin_value_get(plugins, plugin_type, plugin_name, *args):
result = get_plugin(plugins, plugin_type, plugin_name)
for arg in args:
result = result[arg]
return result
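# Illustrative shape of the decoded DOCK_PLUGINS value that get_plugin() and
# plugin_value_get() walk (a made-up fragment, not a complete configuration):
#   {
#       "prebuild_plugins": [
#           {"name": "pull_base_image", "args": {"parent_registry": "..."}}
#       ],
#       "postbuild_plugins": [...],
#       "exit_plugins": [...]
#   }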
class TestBuildRequest(object):
def test_build_request_is_auto_instantiated(self):
build_json = copy.deepcopy(TEST_BUILD_JSON)
br = BuildRequest('something')
flexmock(br).should_receive('template').and_return(build_json)
assert br.is_auto_instantiated() is True
def test_build_request_isnt_auto_instantiated(self):
build_json = copy.deepcopy(TEST_BUILD_JSON)
build_json['spec']['triggers'] = []
br = BuildRequest('something')
flexmock(br).should_receive('template').and_return(build_json)
assert br.is_auto_instantiated() is False
def test_render_simple_request_incorrect_postbuild(self, tmpdir):
# Make temporary copies of the JSON files
for basename in ['simple.json', 'simple_inner.json']:
shutil.copy(os.path.join(INPUTS_PATH, basename),
os.path.join(str(tmpdir), basename))
# Create an inner JSON description which incorrectly runs the exit
# plugins as postbuild plugins.
with open(os.path.join(str(tmpdir), 'simple_inner.json'), 'r+') as inner:
inner_json = json.load(inner)
# Re-write all the exit plugins as postbuild plugins
exit_plugins = inner_json['exit_plugins']
inner_json['postbuild_plugins'].extend(exit_plugins)
del inner_json['exit_plugins']
inner.seek(0)
json.dump(inner_json, inner)
inner.truncate()
bm = BuildManager(str(tmpdir))
build_request = bm.get_build_request_by_type("simple")
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'component': "component",
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
}
build_request.set_params(**kwargs)
build_json = build_request.render()
env_vars = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env_vars:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
# Check the store_metadata_in_osv3's uri parameter was set
# correctly, even though it was listed as a postbuild plugin.
assert plugin_value_get(plugins, "postbuild_plugins", "store_metadata_in_osv3", "args", "url") == \
"http://openshift/"
@pytest.mark.parametrize('tag', [
None,
"some_tag",
])
@pytest.mark.parametrize('registry_uris', [
[],
["registry.example.com:5000"],
["registry.example.com:5000", "localhost:6000"],
])
def test_render_simple_request(self, tag, registry_uris):
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type("simple")
name_label = "fedora/resultingimage"
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'user': "john-foo",
'component': TEST_COMPONENT,
'registry_uris': registry_uris,
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'tag': tag,
}
build_request.set_params(**kwargs)
build_json = build_request.render()
assert build_json["metadata"]["name"] is not None
assert "triggers" not in build_json["spec"]
assert build_json["spec"]["source"]["git"]["uri"] == TEST_GIT_URI
assert build_json["spec"]["source"]["git"]["ref"] == TEST_GIT_REF
expected_output = "john-foo/component:%s" % (tag if tag else "20")
if registry_uris:
expected_output = registry_uris[0] + "/" + expected_output
assert build_json["spec"]["output"]["to"]["name"].startswith(expected_output)
env_vars = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env_vars:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
pull_base_image = get_plugin(plugins, "prebuild_plugins",
"pull_base_image")
assert pull_base_image is not None
assert ('args' not in pull_base_image or
'parent_registry' not in pull_base_image['args'])
assert plugin_value_get(plugins, "exit_plugins", "store_metadata_in_osv3", "args", "url") == \
"http://openshift/"
for r in registry_uris:
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries", r) == {"insecure": True}
@pytest.mark.parametrize('architecture', [
None,
'x86_64',
])
def test_render_prod_request_with_repo(self, architecture):
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
name_label = "fedora/resultingimage"
assert isinstance(build_request, ProductionBuild)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': name_label,
'registry_uri': "registry.example.com",
'source_registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': architecture,
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'yum_repourls': ["http://example.com/my.repo"],
'registry_api_versions': ['v1'],
}
build_request.set_params(**kwargs)
build_json = build_request.render()
assert build_json["metadata"]["name"] == TEST_BUILD_CONFIG
assert "triggers" not in build_json["spec"]
assert build_json["spec"]["source"]["git"]["uri"] == TEST_GIT_URI
assert build_json["spec"]["source"]["git"]["ref"] == TEST_GIT_REF
assert build_json["spec"]["output"]["to"]["name"].startswith(
"registry.example.com/john-foo/component:"
)
env_vars = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env_vars:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "bump_release")
assert plugin_value_get(plugins, "prebuild_plugins", "distgit_fetch_artefacts",
"args", "command") == "make"
assert plugin_value_get(plugins, "prebuild_plugins", "pull_base_image",
"args", "parent_registry") == "registry.example.com"
assert plugin_value_get(plugins, "exit_plugins", "store_metadata_in_osv3",
"args", "url") == "http://openshift/"
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries", "registry.example.com") == {"insecure": True}
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "koji")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "cp_built_image_to_nfs")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_push")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "import_image")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_promote")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "sendmail")
assert 'sourceSecret' not in build_json["spec"]["source"]
assert plugin_value_get(plugins, "prebuild_plugins", "add_yum_repo_by_url",
"args", "repourls") == ["http://example.com/my.repo"]
labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile",
"args", "labels")
assert labels is not None
assert labels['Authoritative_Registry'] is not None
assert labels['Build_Host'] is not None
assert labels['Vendor'] is not None
assert labels['distribution-scope'] is not None
if architecture:
assert labels['Architecture'] is not None
else:
assert 'Architecture' not in labels
def test_render_prod_request(self):
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
name_label = "fedora/resultingimage"
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': name_label,
'registry_uri': "registry.example.com",
'source_registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'pdc_url': 'https://pdc.example.com',
'smtp_uri': 'smtp.example.com',
}
build_request.set_params(**kwargs)
build_json = build_request.render()
assert build_json["metadata"]["name"] == TEST_BUILD_CONFIG
assert "triggers" not in build_json["spec"]
assert build_json["spec"]["source"]["git"]["uri"] == TEST_GIT_URI
assert build_json["spec"]["source"]["git"]["ref"] == TEST_GIT_REF
assert build_json["spec"]["output"]["to"]["name"].startswith(
"registry.example.com/john-foo/component:"
)
env_vars = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env_vars:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "bump_release")
assert plugin_value_get(plugins, "prebuild_plugins", "distgit_fetch_artefacts",
"args", "command") == "make"
assert plugin_value_get(plugins, "prebuild_plugins", "pull_base_image", "args",
"parent_registry") == "registry.example.com"
assert plugin_value_get(plugins, "exit_plugins", "store_metadata_in_osv3",
"args", "url") == "http://openshift/"
assert plugin_value_get(plugins, "prebuild_plugins", "koji",
"args", "root") == "http://root/"
assert plugin_value_get(plugins, "prebuild_plugins", "koji",
"args", "target") == "koji-target"
assert plugin_value_get(plugins, "prebuild_plugins", "koji",
"args", "hub") == "http://hub/"
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries", "registry.example.com") == {"insecure": True}
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "cp_built_image_to_nfs")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_push")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "import_image")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_promote")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "sendmail")
assert 'sourceSecret' not in build_json["spec"]["source"]
labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile",
"args", "labels")
assert labels is not None
assert labels['Architecture'] is not None
assert labels['Authoritative_Registry'] is not None
assert labels['Build_Host'] is not None
assert labels['Vendor'] is not None
assert labels['distribution-scope'] is not None
def test_render_prod_without_koji_request(self):
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type(PROD_WITHOUT_KOJI_BUILD_TYPE)
name_label = "fedora/resultingimage"
assert isinstance(build_request, ProductionBuild)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': name_label,
'registry_uri': "registry.example.com",
'source_registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
}
build_request.set_params(**kwargs)
build_json = build_request.render()
assert build_json["metadata"]["name"] == TEST_BUILD_CONFIG
assert "triggers" not in build_json["spec"]
assert build_json["spec"]["source"]["git"]["uri"] == TEST_GIT_URI
assert build_json["spec"]["source"]["git"]["ref"] == TEST_GIT_REF
assert build_json["spec"]["output"]["to"]["name"].startswith(
"registry.example.com/john-foo/component:none-"
)
env_vars = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env_vars:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "bump_release")
assert plugin_value_get(plugins, "prebuild_plugins", "distgit_fetch_artefacts",
"args", "command") == "make"
assert plugin_value_get(plugins, "prebuild_plugins", "pull_base_image", "args",
"parent_registry") == "registry.example.com"
assert plugin_value_get(plugins, "exit_plugins", "store_metadata_in_osv3",
"args", "url") == "http://openshift/"
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries", "registry.example.com") == {"insecure": True}
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "koji")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "cp_built_image_to_nfs")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_push")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "import_image")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_promote")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "sendmail")
assert 'sourceSecret' not in build_json["spec"]["source"]
labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile",
"args", "labels")
assert labels is not None
assert labels['Architecture'] is not None
assert labels['Authoritative_Registry'] is not None
assert labels['Build_Host'] is not None
assert labels['Vendor'] is not None
assert labels['distribution-scope'] is not None
def test_render_prod_with_secret_request(self):
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type(PROD_WITH_SECRET_BUILD_TYPE)
assert isinstance(build_request, ProductionBuild)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "",
'pulp_registry': "registry.example.com",
'nfs_server_path': "server:path",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'source_secret': 'mysecret',
}
build_request.set_params(**kwargs)
build_json = build_request.render()
assert build_json["spec"]["source"]["sourceSecret"]["name"] == "mysecret"
strategy = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in strategy:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "bump_release")
assert get_plugin(plugins, "prebuild_plugins", "koji")
assert get_plugin(plugins, "postbuild_plugins", "pulp_push")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
assert get_plugin(plugins, "postbuild_plugins", "cp_built_image_to_nfs")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "import_image")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_promote")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "sendmail")
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries") == {}
def test_render_prod_request_requires_newer(self):
"""
We should get an OsbsValidationException when trying to use the
sendmail plugin without requiring OpenShift 1.0.6, as
configuring the plugin requires the new-style secrets.
"""
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type(PROD_WITH_SECRET_BUILD_TYPE)
name_label = "fedora/resultingimage"
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': name_label,
'registry_uris': ["registry1.example.com/v1", # first is primary
"registry2.example.com/v2"],
'nfs_server_path': "server:path",
'source_registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'pdc_secret': 'foo',
'pdc_url': 'https://pdc.example.com',
'smtp_uri': 'smtp.example.com',
}
build_request.set_params(**kwargs)
with pytest.raises(OsbsValidationException):
build_request.render()
@pytest.mark.parametrize('registry_api_versions', [
['v1', 'v2'],
['v2'],
])
@pytest.mark.parametrize('openshift_version', ['1.0.0', '1.0.6'])
def test_render_prod_request_v1_v2(self, registry_api_versions, openshift_version):
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type(PROD_WITH_SECRET_BUILD_TYPE)
build_request.set_openshift_required_version(parse_version(openshift_version))
name_label = "fedora/resultingimage"
pulp_env = 'v1pulp'
pulp_secret = pulp_env + 'secret'
kwargs = {
'pulp_registry': pulp_env,
'pulp_secret': pulp_secret,
}
kwargs.update({
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': name_label,
'registry_uris': [
# first is primary
"http://registry1.example.com:5000/v1",
"http://registry2.example.com:5000/v2"
],
'nfs_server_path': "server:path",
'source_registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': registry_api_versions,
})
build_request.set_params(**kwargs)
build_json = build_request.render()
assert build_json["metadata"]["name"] == TEST_BUILD_CONFIG
assert "triggers" not in build_json["spec"]
assert build_json["spec"]["source"]["git"]["uri"] == TEST_GIT_URI
assert build_json["spec"]["source"]["git"]["ref"] == TEST_GIT_REF
# Pulp used, so no direct registry output
assert build_json["spec"]["output"]["to"]["name"].startswith(
"john-foo/component:"
)
env_vars = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env_vars:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
# tag_and_push configuration. Must not have the scheme part.
expected_registries = {
'registry2.example.com:5000': {'insecure': True},
}
if 'v1' in registry_api_versions:
expected_registries['registry1.example.com:5000'] = {
'insecure': True,
}
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push",
"args", "registries") == expected_registries
if openshift_version == '1.0.0':
assert 'secrets' not in build_json['spec']['strategy']['customStrategy']
assert build_json['spec']['source']['sourceSecret']['name'] == pulp_secret
else:
assert 'sourceSecret' not in build_json['spec']['source']
secrets = build_json['spec']['strategy']['customStrategy']['secrets']
for version, plugin in [('v1', 'pulp_push'), ('v2', 'pulp_sync')]:
if version not in registry_api_versions:
continue
path = plugin_value_get(plugins, "postbuild_plugins", plugin,
"args", "pulp_secret_path")
pulp_secrets = [secret for secret in secrets if secret['mountPath'] == path]
assert len(pulp_secrets) == 1
assert pulp_secrets[0]['secretSource']['name'] == pulp_secret
if 'v1' in registry_api_versions:
assert get_plugin(plugins, "postbuild_plugins",
"compress")
assert get_plugin(plugins, "postbuild_plugins",
"cp_built_image_to_nfs")
assert get_plugin(plugins, "postbuild_plugins",
"pulp_push")
assert plugin_value_get(plugins, "postbuild_plugins", "pulp_push",
"args", "pulp_registry_name") == pulp_env
else:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins",
"compress")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins",
"cp_built_image_to_nfs")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins",
"pulp_push")
if 'v2' in registry_api_versions:
assert get_plugin(plugins, "postbuild_plugins", "pulp_sync")
env = plugin_value_get(plugins, "postbuild_plugins", "pulp_sync",
"args", "pulp_registry_name")
assert env == pulp_env
docker_registry = plugin_value_get(plugins, "postbuild_plugins",
"pulp_sync", "args",
"docker_registry")
# pulp_sync config must have the scheme part to satisfy pulp.
assert docker_registry == 'http://registry2.example.com:5000'
else:
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
def test_render_with_yum_repourls(self):
bm = BuildManager(INPUTS_PATH)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
}
build_request = bm.get_build_request_by_type("prod")
# Test validation for yum_repourls parameter
kwargs['yum_repourls'] = 'should be a list'
with pytest.raises(OsbsValidationException):
build_request.set_params(**kwargs)
# Use a valid yum_repourls parameter and check the result
kwargs['yum_repourls'] = ['http://example.com/repo1.repo', 'http://example.com/repo2.repo']
build_request.set_params(**kwargs)
build_json = build_request.render()
strategy = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in strategy:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
repourls = None
for d in plugins['prebuild_plugins']:
if d['name'] == 'add_yum_repo_by_url':
repourls = d['args']['repourls']
assert repourls is not None
assert len(repourls) == 2
assert 'http://example.com/repo1.repo' in repourls
assert 'http://example.com/repo2.repo' in repourls
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "bump_release")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "koji")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "cp_built_image_to_nfs")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_push")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "pulp_sync")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "import_image")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_promote")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "sendmail")
def test_render_prod_with_pulp_no_auth(self):
"""
Rendering should fail if pulp is specified but auth config isn't
"""
bm = BuildManager(INPUTS_PATH)
build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'pulp_registry': "foo",
}
build_request.set_params(**kwargs)
with pytest.raises(OsbsValidationException):
build_request.render()
@staticmethod
def create_image_change_trigger_json(outdir):
"""
Create JSON templates with an image change trigger added.
:param outdir: str, path to store modified templates
"""
# Make temporary copies of the JSON files
for basename in ['prod.json', 'prod_inner.json']:
shutil.copy(os.path.join(INPUTS_PATH, basename),
os.path.join(outdir, basename))
# Create a build JSON description with an image change trigger
with open(os.path.join(outdir, 'prod.json'), 'r+') as prod_json:
build_json = json.load(prod_json)
# Add the image change trigger
build_json['spec']['triggers'] = [
{
"type": "ImageChange",
"imageChange": {
"from": {
"kind": "ImageStreamTag",
"name": "{{BASE_IMAGE_STREAM}}"
}
}
}
]
prod_json.seek(0)
json.dump(build_json, prod_json)
prod_json.truncate()
@pytest.mark.parametrize(('registry_uri', 'insecure_registry'), [
("https://registry.example.com", False),
("http://registry.example.com", True),
])
@pytest.mark.parametrize('branchref', [
# Wrong way round
{
'git_ref': TEST_GIT_BRANCH,
'git_branch': TEST_GIT_REF,
'should_raise': True,
},
# Right way round
{
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'should_raise': False,
},
])
def test_render_prod_request_with_trigger(self, tmpdir, branchref,
registry_uri, insecure_registry):
self.create_image_change_trigger_json(str(tmpdir))
bm = BuildManager(str(tmpdir))
build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
# We're using both pulp and sendmail, both of which require a
# Kubernetes secret. This isn't supported until OpenShift
# Origin 1.0.6.
build_request.set_openshift_required_version(parse_version('1.0.6'))
name_label = "fedora/resultingimage"
push_url = "ssh://{username}git.example.com/git/{component}.git"
pdc_secret_name = 'foo'
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': branchref['git_ref'],
'git_branch': branchref['git_branch'],
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': name_label,
'registry_uri': registry_uri,
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'git_push_url': push_url.format(username='', component=TEST_COMPONENT),
'git_push_username': 'example',
'pdc_secret': pdc_secret_name,
'pdc_url': 'https://pdc.example.com',
'smtp_uri': 'smtp.example.com',
}
build_request.set_params(**kwargs)
if branchref['should_raise']:
with pytest.raises(OsbsValidationException):
build_request.render()
return
else:
build_json = build_request.render()
assert "triggers" in build_json["spec"]
assert build_json["spec"]["triggers"][0]["imageChange"]["from"]["name"] == 'fedora:latest'
strategy = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in strategy:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
plugins = json.loads(plugins_json)
assert get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
assert get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
assert plugin_value_get(plugins, "prebuild_plugins",
"check_and_set_rebuild", "args",
"url") == kwargs["openshift_uri"]
assert get_plugin(plugins, "prebuild_plugins", "bump_release")
assert plugin_value_get(plugins, "prebuild_plugins", "bump_release", "args",
"git_ref") == TEST_GIT_REF
assert plugin_value_get(plugins, "prebuild_plugins", "bump_release", "args",
"push_url") == push_url.format(username='example@',
component=TEST_COMPONENT)
assert get_plugin(plugins, "postbuild_plugins", "import_image")
assert plugin_value_get(plugins,
"postbuild_plugins", "import_image", "args",
"imagestream") == name_label.replace('/', '-')
expected_repo = os.path.join(kwargs["registry_uri"], name_label)
expected_repo = expected_repo.replace('https://', '')
expected_repo = expected_repo.replace('http://', '')
assert plugin_value_get(plugins,
"postbuild_plugins", "import_image", "args",
"docker_image_repo") == expected_repo
assert plugin_value_get(plugins,
"postbuild_plugins", "import_image", "args",
"url") == kwargs["openshift_uri"]
if insecure_registry:
assert plugin_value_get(plugins,
"postbuild_plugins", "import_image", "args",
"insecure_registry")
else:
with pytest.raises(KeyError):
plugin_value_get(plugins,
"postbuild_plugins", "import_image", "args",
"insecure_registry")
assert plugin_value_get(plugins, "postbuild_plugins", "tag_and_push", "args",
"registries", "registry.example.com") == {"insecure": True}
assert get_plugin(plugins, "exit_plugins", "koji_promote")
assert plugin_value_get(plugins, "exit_plugins", "koji_promote",
"args", "kojihub") == kwargs["kojihub"]
assert plugin_value_get(plugins, "exit_plugins", "koji_promote",
"args", "url") == kwargs["openshift_uri"]
with pytest.raises(KeyError):
plugin_value_get(plugins, 'exit_plugins', 'koji_promote',
'args', 'metadata_only') # v1 enabled by default
pdc_secret = [secret for secret in
build_json['spec']['strategy']['customStrategy']['secrets']
if secret['secretSource']['name'] == pdc_secret_name]
mount_path = pdc_secret[0]['mountPath']
expected = {'args': {'from_address': '<EMAIL>',
'url': 'http://openshift/',
'pdc_url': 'https://pdc.example.com',
'pdc_secret_path': mount_path,
'send_on': ['auto_fail', 'auto_success'],
'error_addresses': ['<EMAIL>'],
'smtp_uri': 'smtp.example.com',
'submitter': 'john-foo'},
'name': 'sendmail'}
assert get_plugin(plugins, 'exit_plugins', 'sendmail') == expected
@pytest.mark.parametrize('missing', [
'git_branch',
'git_push_url',
])
def test_render_prod_request_trigger_missing_param(self, tmpdir, missing):
self.create_image_change_trigger_json(str(tmpdir))
bm = BuildManager(str(tmpdir))
build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
push_url = "ssh://{username}git.example.com/git/{component}.git"
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': 'fedora/resultingimage',
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'koji_target': "koji-target",
'kojiroot': "http://root/",
'kojihub': "http://hub/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'git_push_url': push_url.format(username='', component=TEST_COMPONENT),
'git_push_username': 'example',
}
# Remove one of the parameters required for rebuild triggers
del kwargs[missing]
build_request.set_params(**kwargs)
build_json = build_request.render()
# Verify the triggers are now disabled
assert "triggers" not in build_json["spec"]
strategy = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in strategy:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
# Verify the rebuild plugins are all disabled
plugins = json.loads(plugins_json)
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "check_and_set_rebuild")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins",
"stop_autorebuild_if_disabled")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "prebuild_plugins", "bump_release")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "postbuild_plugins", "import_image")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "koji_promote")
with pytest.raises(NoSuchPluginException):
get_plugin(plugins, "exit_plugins", "sendmail")
def test_render_prod_request_new_secrets(self, tmpdir):
bm = BuildManager(INPUTS_PATH)
secret_name = 'mysecret'
kwargs = {
'git_uri': TEST_GIT_URI,
'git_ref': TEST_GIT_REF,
'git_branch': TEST_GIT_BRANCH,
'user': "john-foo",
'component': TEST_COMPONENT,
'base_image': 'fedora:latest',
'name_label': "fedora/resultingimage",
'registry_uri': "registry.example.com",
'openshift_uri': "http://openshift/",
'builder_openshift_url': "http://openshift/",
'sources_command': "make",
'architecture': "x86_64",
'vendor': "Foo Vendor",
'build_host': "our.build.host.example.com",
'authoritative_registry': "registry.example.com",
'distribution_scope': "authoritative-source-only",
'registry_api_versions': ['v1'],
'pulp_registry': 'foo',
'pulp_secret': secret_name,
}
# Default required version (0.5.4), implicitly and explicitly
for required in (None, parse_version('0.5.4')):
build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
if required is not None:
build_request.set_openshift_required_version(required)
build_request.set_params(**kwargs)
build_json = build_request.render()
# Using the sourceSecret scheme
assert 'sourceSecret' in build_json['spec']['source']
assert build_json['spec']['source']\
['sourceSecret']['name'] == secret_name
# Not using the secrets array scheme
assert 'secrets' not in build_json['spec']['strategy']['customStrategy']
# We shouldn't have pulp_secret_path set
env = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
assert 'pulp_secret_path' not in plugin_value_get(plugins,
'postbuild_plugins',
'pulp_push',
'args')
# Set required version to 1.0.6
build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
build_request.set_openshift_required_version(parse_version('1.0.6'))
build_json = build_request.render()
# Not using the sourceSecret scheme
assert 'sourceSecret' not in build_json['spec']['source']
# Using the secrets array scheme instead
assert 'secrets' in build_json['spec']['strategy']['customStrategy']
secrets = build_json['spec']['strategy']['customStrategy']['secrets']
pulp_secret = [secret for secret in secrets
if secret['secretSource']['name'] == secret_name]
assert len(pulp_secret) > 0
assert 'mountPath' in pulp_secret[0]
# Check that the secret's mountPath matches the plugin's
# configured path for the secret
mount_path = pulp_secret[0]['mountPath']
env = build_json['spec']['strategy']['customStrategy']['env']
plugins_json = None
for d in env:
if d['name'] == 'DOCK_PLUGINS':
plugins_json = d['value']
break
assert plugins_json is not None
plugins = json.loads(plugins_json)
assert plugin_value_get(plugins, 'postbuild_plugins', 'pulp_push',
'args', 'pulp_secret_path') == mount_path
```
#### File: osbs-client/tests/test_core.py
```python
import six
from osbs.http import HttpResponse
from osbs.constants import BUILD_FINISHED_STATES
from osbs.exceptions import OsbsResponseException
from osbs.core import check_response
from tests.constants import TEST_BUILD, TEST_LABEL, TEST_LABEL_VALUE
from tests.fake_api import openshift
import pytest
try:
# py2
import httplib
except ImportError:
# py3
import http.client as httplib
class Response(object):
def __init__(self, status_code, content=None, iterable=None):
self.status_code = status_code
self.iterable = iterable
if content is not None:
self.content = content
def iter_lines(self):
for line in self.iterable:
yield line
class TestCheckResponse(object):
@pytest.mark.parametrize('content', [None, 'OK'])
@pytest.mark.parametrize('status_code', [httplib.OK, httplib.CREATED])
def test_check_response_ok(self, status_code, content):
response = Response(status_code, content=content)
check_response(response)
def test_check_response_bad_stream(self, caplog):
iterable = ['iter', 'lines']
status_code = httplib.CONFLICT
response = Response(status_code, iterable=iterable)
with pytest.raises(OsbsResponseException):
check_response(response)
logged = [l.getMessage() for l in caplog.records()]
assert len(logged) == 1
assert logged[0] == '[{code}] {message}'.format(code=status_code,
message='iterlines')
def test_check_response_bad_nostream(self, caplog):
status_code = httplib.CONFLICT
content = 'content'
response = Response(status_code, content=content)
with pytest.raises(OsbsResponseException):
check_response(response)
logged = [l.getMessage() for l in caplog.records()]
assert len(logged) == 1
assert logged[0] == '[{code}] {message}'.format(code=status_code,
message=content)
class TestOpenshift(object):
def test_set_labels_on_build(self, openshift):
l = openshift.set_labels_on_build(TEST_BUILD, {TEST_LABEL: TEST_LABEL_VALUE})
assert l.json() is not None
def test_list_builds(self, openshift):
l = openshift.list_builds()
assert l is not None
assert bool(l.json()) # is there at least something
def test_list_pods(self, openshift):
response = openshift.list_pods(label="openshift.io/build.name=%s" %
TEST_BUILD)
assert isinstance(response, HttpResponse)
def test_get_oauth_token(self, openshift):
token = openshift.get_oauth_token()
assert token is not None
def test_get_user(self, openshift):
l = openshift.get_user()
assert l.json() is not None
def test_watch_build(self, openshift):
response = openshift.wait_for_build_to_finish(TEST_BUILD)
status_lower = response["status"]["phase"].lower()
assert response["metadata"]["name"] == TEST_BUILD
assert status_lower in BUILD_FINISHED_STATES
assert isinstance(TEST_BUILD, six.text_type)
assert isinstance(status_lower, six.text_type)
def test_create_build(self, openshift):
response = openshift.create_build({})
assert response is not None
assert response.json()["metadata"]["name"] == TEST_BUILD
assert response.json()["status"]["phase"].lower() in BUILD_FINISHED_STATES
``` |
{
"source": "jpopelka/packit-service-fedmsg",
"score": 2
} |
#### File: packit-service-fedmsg/tests/test_consumer.py
```python
import pytest
from packit_service_fedmsg.consumer import specfile_changed
@pytest.mark.parametrize(
"body, expected",
[
pytest.param(
{
"commit": {
"stats": {
"files": {
".gitignore": {"additions": 1, "deletions": 0, "lines": 1},
"buildah.spec": {
"additions": 5,
"deletions": 2,
"lines": 7,
},
"sources": {"additions": 1, "deletions": 1, "lines": 2},
},
"total": {
"additions": 7,
"deletions": 3,
"files": 3,
"lines": 10,
},
},
"summary": "buildah-1.12.0-0.73.dev.git1e6a70c",
"username": "rhcontainerbot",
},
},
True,
),
pytest.param(
{
"commit": {
"stats": {
"files": {
".gitignore": {"additions": 1, "deletions": 0, "lines": 1},
"sources": {"additions": 1, "deletions": 1, "lines": 2},
},
"total": {
"additions": 7,
"deletions": 3,
"files": 3,
"lines": 10,
},
},
"summary": "buildah-1.12.0-0.73.dev.git1e6a70c",
"username": "rhcontainerbot",
},
},
False,
),
pytest.param({}, False),
pytest.param(
{
"commit": {
"stats": {},
"summary": "buildah-1.12.0-0.73.dev.git1e6a70c",
"username": "rhcontainerbot",
},
},
False,
),
pytest.param(
{
"commit": {},
},
False,
),
],
)
def test_specfile_changed(body, expected):
assert specfile_changed(body) == expected
``` |
{
"source": "jpopelka/packit-service",
"score": 2
} |
#### File: service/api/srpm_builds.py
```python
from http import HTTPStatus
from logging import getLogger
from packit_service.service.urls import get_srpm_build_info_url
from flask_restx import Namespace, Resource
from packit_service.models import SRPMBuildModel, optional_timestamp
from packit_service.service.api.parsers import indices, pagination_arguments
from packit_service.service.api.utils import get_project_info_from_build, response_maker
logger = getLogger("packit_service")
ns = Namespace("srpm-builds", description="SRPM builds")
@ns.route("")
class SRPMBuildsList(Resource):
@ns.expect(pagination_arguments)
@ns.response(HTTPStatus.PARTIAL_CONTENT.value, "SRPM builds list follows")
def get(self):
"""List all SRPM builds."""
result = []
first, last = indices()
for build in SRPMBuildModel.get(first, last):
build_dict = {
"srpm_build_id": build.id,
"success": build.success,
"log_url": get_srpm_build_info_url(build.id),
"build_submitted_time": optional_timestamp(build.build_submitted_time),
}
project = build.get_project()
            # It's possible that the job trigger isn't stored in the DB
if project:
build_dict["repo_namespace"] = project.namespace
build_dict["repo_name"] = project.repo_name
build_dict["project_url"] = project.project_url
build_dict["pr_id"] = build.get_pr_id()
build_dict["branch_name"] = build.get_branch_name()
result.append(build_dict)
resp = response_maker(
result,
status=HTTPStatus.PARTIAL_CONTENT.value,
)
resp.headers["Content-Range"] = f"srpm-builds {first + 1}-{last}/*"
return resp
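# Illustrative only (not part of the original module): a client pages through the
# listing and reads the Content-Range header built above, e.g. a request for the
# second page of 20 builds gets back
#
#     Content-Range: srpm-builds 21-40/*
#
# The exact query parameter names are defined by pagination_arguments/indices()
# and are not repeated here.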
@ns.route("/<int:id>")
@ns.param("id", "Packit id of the SRPM build")
class SRPMBuildItem(Resource):
@ns.response(HTTPStatus.OK.value, "OK, SRPM build details follow")
@ns.response(HTTPStatus.NOT_FOUND.value, "SRPM build identifier not in db/hash")
def get(self, id):
"""A specific SRPM build details."""
build = SRPMBuildModel.get_by_id(int(id))
if not build:
return response_maker(
{"error": "No info about build stored in DB"},
status=HTTPStatus.NOT_FOUND.value,
)
build_dict = {
"success": build.success,
"build_submitted_time": optional_timestamp(build.build_submitted_time),
"url": build.url,
"logs": build.logs,
"run_ids": sorted(run.id for run in build.runs),
}
build_dict.update(get_project_info_from_build(build))
return response_maker(build_dict)
```
#### File: worker/events/testing_farm.py
```python
from datetime import datetime
from typing import Optional, Dict
from ogr.abstract import GitProject
from ogr.services.pagure import PagureProject
from packit_service.models import (
TestingFarmResult,
AbstractTriggerDbType,
PullRequestModel,
TFTTestRunModel,
)
from packit_service.worker.events.event import AbstractForgeIndependentEvent
class TestingFarmResultsEvent(AbstractForgeIndependentEvent):
def __init__(
self,
pipeline_id: str,
result: TestingFarmResult,
compose: str,
summary: str,
log_url: str,
copr_build_id: str,
copr_chroot: str,
commit_sha: str,
project_url: str,
created: datetime,
):
super().__init__(project_url=project_url)
self.pipeline_id = pipeline_id
self.result = result
self.compose = compose
self.summary = summary
self.log_url = log_url
self.copr_build_id = copr_build_id
self.copr_chroot = copr_chroot
self.commit_sha: str = commit_sha
self.created: datetime = created
# Lazy properties
self._pr_id: Optional[int] = None
self._db_trigger: Optional[AbstractTriggerDbType] = None
@property
def pr_id(self) -> Optional[int]:
if not self._pr_id and isinstance(self.db_trigger, PullRequestModel):
self._pr_id = self.db_trigger.pr_id
return self._pr_id
def get_dict(self, default_dict: Optional[Dict] = None) -> dict:
result = super().get_dict()
result["result"] = result["result"].value
result["pr_id"] = self.pr_id
result.pop("_db_trigger")
return result
@property
def db_trigger(self) -> Optional[AbstractTriggerDbType]:
if not self._db_trigger:
run_model = TFTTestRunModel.get_by_pipeline_id(pipeline_id=self.pipeline_id)
if run_model:
self._db_trigger = run_model.get_trigger_object()
return self._db_trigger
def get_base_project(self) -> Optional[GitProject]:
if self.pr_id is not None:
if isinstance(self.project, PagureProject):
pull_request = self.project.get_pr(pr_id=self.pr_id)
return self.project.service.get_project(
namespace=self.project.namespace,
username=pull_request.author,
repo=self.project.repo,
is_fork=True,
)
else:
return None # With Github app, we cannot work with fork repo
return self.project
```
#### File: worker/handlers/distgit.py
```python
import logging
from os import getenv
import shutil
from typing import Optional
from celery import Task
from packit.exceptions import PackitException
from packit.api import PackitAPI
from packit.config import JobConfig, JobType
from packit.config.aliases import get_branches
from packit.config.package_config import PackageConfig
from packit.local_project import LocalProject
from packit.utils.repo import RepositoryCache
from packit_service import sentry_integration
from packit_service.config import ProjectToSync
from packit_service.constants import (
CONTACTS_URL,
DEFAULT_RETRY_LIMIT,
FILE_DOWNLOAD_FAILURE,
MSG_RETRIGGER,
)
from packit_service.worker.events import (
PushPagureEvent,
ReleaseEvent,
IssueCommentEvent,
IssueCommentGitlabEvent,
)
from packit_service.worker.handlers.abstract import (
JobHandler,
TaskName,
configured_as,
reacts_to,
run_for_comment,
)
from packit_service.worker.result import TaskResults
logger = logging.getLogger(__name__)
@configured_as(job_type=JobType.sync_from_downstream)
@reacts_to(event=PushPagureEvent)
class SyncFromDownstream(JobHandler):
"""Sync new specfile changes to upstream after a new git push in the dist-git."""
task_name = TaskName.sync_from_downstream
def __init__(
self,
package_config: PackageConfig,
job_config: JobConfig,
event: dict,
):
super().__init__(
package_config=package_config,
job_config=job_config,
event=event,
)
self.dg_repo_name = event.get("repo_name")
self.dg_branch = event.get("git_ref")
self._project_to_sync: Optional[ProjectToSync] = None
@property
def project_to_sync(self) -> Optional[ProjectToSync]:
if self._project_to_sync is None:
if project_to_sync := self.service_config.get_project_to_sync(
dg_repo_name=self.dg_repo_name, dg_branch=self.dg_branch
):
self._project_to_sync = project_to_sync
return self._project_to_sync
def pre_check(self) -> bool:
return self.project_to_sync is not None
def run(self) -> TaskResults:
ogr_project_to_sync = self.service_config.get_project(
url=f"{self.project_to_sync.forge}/"
f"{self.project_to_sync.repo_namespace}/{self.project_to_sync.repo_name}"
)
upstream_local_project = LocalProject(
git_project=ogr_project_to_sync,
working_dir=self.service_config.command_handler_work_dir,
cache=RepositoryCache(
cache_path=self.service_config.repository_cache,
add_new=self.service_config.add_repositories_to_repository_cache,
)
if self.service_config.repository_cache
else None,
)
packit_api = PackitAPI(
self.service_config,
self.job_config,
upstream_local_project=upstream_local_project,
stage=self.service_config.use_stage(),
)
# rev is a commit
# we use branch on purpose so we get the latest thing
# TODO: check if rev is HEAD on {branch}, warn then?
packit_api.sync_from_downstream(
dist_git_branch=self.dg_branch,
upstream_branch=self.project_to_sync.branch,
sync_only_specfile=True,
)
return TaskResults(success=True, details={})
class AbortProposeDownstream(Exception):
"""Abort propose-downstream process"""
@configured_as(job_type=JobType.propose_downstream)
@run_for_comment(command="propose-downstream")
@run_for_comment(command="propose-update") # deprecated
@reacts_to(event=ReleaseEvent)
@reacts_to(event=IssueCommentEvent)
@reacts_to(event=IssueCommentGitlabEvent)
class ProposeDownstreamHandler(JobHandler):
topic = "org.fedoraproject.prod.git.receive"
task_name = TaskName.propose_downstream
def __init__(
self,
package_config: PackageConfig,
job_config: JobConfig,
event: dict,
task: Task = None,
):
super().__init__(
package_config=package_config,
job_config=job_config,
event=event,
)
self.task = task
def sync_branch(self, branch: str):
try:
self.api.sync_release(dist_git_branch=branch, tag=self.data.tag_name)
except Exception as ex:
# the archive has not been uploaded to PyPI yet
if FILE_DOWNLOAD_FAILURE in str(ex):
# retry for the archive to become available
logger.info(f"We were not able to download the archive: {ex}")
# when the task hits max_retries, it raises MaxRetriesExceededError
                # and the error-handling code would never be executed
retries = self.task.request.retries
if retries < int(getenv("CELERY_RETRY_LIMIT", DEFAULT_RETRY_LIMIT)):
# will retry in: 1m and then again in another 2m
delay = 60 * 2 ** retries
logger.info(f"Will retry for the {retries + 1}. time in {delay}s.")
                # throw=False so that the exception is not raised and the task
                # is not also retried automatically
self.task.retry(exc=ex, countdown=delay, throw=False)
raise AbortProposeDownstream()
raise ex
finally:
self.api.up.local_project.reset("HEAD")
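    # After scheduling the Celery retry, sync_branch still raises
    # AbortProposeDownstream; run() below catches it and reports success, so no
    # dist-git failure issue is filed while the task is being retried.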
def run(self) -> TaskResults:
"""
Sync the upstream release to dist-git as a pull request.
"""
self.local_project = LocalProject(
git_project=self.project,
working_dir=self.service_config.command_handler_work_dir,
cache=RepositoryCache(
cache_path=self.service_config.repository_cache,
add_new=self.service_config.add_repositories_to_repository_cache,
)
if self.service_config.repository_cache
else None,
)
self.api = PackitAPI(
self.service_config,
self.job_config,
self.local_project,
stage=self.service_config.use_stage(),
)
errors = {}
default_dg_branch = self.api.dg.local_project.git_project.default_branch
try:
for branch in get_branches(
*self.job_config.metadata.dist_git_branches, default=default_dg_branch
):
try:
self.sync_branch(branch=branch)
except AbortProposeDownstream:
return TaskResults(
success=True, # do not create a Sentry issue
details={
"msg": "Not able to download archive. Task will be retried."
},
)
except Exception as ex:
# eat the exception and continue with the execution
errors[branch] = str(ex)
sentry_integration.send_to_sentry(ex)
finally:
# remove temporary dist-git clone after we're done here - context:
# 1. the dist-git repo is cloned on worker, not sandbox
# 2. it's stored in /tmp, not in the mirrored sandbox PV
# 3. it's not being cleaned up and it wastes pod's filesystem space
shutil.rmtree(self.api.dg.local_project.working_dir)
if errors:
branch_errors = ""
for branch, err in sorted(
errors.items(), key=lambda branch_error: branch_error[0]
):
err_without_new_lines = err.replace("\n", " ")
branch_errors += f"| `{branch}` | `{err_without_new_lines}` |\n"
msg_retrigger = MSG_RETRIGGER.format(
job="update", command="propose-downstream", place="issue"
)
body_msg = (
f"Packit failed on creating pull-requests in dist-git:\n\n"
f"| dist-git branch | error |\n"
f"| --------------- | ----- |\n"
f"{branch_errors}\n\n"
f"{msg_retrigger}\n"
)
self.project.create_issue(
title=f"[packit] Propose downstream failed for release {self.data.tag_name}",
body=body_msg,
)
return TaskResults(
success=False,
details={"msg": "Propose downstream failed.", "errors": errors},
)
return TaskResults(success=True, details={})
@configured_as(job_type=JobType.koji_build)
@reacts_to(event=PushPagureEvent)
class DownstreamKojiBuildHandler(JobHandler):
"""
This handler can submit a build in Koji from a dist-git.
"""
topic = "org.fedoraproject.prod.git.receive"
task_name = TaskName.downstream_koji_build
def __init__(
self,
package_config: PackageConfig,
job_config: JobConfig,
event: dict,
):
super().__init__(
package_config=package_config,
job_config=job_config,
event=event,
)
self.dg_branch = event.get("git_ref")
def pre_check(self) -> bool:
if self.data.event_type in (PushPagureEvent.__name__,):
if self.data.git_ref not in (
configured_branches := get_branches(
*self.job_config.metadata.dist_git_branches, default="main"
)
):
logger.info(
f"Skipping build on '{self.data.git_ref}'. "
f"Koji build configured only for '{configured_branches}'."
)
return False
return True
def run(self) -> TaskResults:
self.local_project = LocalProject(
git_project=self.project,
working_dir=self.service_config.command_handler_work_dir,
cache=RepositoryCache(
cache_path=self.service_config.repository_cache,
add_new=self.service_config.add_repositories_to_repository_cache,
)
if self.service_config.repository_cache
else None,
)
packit_api = PackitAPI(
self.service_config,
self.job_config,
downstream_local_project=self.local_project,
stage=self.service_config.use_stage(),
)
try:
packit_api.build(
dist_git_branch=self.dg_branch,
scratch=self.job_config.metadata.scratch,
nowait=True,
from_upstream=False,
)
except PackitException as ex:
packit_api.downstream_local_project.git_project.commit_comment(
commit=packit_api.downstream_local_project.commit_hexsha,
body="Koji build failed:\n"
"```\n"
"{ex}\n"
"```\n\n"
f"*Get in [touch with us]({CONTACTS_URL}) if you need some help.*",
)
raise ex
return TaskResults(success=True, details={})
```
#### File: tests/integration/test_issue_comment.py
```python
import json
from datetime import datetime
import pytest
import shutil
from celery.canvas import Signature
from flexmock import flexmock
from ogr.abstract import GitTag
from ogr.abstract import PRStatus
from ogr.read_only import PullRequestReadOnly
from ogr.services.github import GithubProject, GithubRelease
from ogr.services.gitlab import GitlabProject, GitlabRelease
from packit.api import PackitAPI
from packit.distgit import DistGit
from packit.config import JobConfigTriggerType
from packit.local_project import LocalProject
from packit_service.config import ServiceConfig
from packit_service.constants import SANDCASTLE_WORK_DIR
from packit_service.models import IssueModel
from packit_service.worker.events import IssueCommentEvent, IssueCommentGitlabEvent
from packit_service.worker.jobs import SteveJobs
from packit_service.worker.monitoring import Pushgateway
from packit_service.worker.tasks import run_propose_downstream_handler
from packit_service.worker.allowlist import Allowlist
from tests.spellbook import DATA_DIR, first_dict_value, get_parameters_from_results
def issue_comment_propose_downstream_event(forge):
return json.loads(
(DATA_DIR / "webhooks" / forge / "issue_propose_downstream.json").read_text()
)
@pytest.fixture(scope="module")
def mock_comment(request):
project_class, release_class, forge, author = request.param
packit_yaml = (
"{'specfile_path': 'packit.spec', 'synced_files': [],"
"'jobs': [{'trigger': 'release', 'job': 'propose_downstream',"
"'metadata': {'dist-git-branch': 'main'}}],"
"'downstream_package_name': 'packit'}"
)
flexmock(
project_class,
get_file_content=lambda path, ref: packit_yaml,
full_repo_name="packit-service/packit",
get_web_url=lambda: f"https://{forge}.com/packit-service/packit",
default_branch="main",
)
(
flexmock(project_class)
.should_receive("can_merge_pr")
.with_args(author)
.and_return(True)
)
issue = flexmock()
flexmock(project_class).should_receive("get_issue").and_return(issue)
comment = flexmock()
flexmock(issue).should_receive("get_comment").and_return(comment)
flexmock(comment).should_receive("add_reaction").with_args("+1").once()
flexmock(issue).should_receive("close").and_return(issue)
gr = release_class(
tag_name="0.5.1",
url="packit-service/packit",
created_at="",
tarball_url="https://foo/bar",
git_tag=flexmock(GitTag),
project=flexmock(project_class),
raw_release=flexmock(),
)
flexmock(project_class).should_receive("get_latest_release").and_return(gr)
config = ServiceConfig()
config.command_handler_work_dir = SANDCASTLE_WORK_DIR
flexmock(ServiceConfig).should_receive("get_service_config").and_return(config)
flexmock(LocalProject, refresh_the_arguments=lambda: None)
lp = flexmock(git_project=flexmock(default_branch="main"))
lp.working_dir = ""
flexmock(DistGit).should_receive("local_project").and_return(lp)
flexmock(Allowlist, check_and_report=True)
yield project_class, issue_comment_propose_downstream_event(forge)
@pytest.mark.parametrize(
"mock_comment,event_type",
[
(
(GithubProject, GithubRelease, "github", "phracek"),
IssueCommentEvent,
),
(
(GitlabProject, GitlabRelease, "gitlab", "shreyaspapi"),
IssueCommentGitlabEvent,
),
],
indirect=[
"mock_comment",
],
)
def test_issue_comment_propose_downstream_handler(
mock_comment,
event_type,
):
project_class, comment_event = mock_comment
flexmock(PackitAPI).should_receive("sync_release").and_return(
PullRequestReadOnly(
title="foo",
description="bar",
target_branch="baz",
source_branch="yet",
id=1,
status=PRStatus.open,
url="https://xyz",
author="me",
created=datetime.now(),
)
)
flexmock(
project_class,
get_files=lambda ref, filter_regex: [],
is_private=lambda: False,
)
flexmock(LocalProject).should_receive("reset").with_args("HEAD").once()
flexmock(IssueCommentGitlabEvent).should_receive("db_trigger").and_return(
flexmock(id=123, job_config_trigger_type=JobConfigTriggerType.release)
)
flexmock(IssueModel).should_receive("get_or_create").and_return(
flexmock(id=123, job_config_trigger_type=JobConfigTriggerType.release)
)
flexmock(Signature).should_receive("apply_async").once()
flexmock(Pushgateway).should_receive("push").once().and_return()
flexmock(shutil).should_receive("rmtree").with_args("")
processing_results = SteveJobs().process_message(comment_event)
event_dict, job, job_config, package_config = get_parameters_from_results(
processing_results
)
assert json.dumps(event_dict)
results = run_propose_downstream_handler(
package_config=package_config,
event=event_dict,
job_config=job_config,
)
assert first_dict_value(results["job"])["success"]
``` |
{
"source": "jpopham91/spitball",
"score": 3
} |
#### File: spitball/berserker/ensemble.py
```python
import sys
from berserker.layers import Layer
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.pipeline import make_pipeline, make_union
import numpy as np
from time import time, sleep
#from fn import _
class Ensemble(object):
"""
vanilla ensemble object
contains N layers, where layers 0..N-1 are collections of models and transformations
and the Nth layer is a single (meta)estimator for making final predictions
"""
def __init__(self, X, y, metric, holdout=()):
self.X_trn = X
self.y_trn = y
self.holdout = holdout
self.metric = metric
self.layers = []
def add_node(self, node, **kwargs):
self.layers[-1].add(node, **kwargs)
return self
def add_meta_estimator(self, node, **kwargs):
self.add_layer(folds=1)
self.add_node(node, **kwargs)
return self
def add_layer(self, **kwargs):
self.layers.append(Layer(self.X_trn, self.y_trn, **kwargs))
return self
def _predict_all(self, X):
"""recursively trace through each layer, using the previous layer's output as training data"""
def _predict_layer(layers, X, new_data):
head, *tail = layers
preds, val_data = head.predict(X, new_data)
if not tail:
return preds
else:
return _predict_layer(tail, preds, val_data)
return _predict_layer(self.layers, X, (self.X_trn, self.y_trn))
def predict(self, X, y=None):
start = time()
preds = self._predict_all(X)
elapsed = time() - start
sleep(1)
print('\n' + '='*53)
print('R E S U L T S'.center(53, '-'))
print('-'*53)
print('Elapsed time: {:.3g} seconds'.format(elapsed))
print('Total models in ensemble: {:d}'.format(sum([layer.size() for layer in self.layers])))
print('Cached predictions used: {:d} / {:d}'.format(sum([node.cached_preds for layer in self.layers for node in layer.nodes]),
sum([node.total_preds for layer in self.layers for node in layer.nodes])))
print('-'*53)
for n, layer in enumerate(self.layers):
print('{: <36} {: <16}'.format('\nLevel {:d} Estimators ({} features)'.format(n+1, layer.X_trn.shape[1]), 'Validation Score'))
print('-'*53)
for node, pred in zip(layer.nodes, layer.val_preds):
print('{: <36} {:.4g}'.format(node.name, self.metric(layer.y_val, pred)))
if y is not None:
            print('{: <36} {: <16}'.format('\nFull Ensemble', 'Holdout Score'))
print('-'*53)
print('\033[1m{: <36} {:.4g}\033[0m'.format('', self.metric(y, preds)))
print('\n' + '='*53)
return preds
```
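For context, here is a minimal usage sketch of the fluent `Ensemble` API above. It is not taken from the repo: the `folds` keyword for `add_layer` is inferred from `add_meta_estimator`, the `Layer.add` signature is assumed to accept just a model, and the data and metric are placeholders.
```python
import numpy as np
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.metrics import mean_squared_error
from berserker.ensemble import Ensemble

# Random placeholder data; any regression dataset works the same way.
X, y = np.random.rand(200, 5), np.random.rand(200)

ens = (
    Ensemble(X, y, metric=mean_squared_error)
    .add_layer(folds=5)                      # level-1 models (folds kwarg assumed)
    .add_node(Ridge())
    .add_node(LinearRegression())
    .add_meta_estimator(LinearRegression())  # level-2 blender on a folds=1 layer
)
preds = ens.predict(np.random.rand(50, 5))
```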
#### File: berserker/estimators/meta.py
```python
__author__ = 'jake'
from sklearn.base import BaseEstimator
import numpy as np
class Averager(BaseEstimator):
"""
Simple meta-estimator which averages predictions
May use any of the pythagorean means
"""
class StepwiseRegressor(Averager):
"""
An averager which iteratively adds predictions which optimize a metric
"""
class FeatureWeightedEstimator(BaseEstimator):
"""
Expands the feature space by taking the outer product of the features and predictions at each sample
This is then fit using some estimator (log/lin regression)
"""
def __init__(self, estimator):
self.estimator = estimator
@staticmethod
def _combine_features(X, y_pred):
        # Outer product of original features and base-model predictions,
        # flattened to one column per (feature, prediction) pair.
        Xy = np.empty((X.shape[0], X.shape[1] * y_pred.shape[1]))
        for i in range(X.shape[1]):
            for j in range(y_pred.shape[1]):
                Xy[:, i * y_pred.shape[1] + j] = X[:, i] * y_pred[:, j]
        return Xy
def fit(self, X, y_pred, y_true):
"""Takes the feature vectors AND predictions as training data"""
assert X.shape[0] == y_pred.shape[0] == len(y_true)
Xy = self._combine_features(X, y_pred)
self.estimator.fit(Xy, y_true)
def predict(self, X, y_pred):
assert X.shape[0] == y_pred.shape[0]
Xy = self._combine_features(X, y_pred)
        # The wrapped estimator is a scikit-learn style model, so use its predict().
        return self.estimator.predict(Xy)
``` |
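A hedged usage sketch for the `FeatureWeightedEstimator` above, with random placeholder data; the wrapped estimator is assumed to follow the scikit-learn `fit`/`predict` API.
```python
import numpy as np
from sklearn.linear_model import LinearRegression
from berserker.estimators.meta import FeatureWeightedEstimator

X = np.random.rand(50, 3)        # original features
y_pred = np.random.rand(50, 2)   # column-stacked predictions from two base models
y_true = np.random.rand(50)

fwe = FeatureWeightedEstimator(LinearRegression())
fwe.fit(X, y_pred, y_true)       # fits on the 3 * 2 = 6 outer-product features
blend = fwe.predict(X, y_pred)   # blended predictions, shape (50,)
```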
{
"source": "jportasa/depot",
"score": 2
} |
#### File: depot/yum/base.py
```python
import collections
from defusedxml import lxml
from lxml.builder import ElementMaker
from lxml.etree import QName
import six
class YumMeta(collections.OrderedDict):
nsmap = {}
def __init__(self, *args, **kwargs):
self.filename = kwargs.pop('filename') if 'filename' in kwargs else None
super(YumMeta, self).__init__(*args, **kwargs)
@classmethod
def from_file(cls, filename=None, fileobj=None, *args, **kwargs):
fileobj = fileobj or open(filename, 'rb')
kwargs['filename'] = filename
kwargs['root'] = lxml.parse(fileobj)
return cls.from_element(*args, **kwargs)
@classmethod
def from_element(cls, root, *args, **kwargs):
raise NotImplementedError
def to_element(self, E):
raise NotImplementedError
def encode(self):
return lxml.tostring(
self.to_element(ElementMaker(nsmap=self.nsmap)),
xml_declaration=True,
encoding='UTF-8',
)
class YumData(collections.OrderedDict):
NS = None
# Sentinel object
NoInsert = object()
@classmethod
def from_element(cls, root):
self = cls(**cls.root_from_element(root))
for elm in root.findall('*'):
key = QName(elm.tag).localname
fn = getattr(self, '{0}_from_element'.format(key.replace('-', '_')), None)
val = fn(key, elm) if fn else elm.text
if val is not self.NoInsert:
self[key] = val
return self
@classmethod
def root_from_element(cls, root):
return root.attrib
def to_element(self, E):
sub = []
for key, value in six.iteritems(self):
ns_key = '{{{0}}}{1}'.format(self.NS, key) if self.NS else key
fn = getattr(self, '{0}_to_element'.format(key.replace('-', '_')), None)
if fn:
elm = fn(E, ns_key, value)
elif value:
elm = E(ns_key, value)
else:
elm = E(ns_key)
sub.append(elm)
return self.root_to_element(E, sub)
def root_to_element(self, E, sub):
return E.data(*sub)
```
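To illustrate the hook convention that `YumData.from_element`/`to_element` rely on, here is a hypothetical subclass sketch (not part of depot; the namespace and element names are made up): a child element named `location` is routed through `location_from_element`/`location_to_element`, while other elements fall back to their text content.
```python
class ExampleData(YumData):
    # Illustrative namespace only; real subclasses use the actual repodata namespaces.
    NS = 'http://example.com/metadata/example'

    def location_from_element(self, key, elm):
        # Store the href attribute instead of the (empty) element text.
        return elm.get('href')

    def location_to_element(self, E, ns_key, value):
        # Re-emit <location href="..."/> when serializing back to XML.
        return E(ns_key, href=value)
```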
#### File: depot/test/test_yum.py
```python
import gzip
import os
import re
import pytest
import six
from depot.yum import YumRepoMD, YumPrimary, YumFileLists, YumOther
# Convert XML into a format that diffs nicely
def unify_spacing(data):
if not isinstance(data, six.binary_type):
data._fileobj.seek(0, 0)
data = data._fileobj.read()
    return re.sub('>', '>\n', re.sub(r'\s+', ' ', re.sub('>', '>\n', re.sub('\n', '', data))))
def fixture(cls, name):
path = os.path.join(os.path.dirname(__file__), 'data', name)
open_ = gzip.open if path.endswith('.gz') else open
fileobj = open_(path, 'rb')
obj = cls.from_file(path, fileobj=fileobj)
obj._fileobj = fileobj
return obj
class TestYumRepoMD(object):
@pytest.fixture
def epel(self):
return fixture(YumRepoMD, 'epel_repomd.xml')
@pytest.fixture
def pgdg(self):
return fixture(YumRepoMD, 'pgdg_repomd.xml')
def test_from_file(self, epel):
assert epel.revision == 1389466441
assert epel.tags == ['binary-i386']
assert epel['filelists']['checksum'] == '8892c3e34eef269cea677558ee5a40057052ecff'
assert epel['filelists']['open-checksum'] == 'b6081d9789bf5cef2ffac8c03fa67224b8c22f53'
assert epel['group']['location'] == 'repodata/fdddf90da3a700ad6da5ff78e13c17258655bbe3-comps-el5.xml'
def test_str_epel(self, epel):
assert unify_spacing(epel.encode()) == unify_spacing(epel)
def test_str_pgdg(self, pgdg):
assert unify_spacing(pgdg.encode()) == unify_spacing(pgdg)
class TestYumPrimary(object):
@pytest.fixture
def epel(self):
return fixture(YumPrimary, 'epel_primary.xml.gz')
@pytest.fixture
def pgdg(self):
return fixture(YumPrimary, 'pgdg_primary.xml')
@pytest.fixture
def pgdgmini(self):
return fixture(YumPrimary, 'pgdgmini_primary.xml')
def test_from_file_pgdgmini(self, pgdgmini):
assert pgdgmini[('ip4r93', 'x86_64', '0:1.05-3.rhel6')]['summary'] == 'IPv4 and IPv4 range index types for PostgreSQL'
assert pgdgmini[('ip4r93', 'x86_64', '0:1.05-3.rhel6')]['url'] == 'http://pgfoundry.org/projects/ip4r'
assert pgdgmini[('postgresql93-debuginfo', 'x86_64', '0:9.3.1-1PGDG.rhel6')]['checksum'] == '048be8c5573c0d92bf64e476beb739c31b2d0f91'
assert pgdgmini[('postgresql93-debuginfo', 'x86_64', '0:9.3.1-1PGDG.rhel6')]['url'] == ''
def test_str_epel(self, epel):
assert unify_spacing(epel.encode()) == unify_spacing(epel)
def test_str_pgdg(self, pgdg):
assert unify_spacing(pgdg.encode()) == unify_spacing(pgdg)
def test_str_pgdgmini(self, pgdgmini):
assert unify_spacing(pgdgmini.encode()) == unify_spacing(pgdgmini)
class TestYumFileLists(object):
@pytest.fixture
def epel(self):
return fixture(YumFileLists, 'epel_filelists.xml.gz')
@pytest.fixture
def pgdg(self):
return fixture(YumFileLists, 'pgdg_filelists.xml')
def test_str_epel(self, epel):
assert unify_spacing(epel.encode()) == unify_spacing(epel)
def test_str_pgdg(self, pgdg):
assert unify_spacing(pgdg.encode()) == unify_spacing(pgdg)
class TestYumOther(object):
@pytest.fixture
def epel(self):
return fixture(YumOther, 'epel_other.xml.gz')
@pytest.fixture
def pgdg(self):
return fixture(YumOther, 'pgdg_other.xml')
def test_str_epel(self, epel):
assert unify_spacing(epel.encode()) == unify_spacing(epel)
def test_str_pgdg(self, pgdg):
assert unify_spacing(pgdg.encode()) == unify_spacing(pgdg)
``` |
{
"source": "jportner/kibbe",
"score": 2
} |
#### File: src/commands/ctx.py
```python
import os
from pathlib import Path
from src.git import find_existing_worktree, get_worktree_list_flat
from src.tmux import get_current_panel, is_inside_tmux
import subprocess
import click
from termcolor import colored
from src.util import get_valid_filename
@click.command()
@click.argument(
"name", type=click.STRING, autocompletion=get_worktree_list_flat, required=False
)
@click.option(
"--branch",
help="Branch name to use for the new worktree",
default="",
)
@click.option(
"--parent-path",
help="Custom parent path to use to set the new worktree. Defaults to ../",
default="",
)
@click.option(
"--source",
help="Branch to create the worktree from. Defaults to current branch. e.g. master",
default="",
)
@click.option(
"-i",
"--interactive",
help="Ask questions about the new context",
is_flag=True,
default=True,
)
@click.option(
"-B",
"--overwrite-branch",
help="If the branch already exists, reset it to source",
is_flag=True,
default=False,
)
@click.option(
"--cd",
"--change-dir",
help="Change to new context directory (Tmux only)",
is_flag=True,
default=True,
)
@click.option(
"--delete",
help="Removes the worktree if exists",
is_flag=True,
default=False,
confirmation_prompt=True,
)
@click.option(
"-l",
"--list-worktrees",
help="List existing worktree. Alias for `git worktree list`",
is_flag=True,
default=False,
)
def ctx(
name,
parent_path,
source,
branch,
interactive,
overwrite_branch,
cd,
list_worktrees,
delete,
):
"""
    ctx is a wrapper kibbe subcommand for git worktree with some quality-of-life improvements.
    NAME accepts the name of the "context" you want to switch to. It is a shorthand so you do not
    have to remember paths, as git worktree requires.
    It allows you to quickly switch to and create git worktrees without having to type or memorize
    all the git worktree parameters.
    ctx works better when you use it with tmux. On macOS, if you use iTerm2,
    you can start tmux with `tmux -CC`. Install it first with `brew install tmux`.
    ctx is not intended to be a replacement for git worktree; if you can't perform the operation
    you want with ctx, please see the git worktree manual entry: https://git-scm.com/docs/git-worktree
"""
if list_worktrees:
subprocess.run(["git", "worktree", "list"])
exit(0)
if not name:
raise click.ClickException(
colored(
"You must pass the NAME of the worktree you want to change to", "red"
)
)
if name.startswith("../"):
name = name[3:]
path_name = get_valid_filename(name)
existing_worktree = find_existing_worktree(path_name)
if delete and not existing_worktree:
raise click.ClickException(
"Can not remove worktree. Worktree doesn't exist: "
+ colored(path_name, "red")
)
elif delete:
if click.confirm(
"Are you sure you want to delete the wortree"
+ colored(existing_worktree["worktree"], "yellow")
):
click.echo("Deleting worktree...")
subprocess.run(["git", "worktree", "remove", existing_worktree["worktree"]])
exit(0)
if existing_worktree:
return handle_existing_worktree(existing_worktree)
if not branch:
if interactive:
            branch = click.prompt("Git branch name for the new worktree", default=name)
else:
branch = name
if not parent_path:
possible_path = os.path.join(Path(os.getcwd()).parent.absolute())
if interactive:
parent_path = click.prompt(
"Parent path target for the git worktree",
default=possible_path,
type=click.Path(dir_okay=True, file_okay=False),
)
else:
parent_path = possible_path
full_path = os.path.join(parent_path, path_name)
if not source:
possible_source = subprocess.getoutput("git rev-parse --abbrev-ref HEAD")
if interactive:
source = click.prompt(
"Source branch for the git worktree. e.g. master",
default=possible_source,
)
else:
source = possible_source
click.echo("Will create a new git worktree called: " + colored(path_name, "yellow"))
click.echo("In this location: " + colored(parent_path, "blue"))
click.echo("With a new branch name: " + colored(branch, "blue"))
click.echo("From this branch: " + colored(source, "blue"))
click.echo("---git output--")
b_option = "-b" if not overwrite_branch else "-B"
command = ["git", "worktree", "add", full_path, source, b_option, branch]
process = subprocess.run(command)
click.echo("--- end git output---")
if process.returncode != 0:
raise click.ClickException(
colored(
"Something went wrong with git. See git output and verify your"
" parameters",
"red",
)
)
click.echo(
colored("Success!", "green")
+ " a new git worktree was created in "
+ colored(full_path, "blue")
)
click.echo("To change to your new worktree run:")
click.echo(colored("cd %s" % full_path, "yellow"))
# this must always be the last command
if cd and is_inside_tmux():
click.echo("Tmux session detected. Changing to worktree")
current_pane = get_current_panel()
current_pane.send_keys("cd %s && nvm use" % full_path)
exit(0)
elif not is_inside_tmux():
click.echo(
"Changing to a worktree is only supported if you are running inside tmux"
)
def handle_existing_worktree(existing_worktree):
existing_path_name = existing_worktree["worktree"]
click.echo(
"Existing worktree with the same name found at "
+ colored(existing_path_name, "yellow")
)
click.echo("Worktree branch: " + colored(existing_worktree["branch"], "blue"))
click.echo("Head commit: " + colored(existing_worktree["HEAD"], "blue"))
click.echo()
if Path(existing_path_name) == Path(os.getcwd()):
click.echo(colored("You are already on this worktree", "yellow"))
exit(0)
if not is_inside_tmux():
click.echo("You can switch to it by running: ")
click.echo(colored("cd %s" % existing_path_name, "blue"))
click.echo()
click.echo("Run this command inside tmux to automatically cd to it")
else:
click.echo("Tmux session detected. Changing to worktree")
current_pane = get_current_panel()
current_pane.send_keys("cd %s && nvm use" % existing_path_name)
exit(0)
```
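Since `ctx` is a plain click command, it can be exercised without a shell via click's test runner. A small sketch (assuming the command is registered as `kibbe ctx` in the CLI, which the docstring implies):
```python
from click.testing import CliRunner

runner = CliRunner()
# Equivalent to `kibbe ctx --list-worktrees`, which shells out to
# `git worktree list` and then exits.
result = runner.invoke(ctx, ["--list-worktrees"])
assert result.exit_code == 0
```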
#### File: src/commands/es.py
```python
from pathlib import Path
import re
from shutil import rmtree
import subprocess
import tempfile
import click
from termcolor import colored
from src.config import get_config, persist_config
from src.util import get_valid_filename, merge_params, unparsed_to_map
pathDataRe = re.compile(r"path\.data\s?=", re.IGNORECASE)
@click.command(
context_settings=dict(
ignore_unknown_options=True,
)
)
@click.option(
"--data-dir",
"-d",
type=click.STRING,
default="",
help="Path where this elastic search will store its data (path.data)",
)
@click.option(
"--no-persist",
"-n",
default=False,
is_flag=True,
help=(
"If passed will use a disposable data dir. This option will overwrite other"
" options related to data dir."
),
)
@click.option(
"--save-config",
default=False,
is_flag=True,
help=(
"If passed it will write your kibbe configuration with all the current passed"
" parameters. This will not modify your kibana repo clone."
),
)
@click.option(
"--flush",
is_flag=True,
default=False,
help="If passed will flush the ES datadir directory before starting es.",
)
@click.option(
"-E",
multiple=True,
help="Additional options to pass to elastic search. `path.data` will be ignored",
)
@click.argument("unparsed_args", nargs=-1, type=click.UNPROCESSED)
def es(data_dir, no_persist, e, unparsed_args, save_config, flush):
"""
    Runs Elasticsearch from the current Kibana clone.
    You can also pass the same parameters as you'd pass to `node scripts/es`.
    You can persist the -E parameters in the `~/.kibbe` configuration file,
    under the [elastic.params] section.
See more about the configuration file here:
https://github.com/academo/kibbe#configuration-file
"""
e_params = get_eparams(data_dir, no_persist)
# additional -E params
for item in e:
item = item.strip()
# ignore if passing a path.data -E param
if pathDataRe.match(item):
continue
item = item.split("=")
try:
e_params[str(item[0])] = str(item[1]) if str(item[1]) else ""
        except (IndexError, ValueError):  # skip malformed -E entries without '='
pass
params = []
config = get_config()
config_params = []
if "elastic.params" in config:
config_params = config.items("elastic.params", raw=True)
params = merge_params(config_params, unparsed_args)
if not no_persist and "path.data" not in e_params:
        # no path.data passed; create one based on the current branch name
try:
current_branch = subprocess.getoutput("git rev-parse --abbrev-ref HEAD")
data_dir = "../" + get_valid_filename("kibbe-esdata-" + current_branch)
e_params["path.data"] = get_data_dir(data_dir, no_persist)
except ValueError:
pass
if flush:
for param in e_params:
if param == "path.data":
try:
dataDir = e_params[param]
click.echo(colored("Will remove data dir %s" % (dataDir), "red"))
rmtree(dataDir, ignore_errors=True)
except ValueError:
pass
finally:
break
e_params = normalize_eparams(e_params)
if save_config:
persist_config(
{"elastic.eparams": e_params, "elastic.params": unparsed_to_map(params)}
)
exit()
command = get_command(e_params, extra_params=params)
click.echo("Will run elastic search as: " + colored(" ".join(command), "yellow"))
subprocess.run(command)
def get_command(e_params, extra_params):
final_params = []
for param in e_params:
final_params.append("-E")
final_params.append(param)
return ["node", "scripts/es", "snapshot"] + final_params + extra_params
def get_eparams(data_dir, no_persist):
CONFIG_KEY = "elastic.eparams"
config = get_config()
params = {}
if CONFIG_KEY in config:
for (key, value) in config.items(CONFIG_KEY, raw=True):
# ignore path.data if this command overwrites it
if key == "path.data":
if len(data_dir) > 0:
value = get_data_dir(data_dir, no_persist)
else:
value = get_data_dir(value, no_persist)
params[str(key)] = str(value) if value else ""
return params
def get_data_dir(data_dir, no_persist):
if no_persist or len(data_dir) == 0:
return tempfile.mkdtemp(suffix="kibbe")
return str(Path(data_dir).resolve())
def normalize_eparams(params):
final = []
for param in params:
final.append("%s=%s" % (param, params[param]))
return final
```
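A minimal illustration of how the collected `-E` map is rendered into the final `node scripts/es` invocation by `normalize_eparams` and `get_command` (the data path is illustrative):
```python
e_params = normalize_eparams({"path.data": "/tmp/kibbe-esdata"})
print(get_command(e_params, extra_params=[]))
# ['node', 'scripts/es', 'snapshot', '-E', 'path.data=/tmp/kibbe-esdata']
```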
#### File: src/commands/kibana.py
```python
import subprocess
import sys
import click
import re
import atexit
import psutil
import enlighten
from termcolor import colored
from src.config import get_config, persist_config
from src.util import merge_params, unparsed_to_map
from src.util import wait_for_elastic_search
this = sys.modules[__name__]
@click.command(
context_settings=dict(
ignore_unknown_options=True,
)
)
@click.option(
"--wait",
"-w",
default=False,
is_flag=True,
help=(
"If passed. It will wait for an elastic search instance in the default port"
" (9200) to be ready before starting kibana"
),
)
@click.option(
"--alt",
default=False,
is_flag=True,
help="Shows an alterantive kibana loading log. Based on text parsing and regex.",
)
@click.option(
"--save-config",
default=False,
is_flag=True,
help=(
"If passed it will write your kibbe configuration with all the current passed"
" parameters. This will not modify your kibana repo clone."
),
)
@click.argument("unparsed_args", nargs=-1, type=click.UNPROCESSED)
def kibana(save_config, unparsed_args, wait, alt):
"""
Runs Kibana from the current clone.
    You can pass the same parameters as you'd pass to `node scripts/kibana`.
    You can persist some parameters in the `~/.kibbe` configuration file,
    under the [kibana.params] section.
See more about the configuration file here:
https://github.com/academo/kibbe#configuration-file
"""
if wait:
click.echo(
colored("Waiting for elasticsearch in port 9200. Timeout in 60s", "blue")
)
wait_for_elastic_search()
config_params = []
config = get_config()
if "kibana.params" in config:
config_params = config.items("kibana.params", raw=True)
params = merge_params(config_params, unparsed_args, useEqual=True)
if save_config:
persist_config({"kibana.params": unparsed_to_map(params)})
exit()
command = ["node", "scripts/kibana", "--dev"] + params
click.echo("Will run kibana search as: " + colored(" ".join(command), "yellow"))
if alt:
run_kibana_alt(command)
else:
subprocess.run(command)
def run_kibana_alt(command):
# command = ["node test.js"]
manager = enlighten.get_manager()
pbar = manager.counter(
total=100, desc="Optimizer", unit="bundles", color="blue_on_green"
)
status = manager.status_bar(
fields={"kibana_status": "Initializing"},
status_format="Kibana is {kibana_status}",
color="white_on_black",
)
process = subprocess.Popen(
"FORCE_COLOR=1 " + " ".join(command),
shell=True,
stdout=subprocess.PIPE,
)
pbar.count = int(0)
pbar.refresh()
if process.stdout:
while True:
# exit kibbe if the node process died
if process.poll() is not None or process.returncode:
sys.exit()
output = process.stdout.readline()
if output:
line = output.decode("utf-8")
sys.stdout.write(line)
parse_line(line, pbar, status)
optimizerProgressRe = re.compile(r"^.*?@kbn\/optimizer.*?\[(\d+)\/(\d+)\]\s.*$")
optimizerSuccessRe = re.compile(r"^.*?success.*?kbn\/optimizer.*")
kibanaServerRunning = re.compile(r".*http\.server\.Kibana.*?http server running")
kibanaServerStatus = re.compile(r".*?status.*?\sKibana\sis\snow\s(.+)(?:\s|$)")
def parse_line(line: str, pbar: enlighten.Counter, status: enlighten.StatusBar):
progressMatch = optimizerProgressRe.match(line)
if progressMatch:
current = int(progressMatch.group(1))
total = int(progressMatch.group(2))
pbar.total = total
pbar.count = current
pbar.refresh()
return
successMatch = optimizerSuccessRe.match(line)
if successMatch:
pbar.clear()
return
if kibanaServerRunning.match(line):
status.fields["kibana_status"] = "⌛ Server loading"
status.refresh()
return
kibanaStatusMatch = kibanaServerStatus.match(line)
if kibanaStatusMatch:
message = str(kibanaStatusMatch.group(1))
message = get_kibana_icon(message) + message
status.fields["kibana_status"] = message
status.refresh()
return
def get_kibana_icon(message):
if "available" in message:
return "✅ "
if "degraded" in message:
return "⌛ "
return ""
def exit_():
"""
    Makes sure that any remaining subprocess is killed when kibbe exits.
    This is useful because if kibbe starts a nodejs process, it might spawn
    more sub-processes that will not be terminated when the parent is asked
    to exit.
"""
current_process = psutil.Process()
children = current_process.children(recursive=True)
for child in children:
try:
child.terminate()
except:
pass
atexit.register(exit_)
"""
const fs = require('fs');
const lines = fs.readFileSync('output.log').toString().split('\n');
let current = 0;
const int = setInterval(() => {
console.log(lines[current]);
current++;
if (lines[current] === undefined) {
clearInterval(int);
}
});
"""
```
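A quick check of the optimizer progress pattern used by `parse_line` against an illustrative log line (not a verbatim Kibana log message):
```python
line = "info [@kbn/optimizer] [42/130] bundles compiled"
match = optimizerProgressRe.match(line)
print(match.group(1), match.group(2))  # -> 42 130
```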
#### File: kibbe/src/git.py
```python
import os
from pathlib import Path
import subprocess
def get_worktree_list():
try:
raw_list = subprocess.getoutput("git worktree list --porcelain")
worktrees = []
raw_list = raw_list.split("\n")
current = {}
for item in raw_list:
if not item:
worktrees.append(current)
current = {}
continue
[name, value] = item.split(" ")
current[name] = value
return worktrees
except ValueError:
return []
def get_worktree_list_flat(ctx, param, incomplete=""):
final = []
try:
worktrees = get_worktree_list()
for tree in worktrees:
name = os.path.basename(tree["worktree"])
if not incomplete or (incomplete and name.startswith(incomplete)):
final.append(name)
except ValueError:
pass
return final
def find_existing_worktree(path_name):
worktrees = get_worktree_list()
existing_worktree = {}
for tree in worktrees:
path = Path(tree["worktree"])
if path.name == path_name:
existing_worktree = tree
break
return existing_worktree
``` |
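For reference, `get_worktree_list` above maps typical `git worktree list --porcelain` output to a list of dicts; a standalone sketch of that parsing (the path and hash are made up):
```python
sample = (
    "worktree /home/user/kibana\n"
    "HEAD 0123456789abcdef0123456789abcdef01234567\n"
    "branch refs/heads/main\n"
)
parsed = dict(line.split(" ", 1) for line in sample.splitlines() if line)
print(parsed)
# {'worktree': '/home/user/kibana',
#  'HEAD': '0123456789abcdef0123456789abcdef01234567',
#  'branch': 'refs/heads/main'}
```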
{
"source": "jportorreal00/Submitty",
"score": 2
} |
#### File: autograder/autograder/grade_item.py
```python
import json
import os
import tempfile
import shutil
import subprocess
import socket
import traceback
from . import autograding_utils, testcase
from . execution_environments import jailed_sandbox
def get_item_from_item_pool(complete_config_obj, item_name):
for item in complete_config_obj['item_pool']:
if item['item_name'] == item_name:
return item
return None
def get_testcases(
complete_config_obj,
config,
queue_obj,
working_directory,
which_untrusted,
item_name,
notebook_data=None
):
'''
    Retrieve testcases from a config object. If notebook_data is
    not None, return the testcases corresponding to it; otherwise return all testcases.
'''
testcase_objs = []
testcase_specs = complete_config_obj['testcases']
if notebook_data is not None:
# Gather the testcase specifications for all itempool testcases
for notebook_item in notebook_data:
item_dict = get_item_from_item_pool(complete_config_obj, notebook_item)
if item_dict is None:
config.logger.log_message(
f"ERROR: could not find {notebook_item} in item pool.",
job_id=queue_obj["job_id"],
is_batch=queue_obj["regrade"],
which_untrusted=which_untrusted,
jobname=item_name,
)
continue
testcase_specs += item_dict['testcases']
else:
for item in complete_config_obj['item_pool']:
testcase_specs += item['testcases']
is_vcs = queue_obj['vcs_checkout']
# Construct the testcase objects
for t in testcase_specs:
tmp_test = testcase.Testcase(
config,
t['testcase_id'],
queue_obj,
complete_config_obj,
t,
which_untrusted,
is_vcs,
queue_obj["regrade"],
queue_obj["job_id"],
working_directory,
testcase_objs,
'',
config.log_path,
config.error_path,
is_test_environment=False
)
testcase_objs.append(tmp_test)
return testcase_objs
def killall(config, which_untrusted, log_file):
    '''killall removes any stray processes belonging to the untrusted user.'''
killall_success = subprocess.call(
[
os.path.join(config.submitty['submitty_install_dir'], "sbin", "untrusted_execute"),
which_untrusted,
os.path.join(config.submitty['submitty_install_dir'], "sbin", "killall.py")
],
stdout=log_file
)
if killall_success != 0:
print(
f'KILLALL: had to kill {killall_success} process(es)',
file=log_file
)
log_file.flush()
def run_compilation(testcases, config, which_untrusted, seperator, log_file):
# COMPILE THE SUBMITTED CODE
print(f"{seperator}COMPILATION STARTS", file=log_file)
log_file.flush()
for tc in testcases:
if tc.type != 'Execution' and not os.path.exists(tc.secure_environment.directory):
tc.execute()
killall(config, which_untrusted, log_file)
log_file.flush()
subprocess.call(['ls', '-lR', '.'], stdout=log_file)
log_file.flush()
def generate_input(testcases, config, which_untrusted, seperator, log_file):
# GENERATE INPUT
print(f"{seperator}INPUT GENERATION STARTS", file=log_file)
for tc in testcases:
if tc.has_input_generator_commands:
tc.generate_random_inputs()
killall(config, which_untrusted, log_file)
subprocess.call(['ls', '-lR', '.'], stdout=log_file)
log_file.flush()
def run_execution(testcases, config, which_untrusted, seperator, log_file):
# RUN EXECUTION TESTCASES
print(f"{seperator}RUNNER STARTS", file=log_file)
log_file.flush()
for tc in testcases:
if tc.type == 'Execution' and not os.path.exists(tc.secure_environment.directory):
tc.execute()
killall(config, which_untrusted, log_file)
subprocess.call(['ls', '-lR', '.'], stdout=log_file)
log_file.flush()
def generate_output(testcases, config, which_untrusted, seperator, log_file):
# RANDOM OUTPUT GENERATION
print(f"{seperator}OUTPUT GENERATION STARTS", file=log_file)
for tc in testcases:
if tc.has_solution_commands:
tc.generate_random_outputs()
killall(config, which_untrusted, log_file)
subprocess.call(['ls', '-lR', '.'], stdout=log_file)
log_file.flush()
def run_validation(
testcases,
config,
which_untrusted,
seperator,
queue_obj,
tmp_work,
is_vcs,
complete_config_obj,
working_directory,
submission_string,
log_file,
tmp_logs,
generate_all_output
):
# VALIDATE STUDENT OUTPUT
print(f"{seperator}VALIDATION STARTS", file=log_file)
log_file.flush()
# Create a jailed sandbox to run validation inside of.
validation_environment = jailed_sandbox.JailedSandbox(
config,
queue_obj["job_id"],
which_untrusted,
tmp_work,
is_vcs,
queue_obj["regrade"],
complete_config_obj,
{},
working_directory,
config.log_path,
config.error_path,
False
)
# Copy sensitive expected output files into tmp_work.
autograding_utils.setup_for_validation(
config,
working_directory,
complete_config_obj,
is_vcs,
testcases,
queue_obj["job_id"],
)
with open(os.path.join(tmp_logs, "validator_log.txt"), 'w') as logfile:
arguments = [
queue_obj["gradeable"],
queue_obj["who"],
str(queue_obj["version"]),
submission_string
]
if generate_all_output:
arguments.append('--generate_all_output')
success = validation_environment.execute(
which_untrusted,
'my_validator.out',
arguments,
logfile,
cwd=tmp_work
)
if success == 0:
print(socket.gethostname(), which_untrusted, "VALIDATOR OK")
else:
print(socket.gethostname(), which_untrusted, "VALIDATOR FAILURE")
subprocess.call(['ls', '-lR', '.'], stdout=log_file)
log_file.flush()
# Remove the temporary .submit.notebook from tmp_work (not to be confused
# with the copy tmp_submission/submission/.submit.notebook)
submit_notebook_path = os.path.join(tmp_work, '.submit.notebook')
if os.path.exists(submit_notebook_path):
os.remove(submit_notebook_path)
os.chdir(working_directory)
autograding_utils.untrusted_grant_rwx_access(
config.submitty['submitty_install_dir'],
which_untrusted,
tmp_work
)
autograding_utils.add_all_permissions(tmp_work)
def archive(
testcases,
config,
working_directory,
queue_obj,
which_untrusted,
item_name,
complete_config_obj,
gradeable_config_obj,
seperator,
log_file
):
# ARCHIVE STUDENT RESULTS
print(f"{seperator}ARCHIVING STARTS", file=log_file)
log_file.flush()
for tc in testcases:
# Removes test input files, makes details directory for the testcase.
tc.setup_for_archival(log_file)
try:
autograding_utils.archive_autograding_results(
config,
working_directory,
queue_obj["job_id"],
which_untrusted,
queue_obj["regrade"],
complete_config_obj,
gradeable_config_obj,
queue_obj,
False
)
except Exception:
print("\n\nERROR: Grading incomplete -- could not archive autograding results")
config.logger.log_message(
"ERROR: could not archive autograding results. See stack trace for more info.",
job_id=queue_obj['job_id'],
is_batch=queue_obj["regrade"],
which_untrusted=which_untrusted,
jobname=item_name,
)
config.logger.log_stack_trace(
traceback.format_exc(),
job_id=queue_obj['job_id'],
is_batch=queue_obj["regrade"],
which_untrusted=which_untrusted,
jobname=item_name,
)
subprocess.call(['ls', '-lR', '.'], stdout=log_file)
def grade_from_zip(
config,
working_directory,
which_untrusted,
autograding_zip_file,
submission_zip_file
):
os.chdir(config.submitty['submitty_data_dir'])
# Removes the working directory if it exists, creates subdirectories and unzips files.
autograding_utils.prepare_directory_for_autograding(
config,
working_directory,
which_untrusted,
autograding_zip_file,
submission_zip_file,
False,
)
# Now that the files are unzipped, we no longer need them.
os.remove(autograding_zip_file)
os.remove(submission_zip_file)
# Initialize variables needed for autograding.
tmp_autograding = os.path.join(working_directory, "TMP_AUTOGRADING")
tmp_submission = os.path.join(working_directory, "TMP_SUBMISSION")
tmp_work = os.path.join(working_directory, "TMP_WORK")
tmp_logs = os.path.join(working_directory, "TMP_SUBMISSION", "tmp_logs")
tmp_results = os.path.join(working_directory, "TMP_RESULTS")
# Used to separate sections of printed messages
seperator = "====================================\n"
# Open the JSON and timestamp files needed to grade. Initialize needed variables.
with open(os.path.join(tmp_submission, "queue_file.json"), 'r') as infile:
queue_obj = json.load(infile)
waittime = queue_obj["waittime"]
is_batch_job = queue_obj["regrade"]
job_id = queue_obj["job_id"]
with open(os.path.join(tmp_autograding, "complete_config.json"), 'r') as infile:
complete_config_obj = json.load(infile)
with open(os.path.join(tmp_autograding, "form.json"), 'r') as infile:
gradeable_config_obj = json.load(infile)
is_vcs = gradeable_config_obj["upload_type"] == "repository"
if "generate_output" in queue_obj and queue_obj["generate_output"]:
        '''Cache the results when there are solution commands but no input generation commands.'''
item_name = os.path.join(
queue_obj["semester"],
queue_obj["course"],
"generated_output",
queue_obj["gradeable"]
)
testcases = list()
for tmp_test in get_testcases(
complete_config_obj,
config,
queue_obj,
working_directory,
which_untrusted,
item_name
):
if tmp_test.has_solution_commands and not tmp_test.has_input_generator_commands:
testcases.append(tmp_test)
with open(os.path.join(tmp_logs, "overall.txt"), 'a') as overall_log:
os.chdir(tmp_work)
generate_output(testcases, config, which_untrusted, seperator, overall_log)
archive(
testcases,
config,
working_directory,
queue_obj,
which_untrusted,
item_name,
complete_config_obj,
gradeable_config_obj,
seperator,
overall_log
)
else:
sub_timestamp_path = os.path.join(tmp_submission, 'submission', ".submit.timestamp")
with open(sub_timestamp_path, 'r') as submission_time_file:
submission_string = submission_time_file.read().rstrip()
item_name = os.path.join(
queue_obj["semester"],
queue_obj["course"],
"submissions",
queue_obj["gradeable"],
queue_obj["who"],
str(queue_obj["version"])
)
config.logger.log_message(
"",
job_id=job_id,
is_batch=is_batch_job,
which_untrusted=which_untrusted,
jobname=item_name,
timelabel="wait:",
elapsed_time=waittime,
)
notebook_data_path = os.path.join(tmp_submission, 'submission', ".submit.notebook")
if os.path.exists(notebook_data_path):
with open(notebook_data_path, 'r') as infile:
notebook_data = json.load(infile).get('item_pools_selected', [])
else:
notebook_data = []
# Load all testcases.
testcases = get_testcases(
complete_config_obj,
config,
queue_obj,
working_directory,
which_untrusted,
item_name,
notebook_data=notebook_data
)
with open(os.path.join(tmp_logs, "overall.txt"), 'a') as overall_log:
os.chdir(tmp_work)
run_compilation(testcases, config, which_untrusted, seperator, overall_log)
generate_input(testcases, config, which_untrusted, seperator, overall_log)
run_execution(testcases, config, which_untrusted, seperator, overall_log)
generate_output(testcases, config, which_untrusted, seperator, overall_log)
run_validation(
testcases,
config,
which_untrusted,
seperator,
queue_obj,
tmp_work,
is_vcs,
complete_config_obj,
working_directory,
submission_string,
overall_log,
tmp_logs,
False
)
archive(
testcases,
config,
working_directory,
queue_obj,
which_untrusted,
item_name,
complete_config_obj,
gradeable_config_obj,
seperator,
overall_log
)
# Zip the results
filehandle, my_results_zip_file = tempfile.mkstemp()
autograding_utils.zip_my_directory(tmp_results, my_results_zip_file)
os.close(filehandle)
# Remove the tmp directory.
shutil.rmtree(working_directory)
try:
autograding_utils.cleanup_stale_containers(which_untrusted, config.logger.log_message)
except Exception:
config.logger.log_stack_trace(
traceback.format_exc(),
job_id=queue_obj['job_id'],
is_batch=queue_obj["regrade"],
which_untrusted=which_untrusted,
jobname=item_name
)
return my_results_zip_file
if __name__ == "__main__":
raise SystemExit('ERROR: Do not call this script directly')
```
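A minimal illustration of `get_item_from_item_pool` (the config dict below is simplified; a real `complete_config.json` carries many more keys):
```python
complete_config = {
    'item_pool': [
        {'item_name': 'question_1', 'testcases': [{'testcase_id': 'tc1'}]},
        {'item_name': 'question_2', 'testcases': [{'testcase_id': 'tc2'}]},
    ]
}
assert get_item_from_item_pool(complete_config, 'question_2')['testcases'][0]['testcase_id'] == 'tc2'
assert get_item_from_item_pool(complete_config, 'missing') is None
```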
#### File: migrations/course/20200815082257_link_itempool_component.py
```python
def up(config, database, semester, course):
# Link Itempool with the rubric-component
database.execute("ALTER TABLE IF EXISTS gradeable_component ADD COLUMN IF NOT EXISTS gc_is_itempool_linked BOOL NOT NULL DEFAULT FALSE")
database.execute("ALTER TABLE IF EXISTS gradeable_component ADD COLUMN IF NOT EXISTS gc_itempool varchar(100) NOT NULL DEFAULT ''")
# Link Itempool with Solution/Ta notes panels
database.execute("ALTER TABLE IF EXISTS solution_ta_notes ADD COLUMN IF NOT EXISTS itempool_item VARCHAR(100) NOT NULL DEFAULT ''")
def down(config, database, semester, course):
pass
```
#### File: migrations/course/20210111163413_add_poll_data_folder.py
```python
import shutil
from pathlib import Path
def up(config, database, semester, course):
course_dir = Path(config.submitty['submitty_data_dir'], 'courses', semester, course, 'reports')
polls_dir = Path(course_dir, 'polls')
#create directory
polls_dir.mkdir(mode=0o750, exist_ok=True)
php_user = config.submitty_users['php_user']
# get course group
course_group_id = course_dir.stat().st_gid
# set the owner/group/permissions
shutil.chown(polls_dir, php_user, course_group_id)
def down(config, database, semester, course):
pass
```
#### File: migrations/course/20210610100802_course_materials_json_to_db.py
```python
import json
from pathlib import Path
from sqlalchemy import insert
def up(config, database, semester, course):
"""
Run up migration.
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
:param database: Object for interacting with given database for environment
:type database: migrator.db.Database
:param semester: Semester of the course being migrated
:type semester: str
:param course: Code of course being migrated
:type course: str
"""
# create tables here
database.execute(
"""
CREATE TABLE IF NOT EXISTS course_materials (
id serial PRIMARY KEY,
path varchar(255) UNIQUE,
type smallint NOT NULL,
release_date timestamptz,
hidden_from_students BOOL,
priority float8 NOT NULL
);
"""
)
database.execute(
"""
CREATE TABLE IF NOT EXISTS course_materials_sections (
course_material_id integer NOT NULL,
section_id varchar(255) NOT NULL,
CONSTRAINT fk_course_material_id
FOREIGN KEY(course_material_id)
REFERENCES course_materials(id)
ON DELETE CASCADE,
CONSTRAINT fk_section_id
FOREIGN KEY(section_id)
REFERENCES sections_registration(sections_registration_id)
ON DELETE CASCADE,
CONSTRAINT pk_course_material_section PRIMARY KEY (course_material_id, section_id)
);
"""
)
course_dir = Path(config.submitty['submitty_data_dir'], 'courses', semester, course)
json_file = Path(course_dir, 'uploads', 'course_materials_file_data.json')
course_materials_dir = Path(course_dir, 'uploads', 'course_materials')
paths = set()
if json_file.is_file():
with json_file.open('r') as file:
data = json.load(file)
if isinstance(data, dict):
for itemkey, itemvalue in data.items():
if itemkey == 'release_time':
continue
material_type = 0
path = itemkey
if 'external_link' in itemvalue and itemvalue['external_link'] is True:
material_type = 1
sections = []
if 'sort_priority' not in itemvalue:
itemvalue['sort_priority'] = 0.0
if 'sections' in itemvalue and itemvalue['sections'] is not None:
for section in itemvalue['sections']:
sections.append(section)
if itemvalue['release_datetime'][:4] == "0000" or itemvalue['release_datetime'][:4] == "0001":
itemvalue['release_datetime'] = "1001" + itemvalue['release_datetime'][4:]
has_sections = len(sections) != 0
query = """
INSERT INTO course_materials (
type,
path,
release_date,
hidden_from_students,
priority
)
VALUES (
:type, :path, :release_date, :hidden_from_students, :priority
) ON CONFLICT(path) DO UPDATE SET
release_date = EXCLUDED.release_date,
hidden_from_students = EXCLUDED.hidden_from_students,
priority = EXCLUDED.priority
RETURNING id
"""
params = {
'path': path,
'type': material_type,
'release_date': itemvalue['release_datetime'],
'hidden_from_students': "false" if 'hide_from_students' in itemvalue and itemvalue['hide_from_students'] == "off" else "true",
'priority': itemvalue['sort_priority']
}
result = database.session.execute(query, params)
course_material_id = result.fetchone()[0]
for section in sections:
query = """
INSERT INTO course_materials_sections (
course_material_id,
section_id
)
VALUES (
:course_material_id, :section_id
) ON CONFLICT(course_material_id, section_id) DO NOTHING
"""
params = {
'course_material_id': course_material_id,
'section_id': section
}
database.session.execute(query, params)
subpath = path[len(str(course_materials_dir))+1:]
dirs = subpath.split('/')
dirs.pop()
curpath = str(course_materials_dir)
for dir in dirs:
curpath += '/' + dir
paths.add(curpath)
for dir in paths:
query = """
INSERT INTO course_materials (
type,
path,
release_date,
hidden_from_students,
priority
)
VALUES (
:type, :path, :release_date, :hidden_from_students, :priority
) ON CONFLICT(path) DO UPDATE SET
type = EXCLUDED.type,
release_date = EXCLUDED.release_date,
hidden_from_students = EXCLUDED.hidden_from_students,
priority = EXCLUDED.priority
"""
priority = 0
if isinstance(data, dict) and dir in data:
priority = data[dir]['sort_priority']
params = {
'path': dir,
'type': 2,
'release_date': None,
'hidden_from_students': None,
'priority': priority
}
database.session.execute(query, params)
def down(config, database, semester, course):
"""
Run down migration (rollback).
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
:param database: Object for interacting with given database for environment
:type database: migrator.db.Database
:param semester: Semester of the course being migrated
:type semester: str
:param course: Code of course being migrated
:type course: str
"""
pass
```
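For reference, the JSON file this migration consumes has roughly the shape below, inferred from the keys read in `up()`; the path, date, and sections are made up.
```python
course_materials_file_data = {
    ".../uploads/course_materials/notes/week1.pdf": {
        "release_datetime": "2021-01-15 00:00:00",
        "hide_from_students": "off",   # "off" means visible to students
        "sort_priority": 1.0,
        "sections": ["1", "2"],
        "external_link": False,
    },
}
```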
#### File: tests/e2e/base_testcase.py
```python
import shutil
import tempfile
from datetime import date
import os
import unittest
import json
from urllib.parse import urlencode
from urllib.parse import urlparse
from selenium import webdriver
from websocket import create_connection
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import sys
# explicitly add this import path, so we can run it on a local host
sys.path.append('../python_submitty_utils/')
from submitty_utils import dateutils
# noinspection PyPep8Naming
class BaseTestCase(unittest.TestCase):
"""
Base class that all e2e tests should extend. It provides several useful
helper functions, sets up the selenium webdriver, and provides a common
interface for logging in/out a user. Each test then only really needs to
override user_id, user_name, and user_password as necessary for a
particular testcase and this class will handle the rest to setup the test.
"""
TEST_URL = "http://localhost:1511"
USER_ID = "student"
USER_NAME = "Joe"
USER_PASSWORD = "<PASSWORD>"
WAIT_TIME = 20
def __init__(self, testname, user_id=None, user_password=<PASSWORD>, user_name=None, log_in=True, use_websockets=False, socket_page=''):
super().__init__(testname)
if "TEST_URL" in os.environ and os.environ['TEST_URL'] is not None:
self.test_url = os.environ['TEST_URL']
else:
self.test_url = BaseTestCase.TEST_URL
self.driver = None
""" :type driver: webdriver.Chrome """
self.options = webdriver.ChromeOptions()
self.options.add_argument('--no-sandbox')
self.options.add_argument('--headless')
self.options.add_argument("--disable-extensions")
self.options.add_argument('--hide-scrollbars')
self.options.add_argument('--disable-gpu')
self.options.add_argument('--no-proxy-server')
self.download_dir = tempfile.mkdtemp(prefix="vagrant-submitty")
# https://stackoverflow.com/a/26916386/214063
profile = {
'download.prompt_for_download': False,
'download.default_directory': self.download_dir,
'download.directory_upgrade': True,
'plugins.plugins_disabled': ['Chrome PDF Viewer']
}
self.options.add_experimental_option('prefs', profile)
self.user_id = user_id if user_id is not None else BaseTestCase.USER_ID
self.user_name = user_name if user_name is not None else BaseTestCase.USER_NAME
if user_password is None and user_id is not None:
user_password = <PASSWORD>
self.user_password = <PASSWORD> if user_password is not None else BaseTestCase.USER_PASSWORD
self.semester = dateutils.get_current_semester()
self.full_semester = BaseTestCase.get_display_semester(self.semester)
self.logged_in = False
self.use_log_in = log_in
self.use_websockets = use_websockets
self.socket_page = socket_page
def setUp(self):
# attempt to set-up the connection to Chrome. Repeat a handful of times
# in-case Chrome crashes during initialization
for _ in range(5):
try:
self.driver = webdriver.Chrome(options=self.options)
break
except WebDriverException:
pass
if self.driver is None:
self.driver = webdriver.Chrome(options=self.options)
self.driver.set_window_size(1600, 900)
self.enable_download_in_headless_chrome(self.download_dir)
if self.use_log_in:
self.log_in()
if self.use_websockets:
self.enable_websockets()
def tearDown(self):
self.driver.quit()
shutil.rmtree(self.download_dir)
if self.use_websockets:
self.ws.close()
def get(self, url=None, parts=None):
if url is None:
# Can specify parts = [('semester', 's18'), ...]
self.assertIsNotNone(parts)
url = "/index.php?" + urlencode(parts)
if url[0] != "/":
url = "/" + url
self.driver.get(self.test_url + url)
# Frog robot
self.assertNotEqual(self.driver.title, "Submitty - Error", "Got Error Page")
def log_in(self, url=None, title="Submitty", user_id=None, user_password=<PASSWORD>, user_name=None):
"""
Provides a common function for logging into the site (and ensuring
that we're logged in)
:return:
"""
if url is None:
url = "/index.php"
if user_password is None:
user_password = <PASSWORD> if user_id is not None else <PASSWORD>
if user_id is None:
user_id = self.user_id
if user_name is None:
user_name = self.user_name
self.get(url)
# print(self.driver.page_source)
self.driver.find_element(By.NAME, 'user_id').send_keys(user_id)
self.driver.find_element(By.NAME, 'password').send_keys(<PASSWORD>)
self.driver.find_element(By.NAME, 'login').click()
# OLD self.assertEqual(user_name, self.driver.find_element(By.ID, "login-id").get_attribute('innerText').strip(' \t\r\n'))
# FIXME: WANT SOMETHING LIKE THIS... WHEN WE HAVE JUST ONE ELEMENT WITH THIS ID
# self.assertEqual("Logout "+user_name, self.driver.find_element(By.ID, "logout").get_attribute('innerText').strip(' \t\r\n'))
# instead, just make sure this element exists
self.driver.find_element(By.ID, "logout")
self.logged_in = True
def log_out(self):
if self.logged_in:
self.logged_in = False
self.driver.find_element(By.ID, 'logout').click()
self.driver.find_element(By.ID, 'login-guest')
def click_class(self, course, course_name=None):
if course_name is None:
course_name = course
course_name = course_name.title()
self.driver.find_element(By.ID, dateutils.get_current_semester() + '_' + course).click()
# print(self.driver.page_source)
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.title_is('Gradeables - ' + course_name))
# see Navigation.twig for html attributes to use as arguments
# loaded_selector must recognize an element on the page being loaded (test_simple_grader.py has xpath example)
def click_nav_grade_button(self, gradeable_category, gradeable_id, button_name, loaded_selector):
self.driver.find_element(By.XPATH,
"//div[@id='{}']/div[@class='course-button']/a[contains(@class, 'btn-nav-grade')]".format(
gradeable_id)).click()
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.presence_of_element_located(loaded_selector))
def click_nav_submit_button(self, gradeable_category, gradeable_id, button_name, loaded_selector):
self.driver.find_element(By.XPATH,
"//div[@id='{}']/div[@class='course-button']/a[contains(@class, 'btn-nav-submit')]".format(
gradeable_id)).click()
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.presence_of_element_located(loaded_selector))
# clicks the navigation header text to 'go back' pages
# for homepage, selector can be gradeable list
def click_header_link_text(self, text, loaded_selector):
self.driver.find_element(
By.XPATH,
"//div[@id='breadcrumbs']/div[@class='breadcrumb']/a[text()='{}']".format(text)
).click()
WebDriverWait(self.driver, BaseTestCase.WAIT_TIME).until(EC.presence_of_element_located(loaded_selector))
def wait_after_ajax(self):
WebDriverWait(self.driver, 10).until(lambda driver: driver.execute_script("return jQuery.active == 0"))
def wait_for_element(self, element_selector, visibility=True, timeout=WAIT_TIME):
"""
Waits for an element to be present in the DOM. By default, also waits for the element to be
visible/interactable
"""
if visibility:
WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located(element_selector))
else:
WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(element_selector))
@staticmethod
def wait_user_input():
"""
Causes the running selenium test to pause until the user has hit the enter key in the
terminal that is running python. This is useful for using in the middle of building tests
as then you cna use the javascript console to inspect the page, get the name/id of elements
or other such actions and then use that to continue building the test
"""
input("Hit enter to continue...")
@staticmethod
def get_display_semester(current_semester):
s = 'Fall' if current_semester[0] == 'f' else 'Summer' if current_semester[0] == 'u' else 'Spring'
s += ' 20' + current_semester[1:]
return s
# https://stackoverflow.com/a/47366981/214063
def enable_download_in_headless_chrome(self, download_dir):
# add missing support for chrome "send_command" to selenium webdriver
self.driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_dir}}
self.driver.execute("send_command", params)
def enable_websockets(self):
submitty_session_cookie = self.driver.get_cookie('submitty_session')
address = self.test_url.replace('http', 'ws') + '/ws'
parsed = urlparse(address)
netloc = parsed.netloc
if ':' in netloc:
netloc = netloc.split(':', 1)[0]
netloc += ':8443'
address = parsed._replace(netloc=netloc).geturl()
self.ws = create_connection(address, cookie = submitty_session_cookie['name'] +'='+ submitty_session_cookie['value'], header={"User-Agent": "python-socket-client"})
new_connection_msg = json.dumps({'type': 'new_connection', 'page': self.semester + '-sample-' + self.socket_page})
self.ws.send(new_connection_msg)
def check_socket_message(self, message):
ws_msg = json.loads(self.ws.recv())
self.assertIn('type', ws_msg.keys())
self.assertEqual(ws_msg['type'], message)
```
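A hedged sketch of a test built on the helpers above (the 'sample' course and the default student login mirror the other e2e tests in this suite):
```python
class TestExample(BaseTestCase):
    def test_course_gradeables_page(self):
        # setUp() has already logged in as the default student user.
        self.click_class('sample')
        self.assertIn('Gradeables', self.driver.title)
```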
#### File: tests/e2e/test_navigation_page_non_student.py
```python
from .base_testcase import BaseTestCase
from collections import OrderedDict
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class TestNavigationPageNonStudent(BaseTestCase):
def __init__(self, testname):
super().__init__(testname, log_in=False)
#sections: list of tuples of size 2 (section name, section length)
def validate_navigation_page_sections(self, sections):
elements = self.driver.find_elements(By.CLASS_NAME, 'course-section-heading')
self.assertEqual(len(sections), len(elements))
index = 0
for (section_name, section_size), element in zip(sections.items(), elements):
self.assertEqual(section_name, element.get_attribute('id'))
self.assertEqual(section_size, len(self.driver
.find_element(By.ID, section_name + '-section')
.find_elements(By.CLASS_NAME, "gradeable-row")), msg=section_name)
def test_instructor(self):
self.log_in(user_id="instructor", user_name="Quinn")
self.click_class('sample')
sections = OrderedDict()
sections["future"] = 4
sections["beta"] = 3
sections["open"] = 4
sections["closed"] = 3
sections["items_being_graded"] = 9
sections["graded"] = 10
self.assertEqual(4, len(self.driver
.find_element(By.CLASS_NAME, 'gradeable-row')
.find_elements(By.CLASS_NAME, 'course-button')))
gradeable_id = "future_no_tas_homework"
self.validate_navigation_page_sections(sections)
element = self.driver.find_element(By.XPATH, "//div[@id='"+gradeable_id+"']//a[contains(@href,'quick_link?action=open_ta_now')]")
self.assertEqual("OPEN TO TAS NOW", element.find_element_by_class_name("subtitle").text)
element.click()
sections["future"] -= 1
sections["beta"] += 1
self.validate_navigation_page_sections(sections)
element = self.driver.find_element(By.XPATH, "//div[@id='"+gradeable_id+"']//a[contains(@href,'quick_link?action=open_students_now')]")
self.assertEqual("OPEN NOW", element.find_element_by_class_name("subtitle").text)
element.click()
sections["beta"] -= 1
sections["open"] += 1
self.validate_navigation_page_sections(sections)
element = self.driver.find_element(By.XPATH, "//div[@id='"+gradeable_id+"']//a[contains(@onclick,'quick_link?action=close_submissions')]")
self.assertEqual("CLOSE SUBMISSIONS NOW", element.find_element_by_class_name("subtitle").text)
element.click()
self.driver.find_element(By.XPATH, "//div[@id='close-submissions-form']//input[contains(@value,'Close Submissions')]").click()
sections["open"] -= 1
sections["closed"] += 1
self.validate_navigation_page_sections(sections)
element = self.driver.find_element(By.XPATH, "//div[@id='"+gradeable_id+"']//a[contains(@href,'quick_link?action=open_grading_now')]")
self.assertEqual("OPEN TO GRADING NOW", element.find_element_by_class_name("subtitle").text)
element.click()
sections["closed"] -= 1
sections["items_being_graded"] += 1
self.validate_navigation_page_sections(sections)
element = self.driver.find_element(By.XPATH, "//div[@id='"+gradeable_id+"']//a[contains(@href,'quick_link?action=release_grades_now')]")
self.assertEqual("RELEASE GRADES NOW", element.find_element_by_class_name("subtitle").text)
element.click()
sections["items_being_graded"] -= 1
sections["graded"] += 1
self.validate_navigation_page_sections(sections)
self.driver.find_element(By.XPATH, "//div[@id='"+gradeable_id+"']//a[contains(@href,'gradeable/"+gradeable_id+"/update')]").click()
self.driver.find_element(By.XPATH, "//form[@id='gradeable-form']//div[@class='tab-bar-wrapper']//a[contains(text(), 'Dates')]").click()
wait = WebDriverWait(self.driver, self.WAIT_TIME)
element = self.driver.find_element(By.ID, "date_released")
element.send_keys(Keys.CONTROL, "a")
wait.until(lambda d: 'active' in element.get_attribute('class'))
element.send_keys("9998-12-31 23:59:59")
element.send_keys(Keys.ENTER)
wait.until(EC.text_to_be_present_in_element((By.ID, "save_status"), "All Changes Saved"), message=self.driver.find_element(By.ID, "save_status").text)
element = self.driver.find_element(By.ID, "date_grade_due")
element.send_keys(Keys.CONTROL, "a")
wait.until(lambda d: 'active' in element.get_attribute('class'))
element.send_keys("9998-12-31 23:59:59")
element.send_keys(Keys.ENTER)
wait.until(EC.text_to_be_present_in_element((By.ID, "save_status"), "All Changes Saved"))
element = self.driver.find_element(By.ID, "date_grade")
element.send_keys(Keys.CONTROL, "a")
wait.until(lambda d: 'active' in element.get_attribute('class'))
element.send_keys("9997-12-31 23:59:59")
element.send_keys(Keys.ENTER)
wait.until(EC.text_to_be_present_in_element((By.ID, "save_status"), "All Changes Saved"))
element = self.driver.find_element(By.ID, "date_due")
element.send_keys(Keys.CONTROL, "a")
wait.until(lambda d: 'active' in element.get_attribute('class'))
element.send_keys("9996-12-31 23:59:59")
element.send_keys(Keys.ENTER)
wait.until(EC.text_to_be_present_in_element((By.ID, "save_status"), "All Changes Saved"))
element = self.driver.find_element(By.ID, "date_submit")
element.send_keys(Keys.CONTROL, "a")
wait.until(lambda d: 'active' in element.get_attribute('class'))
element.send_keys("9995-12-31 23:59:59")
element.send_keys(Keys.ENTER)
wait.until(EC.text_to_be_present_in_element((By.ID, "save_status"), "All Changes Saved"))
element = self.driver.find_element(By.ID, "date_ta_view")
element.send_keys(Keys.CONTROL, "a")
wait.until(lambda d: 'active' in element.get_attribute('class'))
element.send_keys("9994-12-31 23:59:59")
element.send_keys(Keys.ENTER)
wait.until(EC.text_to_be_present_in_element((By.ID, "save_status"), "All Changes Saved"))
self.driver.find_element(By.XPATH, "//a[@id='nav-sidebar-submitty']").click()
sections["graded"] -= 1
sections["future"] += 1
element = self.driver.find_element(By.XPATH, "//div[@id='"+gradeable_id+"']//a[contains(@href,'quick_link?action=open_ta_now')]")
self.assertEqual("OPEN TO TAS NOW", element.find_element_by_class_name("subtitle").text)
self.validate_navigation_page_sections(sections)
def test_ta(self):
self.log_in(user_id="ta", user_name="Jill")
self.click_class('sample')
elements = self.driver.find_elements(By.CLASS_NAME, 'course-section-heading')
self.assertEqual(5, len(elements))
self.assertEqual("beta", elements[0].get_attribute('id'))
self.assertEqual(3, len(self.driver
.find_element(By.ID, 'beta-section')
.find_elements(By.CLASS_NAME, "gradeable-row")))
self.assertEqual("open", elements[1].get_attribute('id'))
self.assertEqual(4, len(self.driver
.find_element(By.ID, 'open-section')
.find_elements(By.CLASS_NAME, "gradeable-row")))
self.assertEqual("closed", elements[2].get_attribute('id'))
self.assertEqual(3, len(self.driver
.find_element(By.ID, 'closed-section')
.find_elements(By.CLASS_NAME, "gradeable-row")))
self.assertEqual("items_being_graded", elements[3].get_attribute('id'))
self.assertEqual(9, len(self.driver
.find_element(By.ID, 'items_being_graded-section')
.find_elements(By.CLASS_NAME, "gradeable-row")))
self.assertEqual("graded", elements[4].get_attribute('id'))
self.assertEqual(10, len(self.driver
.find_element(By.ID, 'graded-section')
.find_elements(By.CLASS_NAME, "gradeable-row")))
self.assertEqual(3, len(self.driver.find_element(By.CLASS_NAME,
'gradeable-row').find_elements(By.CLASS_NAME, 'course-button')))
if __name__ == "__main__":
import unittest
unittest.main()
```
#### File: tests/e2e/test_prev_next.py
```python
from .base_testcase import BaseTestCase
from selenium.webdriver.common.by import By
import unittest
import os
class TestGradingNextPrev(BaseTestCase):
def __init__(self, testname):
super().__init__(testname, log_in=False)
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_instructor(self):
self.log_in(user_id="instructor", user_name="Quinn")
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_homework/grading/status")]').click()
self.driver.find_element_by_link_text("Grading Index").click()
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_homework/grading/grade?who_id=student&sort=id&direction=ASC")]').click()
self.driver.find_element_by_id('prev-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-student').click()
self.assertIn("Joe Student", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('prev-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-ungraded-student').click()
self.assertIn("Joe Student", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_ta(self):
self.log_in(user_id="ta", user_name="Jill")
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_homework/grading/status")]').click()
self.driver.find_element_by_link_text("Grading Index").click()
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_homework/grading/grade?who_id=student&sort=id&direction=ASC")]').click()
self.driver.find_element_by_id('prev-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-student').click()
self.assertIn("Joe Student", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('prev-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-ungraded-student').click()
self.assertIn("Joe Student", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_instructor_team(self):
self.log_in(user_id="instructor", user_name="Quinn")
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_team_homework/grading/status")]').click()
self.driver.find_element_by_link_text("Grading Index").click()
self.driver.find_element_by_xpath("//tbody[@class='details-content panel-content-active']/tr[5]/td[7]").find_elements_by_tag_name("a")[0].click()
self.driver.find_element_by_id('prev-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('prev-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_ta_team(self):
self.log_in(user_id="ta", user_name="Jill")
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_team_homework/grading/status")]').click()
self.driver.find_element_by_link_text("Grading Index").click()
self.driver.find_element_by_xpath("//tbody[@class='details-content panel-content-active']/tr[5]/td[5]").find_elements_by_tag_name("a")[0].click()
self.driver.find_element_by_id('prev-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('prev-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
self.driver.find_element_by_id('next-ungraded-student').click()
self.assertIn("<NAME>", self.driver.find_element_by_id("student_info").text)
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_instructor_navigate_away(self):
self.log_in(user_id="instructor", user_name="Quinn")
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_homework/grading/status")]').click()
self.driver.find_element_by_link_text("Grading Index").click()
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_homework/grading/grade?who_id=aphacker&sort=id&direction=ASC")]').click()
self.driver.find_element_by_id('prev-student').click()
self.assertIn("No prev assigned ungraded student found!", self.driver.find_element_by_id("messages").text)
if __name__ == "__main__":
import unittest
unittest.main()
```
#### File: tests/e2e/test_stats.py
```python
from .base_testcase import BaseTestCase
from selenium.webdriver.common.by import By
import os
import unittest
class TestStats(BaseTestCase):
def __init__(self, testname):
super().__init__(testname, log_in=False)
def individual_grading_stats_test_helper(self, user_id, full_access):
self.log_in(user_id=user_id)
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_homework/grading/status")]').click()
numerical_data_text = self.driver.find_element_by_id("numerical-data").text
if full_access:
self.assertTrue("Students who have submitted: 66 / 101 (65.3%)" in numerical_data_text)
self.assertTrue("Current percentage of TA grading done: 30.75 / 66 (46.6%)" in numerical_data_text)
self.assertTrue("Section 1: 2 / 7 (28.6%)" in numerical_data_text)
else:
self.assertTrue("Students who have submitted: 10 / 20 (50%)" in numerical_data_text)
self.assertTrue("Current percentage of TA grading done: 7 / 10 (70.0%)" in numerical_data_text)
self.assertTrue("Section 4: 4 / 6 (66.7%)" in numerical_data_text)
self.log_out()
def individual_released_stats_test_helper(self, user_id, full_access):
self.log_in(user_id=user_id)
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grades_released_homework/grading/status")]').click()
numerical_data_text = self.driver.find_element_by_id("numerical-data").text
if full_access:
self.assertTrue("Students who have submitted: 73 / 101 (72.3%)" in numerical_data_text)
self.assertTrue("Current percentage of TA grading done: 73 / 73 (100.0%)" in numerical_data_text)
self.assertTrue("Section 1: 12 / 12 (100.0%)" in numerical_data_text)
self.assertTrue("Number of students who have viewed their grade: 50 / 73 (68.5%)" in numerical_data_text)
else:
self.assertTrue("Students who have submitted: 13 / 20 (65%)" in numerical_data_text)
self.assertTrue("Current percentage of TA grading done: 13 / 13 (100.0%)" in numerical_data_text)
self.assertTrue("Section 4: 6 / 6 (100.0%)" in numerical_data_text)
self.assertTrue("Number of students who have viewed their grade: 10 / 13 (76.9%)" in numerical_data_text)
self.log_out()
def team_grading_stats_test_helper(self, user_id, full_access):
self.log_in(user_id=user_id)
self.click_class('sample')
self.driver.find_element_by_xpath('//a[contains(@href,"/sample/gradeable/grading_team_homework/grading/status")]').click()
numerical_data_text = self.driver.find_element_by_id("numerical-data").text
if full_access:
self.assertTrue("Students on a team: 101/101 (100%)" in numerical_data_text)
self.assertTrue("Number of teams: 36" in numerical_data_text)
self.assertTrue("Teams who have submitted: 32 / 36 (88.9%)" in numerical_data_text)
self.assertTrue("Section 1: 5 / 5 (100.0%)" in numerical_data_text)
else:
self.assertTrue("Students on a team: 20/20 (100%)" in numerical_data_text)
self.assertTrue("Number of teams: 8" in numerical_data_text)
self.assertTrue("Teams who have submitted: 5 / 8 (62.5%)" in numerical_data_text)
self.assertTrue("Section 4: 3 / 3 (100.0%)" in numerical_data_text)
self.log_out()
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_team_grading_stats(self):
self.team_grading_stats_test_helper("instructor", True)
self.team_grading_stats_test_helper("ta", True)
self.team_grading_stats_test_helper("grader", False)
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_individual_grading_stats(self):
self.individual_grading_stats_test_helper("instructor", True)
self.individual_grading_stats_test_helper("ta", True)
self.individual_grading_stats_test_helper("grader", False)
@unittest.skipUnless(os.environ.get('CI') is None, "cannot run in CI")
def test_individual_released_stats(self):
self.individual_released_stats_test_helper("instructor", True)
self.individual_released_stats_test_helper("ta", True)
self.individual_released_stats_test_helper("grader", False)
if __name__ == "__main__":
import unittest
unittest.main()
``` |
{
"source": "jposada202020/Adafruit_CircuitPython_AHTx0",
"score": 2
} |
#### File: jposada202020/Adafruit_CircuitPython_AHTx0/adafruit_ahtx0.py
```python
import time
from adafruit_bus_device.i2c_device import I2CDevice
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_AHTx0.git"
AHTX0_I2CADDR_DEFAULT = const(0x38) # Default I2C address
AHTX0_CMD_CALIBRATE = const(0xE1) # Calibration command
AHTX0_CMD_TRIGGER = const(0xAC) # Trigger reading command
AHTX0_CMD_SOFTRESET = const(0xBA) # Soft reset command
AHTX0_STATUS_BUSY = const(0x80) # Status bit for busy
AHTX0_STATUS_CALIBRATED = const(0x08) # Status bit for calibrated
class AHTx0:
"""
Interface library for AHT10/AHT20 temperature+humidity sensors
:param ~busio.I2C i2c_bus: The I2C bus the AHT10/AHT20 is connected to.
:param int address: The I2C device address. Default is :const:`0x38`
**Quickstart: Importing and using the AHT10/AHT20 temperature sensor**
Here is an example of using the :class:`AHTx0` class.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
import adafruit_ahtx0
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
aht = adafruit_ahtx0.AHTx0(i2c)
Now you have access to the temperature and humidity using
the :attr:`temperature` and :attr:`relative_humidity` attributes
.. code-block:: python
temperature = aht.temperature
relative_humidity = aht.relative_humidity
"""
def __init__(self, i2c_bus, address=AHTX0_I2CADDR_DEFAULT):
time.sleep(0.02) # 20ms delay to wake up
self.i2c_device = I2CDevice(i2c_bus, address)
self._buf = bytearray(6)
self.reset()
if not self.calibrate():
raise RuntimeError("Could not calibrate")
self._temp = None
self._humidity = None
def reset(self):
"""Perform a soft-reset of the AHT"""
self._buf[0] = AHTX0_CMD_SOFTRESET
with self.i2c_device as i2c:
i2c.write(self._buf, start=0, end=1)
time.sleep(0.02) # 20ms delay to wake up
def calibrate(self):
"""Ask the sensor to self-calibrate. Returns True on success, False otherwise"""
self._buf[0] = AHTX0_CMD_CALIBRATE
self._buf[1] = 0x08
self._buf[2] = 0x00
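# 0x08 / 0x00 are the two fixed argument bytes that accompany the calibration command in the AHT datasheet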
with self.i2c_device as i2c:
i2c.write(self._buf, start=0, end=3)
while self.status & AHTX0_STATUS_BUSY:
time.sleep(0.01)
if not self.status & AHTX0_STATUS_CALIBRATED:
return False
return True
@property
def status(self):
"""The status byte initially returned from the sensor, see datasheet for details"""
with self.i2c_device as i2c:
i2c.readinto(self._buf, start=0, end=1)
# print("status: "+hex(self._buf[0]))
return self._buf[0]
@property
def relative_humidity(self):
"""The measured relative humidity in percent."""
self._readdata()
return self._humidity
@property
def temperature(self):
"""The measured temperature in degrees Celsius."""
self._readdata()
return self._temp
def _readdata(self):
"""Internal function for triggering the AHT to read temp/humidity"""
self._buf[0] = AHTX0_CMD_TRIGGER
self._buf[1] = 0x33
self._buf[2] = 0x00
with self.i2c_device as i2c:
i2c.write(self._buf, start=0, end=3)
while self.status & AHTX0_STATUS_BUSY:
time.sleep(0.01)
with self.i2c_device as i2c:
i2c.readinto(self._buf, start=0, end=6)
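# buf[0] is the status byte; the 20-bit raw humidity spans buf[1..3] (upper nibble of
# buf[3]) and the 20-bit raw temperature spans the lower nibble of buf[3] plus buf[4..5];
# both are scaled against 2**20 (0x100000) below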
self._humidity = (
(self._buf[1] << 12) | (self._buf[2] << 4) | (self._buf[3] >> 4)
)
self._humidity = (self._humidity * 100) / 0x100000
self._temp = ((self._buf[3] & 0xF) << 16) | (self._buf[4] << 8) | self._buf[5]
self._temp = ((self._temp * 200.0) / 0x100000) - 50
``` |
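A consolidated usage sketch based on the quickstart in the class docstring above (added for illustration, not part of the upstream file); it assumes a CircuitPython board exposing `board.SCL`/`board.SDA` with this module installed as `adafruit_ahtx0`:
```python
import board
import adafruit_ahtx0

i2c = board.I2C()  # uses board.SCL and board.SDA
sensor = adafruit_ahtx0.AHTx0(i2c)

# temperature in degrees Celsius, relative humidity in percent
print("Temperature: %0.1f C" % sensor.temperature)
print("Humidity: %0.1f %%" % sensor.relative_humidity)
```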
{
"source": "jposada202020/Adafruit_CircuitPython_AS7341",
"score": 2
} |
#### File: jposada202020/Adafruit_CircuitPython_AS7341/adafruit_as7341.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_AS7341.git"
from time import sleep, monotonic
from micropython import const
import adafruit_bus_device.i2c_device as i2c_device
from adafruit_register.i2c_struct import UnaryStruct, Struct # , ROUnaryStruct
from adafruit_register.i2c_bit import RWBit
from adafruit_register.i2c_bits import ROBits, RWBits
_AS7341_DEVICE_ID = const(0b001001) # Correct content of WHO_AM_I register
_AS7341_I2CADDR_DEFAULT = const(0x39) # AS7341 default i2c address
_AS7341_CHIP_ID = const(0x09) # AS7341 default device id from WHOAMI
_AS7341_WHOAMI = const(0x92) # Chip ID register
_AS7341_CONFIG = const(0x70) # Enables LED control and sets light sensing mode
_AS7341_GPIO = const(0x73) # Connects photo diode to GPIO or INT pins
_AS7341_LED = const(0x74) # LED Register; Enables and sets current limit
_AS7341_ENABLE = const(
0x80
) # Main enable register. Controls SMUX, Flicker Detection,Spectral and Power
_AS7341_ATIME = const(0x81) # Sets ADC integration step count
_AS7341_SP_LOW_TH_L = const(0x84) # Spectral measurement Low Threshold low byte
_AS7341_SP_LOW_TH_H = const(0x85) # 0 Spectral measurement Low Threshold high byte
_AS7341_SP_HIGH_TH_L = const(0x86) # Spectral measurement High Threshold low byte
_AS7341_SP_HIGH_TH_H = const(0x87) # Spectral measurement High Threshold low byte
_AS7341_STATUS = const(
0x93
) # Interrupt status registers. Indicates the occourance of an interrupt
_AS7341_ASTATUS = const(
0x94
) # Spectral Saturation and Gain status. Reading from here latches the data
_AS7341_CH0_DATA_L = const(0x95) # ADC Channel 0 Data
_AS7341_CH0_DATA_H = const(0x96) # ADC Channel 0 Data
_AS7341_CH1_DATA_L = const(0x97) # ADC Channel 1 Data
_AS7341_CH1_DATA_H = const(0x98) # ADC Channel 1 Data
_AS7341_CH2_DATA_L = const(0x99) # ADC Channel 2 Data
_AS7341_CH2_DATA_H = const(0x9A) # ADC Channel 2 Data
_AS7341_CH3_DATA_L = const(0x9B) # ADC Channel 3 Data
_AS7341_CH3_DATA_H = const(0x9C) # ADC Channel 3 Data
_AS7341_CH4_DATA_L = const(0x9D) # ADC Channel 4 Data
_AS7341_CH4_DATA_H = const(0x9E) # ADC Channel 4 Data
_AS7341_CH5_DATA_L = const(0x9F) # ADC Channel 5 Data
_AS7341_CH5_DATA_H = const(0xA0) # ADC Channel 5 Data
_AS7341_STATUS2 = const(0xA3) # Measurement status flags; saturation, validity
_AS7341_STATUS3 = const(0xA4) # Spectral interrupt source, high or low threshold
_AS7341_CFG0 = const(
0xA9
) # Sets Low power mode, Register bank, and Trigger lengthening
_AS7341_CFG1 = const(0xAA) # Controls ADC Gain
_AS7341_CFG6 = const(0xAF) # Used to configure Smux
_AS7341_CFG9 = const(0xB2) # flicker detect and SMUX command system ints
_AS7341_CFG12 = const(0xB5) # ADC channel for interrupts, persistence and auto-gain
_AS7341_PERS = const(
0xBD
) # number of measurements outside thresholds to trigger an interrupt
_AS7341_GPIO2 = const(
0xBE
) # GPIO Settings and status: polarity, direction, sets output, reads
_AS7341_ASTEP_L = const(0xCA) # Integration step size ow byte
_AS7341_ASTEP_H = const(0xCB) # Integration step size high byte
_AS7341_FD_TIME1 = const(0xD8) # Flicker detection integration time low byte
_AS7341_FD_TIME2 = const(0xDA) # Flicker detection gain and high nibble
_AS7341_FD_STATUS = const(
0xDB
) # Flicker detection status; measurement valid, saturation, flicker
_AS7341_INTENAB = const(0xF9) # Enables individual interrupt types
_AS7341_CONTROL = const(0xFA) # Auto-zero, fifo clear, clear SAI active
_AS7341_FD_CFG0 = const(0xD7) # Enables FIFO for flicker detection
def _low_bank(func):
# pylint:disable=protected-access
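# Helper decorator: set the bank-select bit in CFG0 so the 0x60-0x74 register bank
# (CONFIG, LED, etc.) is accessible, run the wrapped accessor, then restore normal access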
def _decorator(self, *args, **kwargs):
self._low_bank_active = True
retval = func(self, *args, **kwargs)
self._low_bank_active = False
return retval
return _decorator
class CV:
"""struct helper"""
@classmethod
def add_values(cls, value_tuples):
"""Add CV values to the class"""
cls.string = {}
cls.lsb = {}
for value_tuple in value_tuples:
name, value, string, lsb = value_tuple
setattr(cls, name, value)
cls.string[value] = string
cls.lsb[value] = lsb
@classmethod
def is_valid(cls, value):
"""Validate that a given value is a member"""
return value in cls.string
# class Flicker(CV):
# """Options for ``flicker_detection_type``"""
# pass # pylint: disable=unnecessary-pass
# Flicker.add_values((("FLICKER_100HZ", 0, 100, None), ("FLICKER_1000HZ", 1, 1000, None)))
class Gain(CV):
"""Options for ``accelerometer_range``"""
pass # pylint: disable=unnecessary-pass
Gain.add_values(
(
("GAIN_0_5X", 0, 0.5, None),
("GAIN_1X", 1, 1, None),
("GAIN_2X", 2, 2, None),
("GAIN_4X", 3, 4, None),
("GAIN_8X", 4, 8, None),
("GAIN_16X", 5, 16, None),
("GAIN_32X", 6, 32, None),
("GAIN_64X", 7, 64, None),
("GAIN_128X", 8, 128, None),
("GAIN_256X", 9, 256, None),
("GAIN_512X", 10, 512, None),
)
)
class SMUX_OUT(CV):
"""Options for ``smux_out``"""
pass # pylint: disable=unnecessary-pass
SMUX_OUT.add_values(
(
("DISABLED", 0, 0, None),
("ADC0", 1, 1, None),
("ADC1", 2, 2, None),
("ADC2", 3, 3, None),
("ADC3", 4, 4, None),
("ADC4", 5, 5, None),
("ADC5", 6, 6, None),
)
)
class SMUX_IN(CV):
"""Options for ``smux_in``"""
pass # pylint: disable=unnecessary-pass
SMUX_IN.add_values(
(
("NC_F3L", 0, 0, None),
("F1L_NC", 1, 1, None),
("NC_NC0", 2, 2, None),
("NC_F8L", 3, 3, None),
("F6L_NC", 4, 4, None),
("F2L_F4L", 5, 5, None),
("NC_F5L", 6, 6, None),
("F7L_NC", 7, 7, None),
("NC_CL", 8, 8, None),
("NC_F5R", 9, 9, None),
("F7R_NC", 10, 10, None),
("NC_NC1", 11, 11, None),
("NC_F2R", 12, 12, None),
("F4R_NC", 13, 13, None),
("F8R_F6R", 14, 14, None),
("NC_F3R", 15, 15, None),
("F1R_EXT_GPIO", 16, 16, None),
("EXT_INT_CR", 17, 17, None),
("NC_DARK", 18, 18, None),
("NIR_F", 19, 19, None),
)
)
class AS7341: # pylint:disable=too-many-instance-attributes, no-member
"""Library for the AS7341 Sensor
:param ~busio.I2C i2c_bus: The I2C bus the device is connected to
:param int address: The I2C device address. Defaults to :const:`0x39`
**Quickstart: Importing and using the device**
Here is an example of using the :class:`AS7341`.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
from adafruit_as7341 import AS7341
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = AS7341(i2c)
Now you have access to the different channels
.. code-block:: python
channel_415nm = sensor.channel_415nm
channel_445nm = sensor.channel_445nm
channel_480nm = sensor.channel_480nm
channel_515nm = sensor.channel_515nm
channel_555nm = sensor.channel_555nm
channel_590nm = sensor.channel_590nm
channel_630nm = sensor.channel_630nm
channel_680nm = sensor.channel_680nm
"""
_device_id = ROBits(6, _AS7341_WHOAMI, 2)
_smux_enable_bit = RWBit(_AS7341_ENABLE, 4)
_led_control_enable_bit = RWBit(_AS7341_CONFIG, 3)
_color_meas_enabled = RWBit(_AS7341_ENABLE, 1)
_power_enabled = RWBit(_AS7341_ENABLE, 0)
_low_bank_active = RWBit(_AS7341_CFG0, 4)
_smux_command = RWBits(2, _AS7341_CFG6, 3)
_fd_status = UnaryStruct(_AS7341_FD_STATUS, "<B")
_channel_0_data = UnaryStruct(_AS7341_CH0_DATA_L, "<H")
_channel_1_data = UnaryStruct(_AS7341_CH1_DATA_L, "<H")
_channel_2_data = UnaryStruct(_AS7341_CH2_DATA_L, "<H")
_channel_3_data = UnaryStruct(_AS7341_CH3_DATA_L, "<H")
_channel_4_data = UnaryStruct(_AS7341_CH4_DATA_L, "<H")
_channel_5_data = UnaryStruct(_AS7341_CH5_DATA_L, "<H")
# "Reading the ASTATUS register (0x60 or 0x94) latches
# all 12 spectral data bytes to that status read." Datasheet Sec. 10.2.7
_all_channels = Struct(_AS7341_ASTATUS, "<BHHHHHH")
_led_current_bits = RWBits(7, _AS7341_LED, 0)
_led_enabled = RWBit(_AS7341_LED, 7)
atime = UnaryStruct(_AS7341_ATIME, "<B")
"""The integration time step count.
Total integration time will be ``(ATIME + 1) * (ASTEP + 1) * 2.78µS``
"""
astep = UnaryStruct(_AS7341_ASTEP_L, "<H")
""" The integration time step size in 2.78 microsecond increments"""
_gain = UnaryStruct(_AS7341_CFG1, "<B")
_data_ready_bit = RWBit(_AS7341_STATUS2, 6)
"""
* @brief
*
* @return true: success false: failure
"""
def __init__(self, i2c_bus, address=_AS7341_I2CADDR_DEFAULT):
self.i2c_device = i2c_device.I2CDevice(i2c_bus, address)
if self._device_id != _AS7341_DEVICE_ID:
raise RuntimeError("Failed to find an AS7341 sensor - check your wiring!")
self.initialize()
self._buffer = bytearray(2)
self._low_channels_configured = False
self._high_channels_configured = False
self._flicker_detection_1k_configured = False
def initialize(self):
"""Configure the sensors with the default settings"""
self._power_enabled = True
self._led_control_enabled = True
self.atime = 100
self.astep = 999
self.gain = Gain.GAIN_128X # pylint:disable=no-member
@property
def all_channels(self):
"""The current readings for all six ADC channels"""
self._configure_f1_f4()
adc_reads_f1_f4 = self._all_channels
reads = adc_reads_f1_f4[1:-2]
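# [1:-2] drops the leading status byte plus the Clear and NIR readings, keeping
# ADC0-ADC3 (F1-F4 here, F5-F8 in the second read below)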
self._configure_f5_f8()
adc_reads_f5_f8 = self._all_channels
reads += adc_reads_f5_f8[1:-2]
return reads
@property
def channel_415nm(self):
"""The current reading for the 415nm band"""
self._configure_f1_f4()
return self._channel_0_data
@property
def channel_445nm(self):
"""The current reading for the 445nm band"""
self._configure_f1_f4()
return self._channel_1_data
@property
def channel_480nm(self):
"""The current reading for the 480nm band"""
self._configure_f1_f4()
return self._channel_2_data
@property
def channel_515nm(self):
"""The current reading for the 515nm band"""
self._configure_f1_f4()
return self._channel_3_data
@property
def channel_555nm(self):
"""The current reading for the 555nm band"""
self._configure_f5_f8()
return self._channel_0_data
@property
def channel_590nm(self):
"""The current reading for the 590nm band"""
self._configure_f5_f8()
return self._channel_1_data
@property
def channel_630nm(self):
"""The current reading for the 630nm band"""
self._configure_f5_f8()
return self._channel_2_data
@property
def channel_680nm(self):
"""The current reading for the 680nm band"""
self._configure_f5_f8()
return self._channel_3_data
# TODO: Add clear and NIR accessors
def _wait_for_data(self, timeout=1.0):
"""Wait for sensor data to be ready"""
start = monotonic()
while not self._data_ready_bit:
if monotonic() - start > timeout:
raise RuntimeError("Timeout occurred waiting for sensor data")
sleep(0.001)
def _write_register(self, addr, data):
self._buffer[0] = addr
self._buffer[1] = data
with self.i2c_device as i2c:
i2c.write(self._buffer)
def _configure_f1_f4(self):
"""Configure the sensor to read from elements F1-F4, Clear, and NIR"""
# disable SP_EN bit while making config changes
if self._low_channels_configured:
return
self._high_channels_configured = False
self._flicker_detection_1k_configured = False
self._color_meas_enabled = False
# ENUM-ify
self._smux_command = 2
# Write new configuration to all the 20 registers
self._f1f4_clear_nir()
# Start SMUX command
self._smux_enabled = True
# Enable SP_EN bit
self._color_meas_enabled = True
self._low_channels_configured = True
self._wait_for_data()
def _configure_f5_f8(self):
"""Configure the sensor to read from elements F5-F8, Clear, and NIR"""
# disable SP_EN bit while making config changes
if self._high_channels_configured:
return
self._low_channels_configured = False
self._flicker_detection_1k_configured = False
self._color_meas_enabled = False
# ENUM-ify
self._smux_command = 2
# Write new configuration to all the 20 registers
self._f5f8_clear_nir()
# Start SMUX command
self._smux_enabled = True
# Enable SP_EN bit
self._color_meas_enabled = True
self._high_channels_configured = True
self._wait_for_data()
@property
def flicker_detected(self):
"""The flicker frequency detected in Hertz"""
if not self._flicker_detection_1k_configured:
raise AttributeError(
"Flicker detection must be enabled to access `flicker_detected`"
)
flicker_status = self._fd_status
if flicker_status == 45:
return 1000
if flicker_status == 46:
return 1200
return None
# if we haven't returned yet either there was an error or an unknown frequency was detected
@property
def flicker_detection_enabled(self):
"""The flicker detection status of the sensor. True if the sensor is configured\
to detect flickers. Currently only 1000Hz and 1200Hz flicker detection is supported
"""
return self._flicker_detection_1k_configured
@flicker_detection_enabled.setter
def flicker_detection_enabled(self, flicker_enable):
if flicker_enable:
self._configure_1k_flicker_detection()
else:
self._configure_f1_f4() # sane default
def _f1f4_clear_nir(self):
"""Configure SMUX for sensors F1-F4, Clear and NIR"""
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.ADC2)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.ADC0, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.ADC1, SMUX_OUT.ADC3)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.ADC1)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.ADC3, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.ADC2)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.ADC0, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.ADC5, SMUX_OUT.DISABLED)
def _f5f8_clear_nir(self):
# SMUX Config for F5,F6,F7,F8,NIR,Clear
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.ADC3)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.ADC1, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.ADC0)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.ADC2, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.ADC0)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.ADC2, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.ADC3, SMUX_OUT.ADC1)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.ADC4)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.ADC5, SMUX_OUT.DISABLED)
# TODO: Convert as much of this as possible to properties or named attributes
def _configure_1k_flicker_detection(self):
self._low_channels_configured = False
self._high_channels_configured = False
# RAM_BANK 0 select which RAM bank to access in register addresses 0x00-0x7f
self._write_register(_AS7341_CFG0, 0x00)
# The coefficient calculated are stored into the RAM bank 0 and RAM bank 1,
# they are used instead of 100Hz and 120Hz coefficients which are the default
# flicker detection coefficients
# write new coefficients to detect the 1000Hz and 1200Hz - part 1
self._write_register(0x04, 0x9E)
self._write_register(0x05, 0x36)
self._write_register(0x0E, 0x2E)
self._write_register(0x0F, 0x1B)
self._write_register(0x18, 0x7D)
self._write_register(0x19, 0x36)
self._write_register(0x22, 0x09)
self._write_register(0x23, 0x1B)
self._write_register(0x2C, 0x5B)
self._write_register(0x2D, 0x36)
self._write_register(0x36, 0xE5)
self._write_register(0x37, 0x1A)
self._write_register(0x40, 0x3A)
self._write_register(0x41, 0x36)
self._write_register(0x4A, 0xC1)
self._write_register(0x4B, 0x1A)
self._write_register(0x54, 0x18)
self._write_register(0x55, 0x36)
self._write_register(0x5E, 0x9C)
self._write_register(0x5F, 0x1A)
self._write_register(0x68, 0xF6)
self._write_register(0x69, 0x35)
self._write_register(0x72, 0x78)
self._write_register(0x73, 0x1A)
self._write_register(0x7C, 0x4D)
self._write_register(0x7D, 0x35)
# RAM_BANK 1 select which RAM bank to access in register addresses 0x00-0x7f
self._write_register(_AS7341_CFG0, 0x01)
# write new coefficients to detect the 1000Hz and 1200Hz - part 1
self._write_register(0x06, 0x54)
self._write_register(0x07, 0x1A)
self._write_register(0x10, 0xB3)
self._write_register(0x11, 0x35)
self._write_register(0x1A, 0x2F)
self._write_register(0x1B, 0x1A)
self._write_register(_AS7341_CFG0, 0x01)
# select RAM coefficients for flicker detection by setting
# fd_disable_constant_init to „1“ (FD_CFG0 register) in FD_CFG0 register -
# 0xD7
# fd_disable_constant_init=1
# fd_samples=4
self._write_register(_AS7341_FD_CFG0, 0x60)
# in FD_CFG1 register - 0xd8 fd_time(7:0) = 0x40
self._write_register(_AS7341_FD_TIME1, 0x40)
# in FD_CFG2 register - 0xd9 fd_dcr_filter_size=1 fd_nr_data_sets(2:0)=5
self._write_register(0xD9, 0x25)
# in FD_CFG3 register - 0xda fd_gain=9
self._write_register(_AS7341_FD_TIME2, 0x48)
# in CFG9 register - 0xb2 sien_fd=1
self._write_register(_AS7341_CFG9, 0x40)
# in ENABLE - 0x80 fden=1 and pon=1 are enabled
self._write_register(_AS7341_ENABLE, 0x41)
self._flicker_detection_1k_configured = True
def _smux_template(self):
# SMUX_OUT.DISABLED
# SMUX_OUT.ADC0
# SMUX_OUT.ADC1
# SMUX_OUT.ADC2
# SMUX_OUT.ADC3
# SMUX_OUT.ADC4
# SMUX_OUT.ADC5
self._set_smux(SMUX_IN.NC_F3L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC0, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F8L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F6L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F2L_F4L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5L, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7L_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_CL, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F5R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F7R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_NC1, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F2R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F4R_NC, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F8R_F6R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_F3R, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.F1R_EXT_GPIO, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.EXT_INT_CR, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NC_DARK, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
self._set_smux(SMUX_IN.NIR_F, SMUX_OUT.DISABLED, SMUX_OUT.DISABLED)
def _set_smux(self, smux_addr, smux_out1, smux_out2):
"""Connect a pair of sensors to an ADC channel"""
low_nibble = smux_out1
high_nibble = smux_out2 << 4
smux_byte = high_nibble | low_nibble
self._write_register(smux_addr, smux_byte)
@property
def gain(self):
"""The ADC gain multiplier. Must be a valid :meth:`adafruit_as7341.Gain`"""
return self._gain
@gain.setter
def gain(self, gain_value):
if not Gain.is_valid(gain_value):
raise AttributeError("`gain` must be a valid `adafruit_as7341.Gain`")
self._gain = gain_value
@property
def _smux_enabled(self):
return self._smux_enable_bit
@_smux_enabled.setter
def _smux_enabled(self, enable_smux):
self._low_bank_active = False
self._smux_enable_bit = enable_smux
while self._smux_enable_bit is True:
sleep(0.001)
@property
@_low_bank
def led_current(self):
"""The maximum allowed current through the attached LED in milliamps.
Odd numbered values will be rounded down to the next lowest even number due
to the internal configuration restrictions"""
current_val = self._led_current_bits
return (current_val * 2) + 4
@led_current.setter
@_low_bank
def led_current(self, led_current):
new_current = int((min(258, max(4, led_current)) - 4) / 2)
self._led_current_bits = new_current
@property
@_low_bank
def led(self):
"""The attached LED. Set to True to turn on, False to turn off"""
return self._led_enabled
@led.setter
@_low_bank
def led(self, led_on):
self._led_enabled = led_on
@property
@_low_bank
def _led_control_enabled(self):
return self._led_control_enable_bit
@_led_control_enabled.setter
@_low_bank
def _led_control_enabled(self, enabled):
self._led_control_enable_bit = enabled
``` |
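A short usage sketch drawn from the quickstart docstring above (illustrative only; it assumes a CircuitPython board with `board.SCL`/`board.SDA` wired to the sensor):
```python
import board
from adafruit_as7341 import AS7341

i2c = board.I2C()  # uses board.SCL and board.SDA
sensor = AS7341(i2c)

# each channel_* property reconfigures the SMUX as needed before reading
print("415nm (violet): %d" % sensor.channel_415nm)
print("555nm (green): %d" % sensor.channel_555nm)
print("680nm (red): %d" % sensor.channel_680nm)

# all_channels gathers the eight F1-F8 readings over two SMUX configurations
print(sensor.all_channels)
```
A property read only reconfigures the SMUX when it has to switch between the F1-F4 and F5-F8 banks (see `_configure_f1_f4` / `_configure_f5_f8` above), so grouping reads by bank avoids redundant measurements.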
{
"source": "jposada202020/Adafruit_CircuitPython_BME280",
"score": 2
} |
#### File: Adafruit_CircuitPython_BME280/adafruit_bme280/basic.py
```python
import math
from time import sleep
from micropython import const
try:
import struct
except ImportError:
import ustruct as struct
__version__ = "2.6.4"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BME280.git"
# I2C ADDRESS/BITS/SETTINGS
# -----------------------------------------------------------------------
"""General Information"""
_BME280_ADDRESS = const(0x77)
_BME280_CHIPID = const(0x60)
_BME280_REGISTER_CHIPID = const(0xD0)
"""overscan values for temperature, pressure, and humidity"""
OVERSCAN_X1 = const(0x01)
OVERSCAN_X16 = const(0x05)
"""mode values"""
_BME280_MODES = (0x00, 0x01, 0x03)
"""iir_filter values"""
IIR_FILTER_DISABLE = const(0)
"""
standby timeconstant values
TC_X[_Y] where X=milliseconds and Y=tenths of a millisecond
"""
STANDBY_TC_125 = const(0x02) # 125ms
"""mode values"""
MODE_SLEEP = const(0x00)
MODE_FORCE = const(0x01)
MODE_NORMAL = const(0x03)
"""Other Registers"""
_BME280_REGISTER_SOFTRESET = const(0xE0)
_BME280_REGISTER_CTRL_HUM = const(0xF2)
_BME280_REGISTER_STATUS = const(0xF3)
_BME280_REGISTER_CTRL_MEAS = const(0xF4)
_BME280_REGISTER_CONFIG = const(0xF5)
_BME280_REGISTER_TEMPDATA = const(0xFA)
_BME280_REGISTER_HUMIDDATA = const(0xFD)
class Adafruit_BME280:
"""Driver from BME280 Temperature, Humidity and Barometric Pressure sensor
.. note::
The operational range of the BME280 is 300-1100 hPa.
Pressure measurements outside this range may not be as accurate.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self):
"""Check the BME280 was found, read the coefficients and enable the sensor"""
# Check device ID.
chip_id = self._read_byte(_BME280_REGISTER_CHIPID)
if _BME280_CHIPID != chip_id:
raise RuntimeError("Failed to find BME280! Chip ID 0x%x" % chip_id)
# Set some reasonable defaults.
self._iir_filter = IIR_FILTER_DISABLE
self.overscan_humidity = OVERSCAN_X1
self.overscan_temperature = OVERSCAN_X1
self.overscan_pressure = OVERSCAN_X16
self._t_standby = STANDBY_TC_125
self._mode = MODE_SLEEP
self._reset()
self._read_coefficients()
self._write_ctrl_meas()
self._write_config()
self.sea_level_pressure = 1013.25
"""Pressure in hectoPascals at sea level. Used to calibrate `altitude`."""
self._t_fine = None
def _read_temperature(self):
# perform one measurement
if self.mode != MODE_NORMAL:
self.mode = MODE_FORCE
# Wait for conversion to complete
while self._get_status() & 0x08:
sleep(0.002)
raw_temperature = (
self._read24(_BME280_REGISTER_TEMPDATA) / 16
) # lowest 4 bits get dropped
var1 = (
raw_temperature / 16384.0 - self._temp_calib[0] / 1024.0
) * self._temp_calib[1]
var2 = (
(raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0)
* (raw_temperature / 131072.0 - self._temp_calib[0] / 8192.0)
) * self._temp_calib[2]
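# _t_fine is the fine-resolution temperature value used by the Bosch compensation
# formulas; temperature in degrees Celsius is _t_fine / 5120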
self._t_fine = int(var1 + var2)
def _reset(self):
"""Soft reset the sensor"""
self._write_register_byte(_BME280_REGISTER_SOFTRESET, 0xB6)
sleep(0.004) # Datasheet says 2ms. Using 4ms just to be safe
def _write_ctrl_meas(self):
"""
Write the values to the ctrl_meas and ctrl_hum registers in the device
ctrl_meas sets the pressure and temperature data acquisition options
ctrl_hum sets the humidity oversampling and must be written to first
"""
self._write_register_byte(_BME280_REGISTER_CTRL_HUM, self.overscan_humidity)
self._write_register_byte(_BME280_REGISTER_CTRL_MEAS, self._ctrl_meas)
def _get_status(self):
"""Get the value from the status register in the device """
return self._read_byte(_BME280_REGISTER_STATUS)
def _read_config(self):
"""Read the value from the config register in the device """
return self._read_byte(_BME280_REGISTER_CONFIG)
def _write_config(self):
"""Write the value to the config register in the device """
normal_flag = False
if self._mode == MODE_NORMAL:
# Writes to the config register may be ignored while in Normal mode
normal_flag = True
self.mode = MODE_SLEEP # So we switch to Sleep mode first
self._write_register_byte(_BME280_REGISTER_CONFIG, self._config)
if normal_flag:
self.mode = MODE_NORMAL
@property
def mode(self):
"""
Operation mode
Allowed values are the constants MODE_*
"""
return self._mode
@mode.setter
def mode(self, value):
if not value in _BME280_MODES:
raise ValueError("Mode '%s' not supported" % (value))
self._mode = value
self._write_ctrl_meas()
@property
def _config(self):
"""Value to be written to the device's config register """
config = 0
if self.mode == 0x03: # MODE_NORMAL
config += self._t_standby << 5
if self._iir_filter:
config += self._iir_filter << 2
return config
@property
def _ctrl_meas(self):
"""Value to be written to the device's ctrl_meas register """
ctrl_meas = self.overscan_temperature << 5
ctrl_meas += self.overscan_pressure << 2
ctrl_meas += self.mode
return ctrl_meas
@property
def temperature(self):
"""The compensated temperature in degrees Celsius."""
self._read_temperature()
return self._t_fine / 5120.0
@property
def pressure(self):
"""
The compensated pressure in hectoPascals.
returns None if pressure measurement is disabled
"""
self._read_temperature()
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
adc = (
self._read24(0xF7) / 16 # BME280_REGISTER_PRESSUREDATA
) # lowest 4 bits get dropped
var1 = float(self._t_fine) / 2.0 - 64000.0
var2 = var1 * var1 * self._pressure_calib[5] / 32768.0
var2 = var2 + var1 * self._pressure_calib[4] * 2.0
var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0
var3 = self._pressure_calib[2] * var1 * var1 / 524288.0
var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]
if not var1: # avoid exception caused by division by zero
raise ArithmeticError(
"Invalid result possibly related to error while reading the calibration registers"
)
pressure = 1048576.0 - adc
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0
var2 = pressure * self._pressure_calib[7] / 32768.0
pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0
pressure /= 100
return pressure
@property
def relative_humidity(self):
"""
The relative humidity in RH %
returns None if humidity measurement is disabled
"""
return self.humidity
@property
def humidity(self):
"""
The relative humidity in RH %
returns None if humidity measurement is disabled
"""
self._read_temperature()
hum = self._read_register(0xFD, 2) # BME280_REGISTER_HUMIDDATA
adc = float(hum[0] << 8 | hum[1])
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
var1 = float(self._t_fine) - 76800.0
var2 = (
self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1
)
var3 = adc - var2
var4 = self._humidity_calib[1] / 65536.0
var5 = 1.0 + (self._humidity_calib[2] / 67108864.0) * var1
var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5
var6 = var3 * var4 * (var5 * var6)
humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)
if humidity > 100:
return 100
if humidity < 0:
return 0
# else...
return humidity
@property
def altitude(self):
"""The altitude based on current :attr:`pressure` versus the sea level pressure
(``sea_level_pressure``), which you must enter ahead of time"""
pressure = self.pressure # current pressure in hPa
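# international barometric formula: altitude = 44330 * (1 - (p / p0) ** 0.1903) metres,
# with p0 = sea_level_pressure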
return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))
def _read_coefficients(self):
"""Read & save the calibration coefficients"""
coeff = self._read_register(0x88, 24) # BME280_REGISTER_DIG_T1
coeff = list(struct.unpack("<HhhHhhhhhhhh", bytes(coeff)))
coeff = [float(i) for i in coeff]
self._temp_calib = coeff[:3]
self._pressure_calib = coeff[3:]
self._humidity_calib = [0] * 6
self._humidity_calib[0] = self._read_byte(0xA1) # BME280_REGISTER_DIG_H1
coeff = self._read_register(0xE1, 7) # BME280_REGISTER_DIG_H2
coeff = list(struct.unpack("<hBbBbb", bytes(coeff)))
self._humidity_calib[1] = float(coeff[0])
self._humidity_calib[2] = float(coeff[1])
self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))
self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))
self._humidity_calib[5] = float(coeff[5])
def _read_byte(self, register):
"""Read a byte register value and return it"""
return self._read_register(register, 1)[0]
def _read24(self, register):
"""Read an unsigned 24-bit value as a floating point and return it."""
ret = 0.0
for b in self._read_register(register, 3):
ret *= 256.0
ret += float(b & 0xFF)
return ret
def _read_register(self, register, length):
raise NotImplementedError()
def _write_register_byte(self, register, value):
raise NotImplementedError()
class Adafruit_BME280_I2C(Adafruit_BME280):
"""Driver for BME280 connected over I2C
:param ~busio.I2C i2c: The I2C bus the BME280 is connected to.
:param int address: I2C device address. Defaults to :const:`0x77`,
but another address can be passed in as an argument.
.. note::
The operational range of the BME280 is 300-1100 hPa.
Pressure measurements outside this range may not be as accurate.
**Quickstart: Importing and using the BME280**
Here is an example of using the :class:`Adafruit_BME280_I2C`.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
from adafruit_bme280 import basic
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
bme280 = basic.Adafruit_BME280_I2C(i2c)
You need to setup the pressure at sea level
.. code-block:: python
bme280.sea_level_pressure = 1013.25
Now you have access to the :attr:`temperature`, :attr:`relative_humidity`
:attr:`pressure` and :attr:`altitude` attributes
.. code-block:: python
temperature = bme280.temperature
relative_humidity = bme280.relative_humidity
pressure = bme280.pressure
altitude = bme280.altitude
"""
def __init__(self, i2c, address=0x77): # BME280_ADDRESS
import adafruit_bus_device.i2c_device as i2c_device # pylint: disable=import-outside-toplevel
self._i2c = i2c_device.I2CDevice(i2c, address)
super().__init__()
def _read_register(self, register, length):
with self._i2c as i2c:
i2c.write(bytes([register & 0xFF]))
result = bytearray(length)
i2c.readinto(result)
return result
def _write_register_byte(self, register, value):
with self._i2c as i2c:
i2c.write(bytes([register & 0xFF, value & 0xFF]))
# print("$%02X <= 0x%02X" % (register, value))
class Adafruit_BME280_SPI(Adafruit_BME280):
"""Driver for BME280 connected over SPI
:param ~busio.SPI spi: SPI device
:param ~digitalio.DigitalInOut cs: Chip Select
:param int baudrate: Clock rate, default is 100000. Can be changed with :meth:`baudrate`
.. note::
The operational range of the BME280 is 300-1100 hPa.
Pressure measurements outside this range may not be as accurate.
**Quickstart: Importing and using the BME280**
Here is an example of using the :class:`Adafruit_BME280_SPI` class.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
from digitalio import DigitalInOut
from adafruit_bme280 import basic
Once this is done you can define your `board.SPI` object and define your sensor object
.. code-block:: python
cs = DigitalInOut(board.D10)
spi = board.SPI()
bme280 = basic.Adafruit_BME280_SPI(spi, cs)
You need to setup the pressure at sea level
.. code-block:: python
bme280.sea_level_pressure = 1013.25
Now you have access to the :attr:`temperature`, :attr:`relative_humidity`
:attr:`pressure` and :attr:`altitude` attributes
.. code-block:: python
temperature = bme280.temperature
relative_humidity = bme280.relative_humidity
pressure = bme280.pressure
altitude = bme280.altitude
"""
def __init__(self, spi, cs, baudrate=100000):
import adafruit_bus_device.spi_device as spi_device # pylint: disable=import-outside-toplevel
self._spi = spi_device.SPIDevice(spi, cs, baudrate=baudrate)
super().__init__()
def _read_register(self, register, length):
register = (register | 0x80) & 0xFF # Read single, bit 7 high.
with self._spi as spi:
spi.write(bytearray([register])) # pylint: disable=no-member
result = bytearray(length)
spi.readinto(result) # pylint: disable=no-member
return result
def _write_register_byte(self, register, value):
register &= 0x7F # Write, bit 7 low.
with self._spi as spi:
spi.write(bytes([register, value & 0xFF])) # pylint: disable=no-member
``` |
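For completeness, a compact I2C usage sketch assembled from the quickstart docstrings above (assumes a CircuitPython board exposing `board.SCL`/`board.SDA`):
```python
import board
from adafruit_bme280 import basic

i2c = board.I2C()  # uses board.SCL and board.SDA
bme280 = basic.Adafruit_BME280_I2C(i2c)

# set the local sea-level pressure (hPa) so altitude readings are meaningful
bme280.sea_level_pressure = 1013.25

print("Temperature: %0.1f C" % bme280.temperature)
print("Humidity: %0.1f %%" % bme280.relative_humidity)
print("Pressure: %0.1f hPa" % bme280.pressure)
print("Altitude: %0.2f m" % bme280.altitude)
```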
{
"source": "jposada202020/Adafruit_CircuitPython_DPS310",
"score": 2
} |
#### File: jposada202020/Adafruit_CircuitPython_DPS310/adafruit_dps310.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_DPS310.git"
# Common imports; remove if unused or pylint will complain
import math
from time import sleep
import adafruit_bus_device.i2c_device as i2c_device
from adafruit_register.i2c_struct import UnaryStruct, ROUnaryStruct
from adafruit_register.i2c_bit import RWBit, ROBit
from adafruit_register.i2c_bits import RWBits, ROBits
_DPS310_DEFAULT_ADDRESS = 0x77 # DPS310 default i2c address
_DPS310_DEVICE_ID = 0x10 # DPS310 device identifier
_DPS310_PRSB2 = 0x00 # Highest byte of pressure data
_DPS310_TMPB2 = 0x03 # Highest byte of temperature data
_DPS310_PRSCFG = 0x06 # Pressure configuration
_DPS310_TMPCFG = 0x07 # Temperature configuration
_DPS310_MEASCFG = 0x08 # Sensor configuration
_DPS310_CFGREG = 0x09 # Interrupt/FIFO configuration
_DPS310_RESET = 0x0C # Soft reset
_DPS310_PRODREVID = 0x0D # Register that contains the part ID
_DPS310_TMPCOEFSRCE = 0x28 # Temperature calibration src
# pylint: disable=no-member,unnecessary-pass
class CV:
"""struct helper"""
@classmethod
def add_values(cls, value_tuples):
"""Add CV values to the class"""
cls.string = {}
cls.lsb = {}
for value_tuple in value_tuples:
name, value, string, lsb = value_tuple
setattr(cls, name, value)
cls.string[value] = string
cls.lsb[value] = lsb
@classmethod
def is_valid(cls, value):
"""Validate that a given value is a member"""
return value in cls.string
class Mode(CV):
"""Options for ``mode``
+--------------------------+------------------------------------------------------------------+
| Mode | Description |
+--------------------------+------------------------------------------------------------------+
| ``Mode.IDLE`` | Puts the sensor into a shutdown state |
+--------------------------+------------------------------------------------------------------+
| ``Mode.ONE_PRESSURE`` | Setting `mode` to ``Mode.ONE_PRESSURE`` takes a single pressure |
| | measurement then switches to ``Mode.IDLE`` |
+--------------------------+------------------------------------------------------------------+
| ``Mode.ONE_TEMPERATURE`` | Setting `mode` to ``Mode.ONE_TEMPERATURE`` takes a single |
| | temperature measurement then switches to ``Mode.IDLE`` |
+--------------------------+------------------------------------------------------------------+
| ``Mode.CONT_PRESSURE`` | Take pressure measurements at the current `pressure_rate`. |
| | `temperature` will not be updated |
+--------------------------+------------------------------------------------------------------+
| ``Mode.CONT_TEMP`` | Take temperature measurements at the current `temperature_rate`. |
| | `pressure` will not be updated |
+--------------------------+------------------------------------------------------------------+
| ``Mode.CONT_PRESTEMP`` | Take temperature and pressure measurements at the current |
| | `pressure_rate` and `temperature_rate` |
+--------------------------+------------------------------------------------------------------+
"""
pass # pylint: disable=unnecessary-pass
Mode.add_values(
(
("IDLE", 0, "Idle", None),
("ONE_PRESSURE", 1, "One-Shot Pressure", None),
("ONE_TEMPERATURE", 2, "One-Shot Temperature", None),
("CONT_PRESSURE", 5, "Continuous Pressure", None),
("CONT_TEMP", 6, "Continuous Temperature", None),
("CONT_PRESTEMP", 7, "Continuous Pressure & Temperature", None),
)
)
class Rate(CV):
"""Options for :attr:`pressure_rate` and :attr:`temperature_rate`"""
pass
Rate.add_values(
(
("RATE_1_HZ", 0, 1, None),
("RATE_2_HZ", 1, 2, None),
("RATE_4_HZ", 2, 4, None),
("RATE_8_HZ", 3, 8, None),
("RATE_16_HZ", 4, 16, None),
("RATE_32_HZ", 5, 32, None),
("RATE_64_HZ", 6, 64, None),
("RATE_128_HZ", 7, 128, None),
)
)
class SampleCount(CV):
"""Options for :attr:`temperature_oversample_count` and :attr:`pressure_oversample_count`"""
pass
SampleCount.add_values(
(
("COUNT_1", 0, 1, None),
("COUNT_2", 1, 2, None),
("COUNT_4", 2, 4, None),
("COUNT_8", 3, 8, None),
("COUNT_16", 4, 16, None),
("COUNT_32", 5, 32, None),
("COUNT_64", 6, 64, None),
("COUNT_128", 7, 128, None),
)
)
# pylint: enable=unnecessary-pass
class DPS310:
# pylint: disable=too-many-instance-attributes
"""Library for the DPS310 Precision Barometric Pressure Sensor.
:param ~busio.I2C i2c_bus: The I2C bus the DPS310 is connected to.
:param int address: The I2C device address. Defaults to :const:`0x77`
**Quickstart: Importing and using the DPS310**
Here is an example of using the :class:`DPS310` class.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
import adafruit_dps310
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
dps310 = adafruit_dps310.DPS310(i2c)
Now you have access to the :attr:`temperature` and :attr:`pressure` attributes.
.. code-block:: python
temperature = dps310.temperature
pressure = dps310.pressure
"""
# Register definitions
_device_id = ROUnaryStruct(_DPS310_PRODREVID, ">B")
_reset_register = UnaryStruct(_DPS310_RESET, ">B")
_mode_bits = RWBits(3, _DPS310_MEASCFG, 0)
_pressure_ratebits = RWBits(3, _DPS310_PRSCFG, 4)
_pressure_osbits = RWBits(4, _DPS310_PRSCFG, 0)
_temp_ratebits = RWBits(3, _DPS310_TMPCFG, 4)
_temp_osbits = RWBits(4, _DPS310_TMPCFG, 0)
_temp_measurement_src_bit = RWBit(_DPS310_TMPCFG, 7)
_pressure_shiftbit = RWBit(_DPS310_CFGREG, 2)
_temp_shiftbit = RWBit(_DPS310_CFGREG, 3)
_coefficients_ready = RWBit(_DPS310_MEASCFG, 7)
_sensor_ready = RWBit(_DPS310_MEASCFG, 6)
_temp_ready = RWBit(_DPS310_MEASCFG, 5)
_pressure_ready = RWBit(_DPS310_MEASCFG, 4)
_raw_pressure = ROBits(24, _DPS310_PRSB2, 0, 3, lsb_first=False)
_raw_temperature = ROBits(24, _DPS310_TMPB2, 0, 3, lsb_first=False)
_calib_coeff_temp_src_bit = ROBit(_DPS310_TMPCOEFSRCE, 7)
_reg0e = RWBits(8, 0x0E, 0)
_reg0f = RWBits(8, 0x0F, 0)
_reg62 = RWBits(8, 0x62, 0)
def __init__(self, i2c_bus, address=_DPS310_DEFAULT_ADDRESS):
self.i2c_device = i2c_device.I2CDevice(i2c_bus, address)
if self._device_id != _DPS310_DEVICE_ID:
raise RuntimeError("Failed to find DPS310 - check your wiring!")
self._pressure_scale = None
self._temp_scale = None
self._c0 = None
self._c1 = None
self._c00 = None
self._c10 = None
self._c01 = None
self._c11 = None
self._c20 = None
self._c21 = None
self._c30 = None
self._oversample_scalefactor = (
524288,
1572864,
3670016,
7864320,
253952,
516096,
1040384,
2088960,
)
self.sea_level_pressure = 1013.25
"""Pressure in hectoPascals at sea level. Used to calibrate :attr:`altitude`."""
self.initialize()
def initialize(self):
"""Initialize the sensor to continuous measurement"""
self.reset()
self.pressure_rate = Rate.RATE_64_HZ
self.pressure_oversample_count = SampleCount.COUNT_64
self.temperature_rate = Rate.RATE_64_HZ
self.temperature_oversample_count = SampleCount.COUNT_64
self.mode = Mode.CONT_PRESTEMP
# wait until we have at least one good measurement
self.wait_temperature_ready()
self.wait_pressure_ready()
# (https://github.com/Infineon/DPS310-Pressure-Sensor#temperature-measurement-issue)
# similar to DpsClass::correctTemp(void) from infineon's c++ library
def _correct_temp(self):
"""Correct temperature readings on ICs with a fuse bit problem"""
self._reg0e = 0xA5
self._reg0f = 0x96
self._reg62 = 0x02
self._reg0e = 0
self._reg0f = 0
# perform a temperature measurement
# the most recent temperature will be saved internally
# and used for compensation when calculating pressure
_unused = self._raw_temperature
def reset(self):
"""Reset the sensor"""
self._reset_register = 0x89
# wait for hardware reset to finish
sleep(0.010)
while not self._sensor_ready:
sleep(0.001)
self._correct_temp()
self._read_calibration()
# make sure we're using the temperature source used for calibration
self._temp_measurement_src_bit = self._calib_coeff_temp_src_bit
@property
def pressure(self):
"""Returns the current pressure reading in kPA"""
temp_reading = self._raw_temperature
raw_temperature = self._twos_complement(temp_reading, 24)
pressure_reading = self._raw_pressure
raw_pressure = self._twos_complement(pressure_reading, 24)
_scaled_rawtemp = raw_temperature / self._temp_scale
_temperature = _scaled_rawtemp * self._c1 + self._c0 / 2.0
p_red = raw_pressure / self._pressure_scale
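# second-order compensation polynomial from the DPS310 datasheet (t_red = _scaled_rawtemp):
# p = c00 + p_red*(c10 + p_red*(c20 + p_red*c30)) + t_red*(c01 + p_red*(c11 + p_red*c21))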
pres_calc = (
self._c00
+ p_red * (self._c10 + p_red * (self._c20 + p_red * self._c30))
+ _scaled_rawtemp * (self._c01 + p_red * (self._c11 + p_red * self._c21))
)
final_pressure = pres_calc / 100
return final_pressure
@property
def altitude(self):
"""The altitude based on the sea level pressure (:attr:`sea_level_pressure`) -
which you must enter ahead of time)"""
return 44330 * (1.0 - math.pow(self.pressure / self.sea_level_pressure, 0.1903))
@property
def temperature(self):
"""The current temperature reading in degrees Celsius"""
_scaled_rawtemp = self._raw_temperature / self._temp_scale
_temperature = _scaled_rawtemp * self._c1 + self._c0 / 2.0
return _temperature
@property
def temperature_ready(self):
"""Returns true if there is a temperature reading ready"""
return self._temp_ready
def wait_temperature_ready(self):
"""Wait until a temperature measurement is available.
To avoid waiting indefinitely this function raises an
error if the sensor isn't configured for temperature measurements,
ie. ``Mode.ONE_TEMPERATURE``, ``Mode.CONT_TEMP`` or ``Mode.CONT_PRESTEMP``.
See the `Mode` documentation for details.
"""
if (
self._mode_bits == Mode.IDLE
or self._mode_bits == Mode.ONE_PRESSURE
or self._mode_bits == Mode.CONT_PRESSURE
):
raise RuntimeError(
"Sensor mode is set to idle or pressure measurement,\
can't wait for a temperature measurement"
)
while self._temp_ready is False:
sleep(0.001)
@property
def pressure_ready(self):
"""Returns true if pressure readings are ready"""
return self._pressure_ready
def wait_pressure_ready(self):
"""Wait until a pressure measurement is available
To avoid waiting indefinitely this function raises an
error if the sensor isn't configured for pressure measurements,
ie. ``Mode.ONE_PRESSURE``, ``Mode.CONT_PRESSURE`` or ``Mode.CONT_PRESTEMP``
See the `Mode` documentation for details.
"""
if (
self._mode_bits == Mode.IDLE
or self._mode_bits == Mode.ONE_TEMPERATURE
or self._mode_bits == Mode.CONT_TEMP
):
raise RuntimeError(
"Sensor mode is set to idle or temperature measurement,\
can't wait for a pressure measurement"
)
while self._pressure_ready is False:
sleep(0.001)
@property
def mode(self):
"""The measurement mode. Must be a `Mode`. See the `Mode` documentation for details"""
return self._mode_bits
@mode.setter
def mode(self, value):
if not Mode.is_valid(value):
raise AttributeError("mode must be an `Mode`")
self._mode_bits = value
@property
def pressure_rate(self):
"""Configure the pressure measurement rate. Must be a `Rate`"""
return self._pressure_ratebits
@pressure_rate.setter
def pressure_rate(self, value):
if not Rate.is_valid(value):
raise AttributeError("pressure_rate must be a Rate")
self._pressure_ratebits = value
@property
def pressure_oversample_count(self):
"""The number of samples taken per pressure measurement. Must be a ``SampleCount``"""
return self._pressure_osbits
@pressure_oversample_count.setter
def pressure_oversample_count(self, value):
if not SampleCount.is_valid(value):
raise AttributeError("pressure_oversample_count must be a SampleCount")
self._pressure_osbits = value
self._pressure_shiftbit = value > SampleCount.COUNT_8
self._pressure_scale = self._oversample_scalefactor[value]
@property
def temperature_rate(self):
"""Configure the temperature measurement rate. Must be a `Rate`"""
return self._temp_ratebits
@temperature_rate.setter
def temperature_rate(self, value):
if not Rate.is_valid(value):
raise AttributeError("temperature_rate must be a Rate")
self._temp_ratebits = value
@property
def temperature_oversample_count(self):
"""The number of samples taken per temperature measurement. Must be a ``SampleCount``"""
return self._temp_osbits
@temperature_oversample_count.setter
def temperature_oversample_count(self, value):
if not SampleCount.is_valid(value):
raise AttributeError("temperature_oversample_count must be a SampleCount")
self._temp_osbits = value
self._temp_scale = self._oversample_scalefactor[value]
self._temp_shiftbit = value > SampleCount.COUNT_8
@staticmethod
def _twos_complement(val, bits):
if val & (1 << (bits - 1)):
val -= 1 << bits
return val
def _read_calibration(self):
while not self._coefficients_ready:
sleep(0.001)
buffer = bytearray(19)
coeffs = [None] * 18
for offset in range(18):
buffer = bytearray(2)
buffer[0] = 0x10 + offset
with self.i2c_device as i2c:
i2c.write_then_readinto(buffer, buffer, out_end=1, in_start=1)
coeffs[offset] = buffer[1]
self._c0 = (coeffs[0] << 4) | ((coeffs[1] >> 4) & 0x0F)
self._c0 = self._twos_complement(self._c0, 12)
self._c1 = self._twos_complement(((coeffs[1] & 0x0F) << 8) | coeffs[2], 12)
self._c00 = (coeffs[3] << 12) | (coeffs[4] << 4) | ((coeffs[5] >> 4) & 0x0F)
self._c00 = self._twos_complement(self._c00, 20)
self._c10 = ((coeffs[5] & 0x0F) << 16) | (coeffs[6] << 8) | coeffs[7]
self._c10 = self._twos_complement(self._c10, 20)
self._c01 = self._twos_complement((coeffs[8] << 8) | coeffs[9], 16)
self._c11 = self._twos_complement((coeffs[10] << 8) | coeffs[11], 16)
self._c20 = self._twos_complement((coeffs[12] << 8) | coeffs[13], 16)
self._c21 = self._twos_complement((coeffs[14] << 8) | coeffs[15], 16)
self._c30 = self._twos_complement((coeffs[16] << 8) | coeffs[17], 16)
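
# Hedged usage sketch (not part of the upstream driver). It assumes a CircuitPython
# board with a DPS310 on the default I2C pins and that this driver is published as
# ``adafruit_dps310`` with a ``DPS310`` class; the __main__ guard keeps imports clean.
if __name__ == "__main__":
    import time
    import board
    import adafruit_dps310  # assumed published package name for this driver

    dps310 = adafruit_dps310.DPS310(board.I2C())
    dps310.sea_level_pressure = 1013.25  # hPa, adjust for local conditions
    while True:
        print("Pressure: %.2f hPa" % dps310.pressure)
        print("Temperature: %.2f C" % dps310.temperature)
        print("Altitude: %.2f m" % dps310.altitude)
        time.sleep(1.0)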
``` |
{
"source": "jposada202020/Adafruit_CircuitPython_LIS2MDL",
"score": 3
} |
#### File: Adafruit_CircuitPython_LIS2MDL/examples/lis2mdl_compass.py
```python
import time
import math
import board
import adafruit_lis2mdl
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_lis2mdl.LIS2MDL(i2c)
# You will need the calibration values from your magnetometer calibration
# these values are in uT and are in X, Y, Z order (min and max values).
#
# To get these values run the lis2mdl_calibrate.py script on your device.
# Twist the device around in 3D space while it calibrates. It will print
# some calibration values like these:
# ...
# Calibrating - X: -46.62, Y: -22.33, Z: -16.94 uT
# ...
# Calibration complete:
# hardiron_calibration = [[-63.5487, 33.0313], [-40.5145, 53.8293], [-43.7153, 55.5101]]
#
# You need to copy your own value for hardiron_calibration from the output and paste it
# into this script here:
hardiron_calibration = [[-61.4879, 34.4782], [-43.6714, 53.5662], [-40.7337, 52.4554]]
# This will take the magnetometer values, adjust them with the calibrations
# and return a new array with the XYZ values ranging from -100 to 100
def normalize(_magvals):
ret = [0, 0, 0]
for i, axis in enumerate(_magvals):
minv, maxv = hardiron_calibration[i]
axis = min(max(minv, axis), maxv) # keep within min/max calibration
        ret[i] = (axis - minv) * 200 / (maxv - minv) - 100
return ret
while True:
magvals = sensor.magnetic
normvals = normalize(magvals)
print("magnetometer: %s -> %s" % (magvals, normvals))
# we will only use X and Y for the compass calculations, so hold it level!
compass_heading = int(math.atan2(normvals[1], normvals[0]) * 180.0 / math.pi)
# compass_heading is between -180 and +180 since atan2 returns -pi to +pi
# this translates it to be between 0 and 360
compass_heading += 180
print("Heading:", compass_heading)
time.sleep(0.1)
``` |
{
"source": "jposada202020/Adafruit_CircuitPython_TLA202x",
"score": 2
} |
#### File: Adafruit_CircuitPython_TLA202x/adafruit_tla202x/__init__.py
```python
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import ROUnaryStruct
from adafruit_register.i2c_bits import RWBits
from adafruit_register.i2c_bit import RWBit
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_TLA202x.git"
_TLA_DEFAULT_ADDRESS = const(0x48)
_DATA_REG = const(0x00)
_CONFIG_REG = const(0x01)
class CV:
"""struct helper"""
@classmethod
def add_values(cls, value_tuples):
"creates CV entires"
cls.string = {}
cls.lsb = {}
for value_tuple in value_tuples:
name, value, string, lsb = value_tuple
setattr(cls, name, value)
cls.string[value] = string
cls.lsb[value] = lsb
@classmethod
def is_valid(cls, value):
"Returns true if the given value is a member of the CV"
return value in cls.string
class DataRate(CV):
"""Options for :py:attr:`~adafruit_tla202x.TLA2024.data_rate`, to select the rate at which
samples are taken while measuring the voltage across the input pins
+-------------------------------+-------------------------+
| Rate | Measurement Rate |
+===============================+=========================+
| :py:const:`Rate.RATE_128SPS` | 128 Samples per second |
+-------------------------------+-------------------------+
| :py:const:`Rate.RATE_250SPS` | 250 Samples per second |
+-------------------------------+-------------------------+
| :py:const:`Rate.RATE_490SPS` | 490 Samples per second |
+-------------------------------+-------------------------+
| :py:const:`Rate.RATE_920SPS` | 920 Samples per second |
+-------------------------------+-------------------------+
| :py:const:`Rate.RATE_1600SPS` | 1600 Samples per second |
+-------------------------------+-------------------------+
| :py:const:`Rate.RATE_2400SPS` | 2400 Samples per second |
+-------------------------------+-------------------------+
| :py:const:`Rate.RATE_3300SPS` | 3300 Samples per second |
+-------------------------------+-------------------------+
"""
DataRate.add_values(
(
("RATE_128SPS", 0x0, 128, None),
("RATE_250SPS", 0x1, 250, None),
("RATE_490SPS", 0x2, 490, None),
("RATE_920SPS", 0x3, 920, None),
("RATE_1600SPS", 0x4, 1600, None),
("RATE_2400SPS", 0x5, 2400, None),
("RATE_3300SPS", 0x6, 3300, None),
)
)
class Mode(CV):
"""Options for :py:attr:`~adafruit_tla202x.TLA2024.mode`
+----------------------------+--------------------------------------------------------------+
| Mode | Description |
+============================+==============================================================+
| :py:const:`Mode.CONTINUOUS`| In Continuous mode, measurements are taken |
| | |
| | continuously and getting |
| | :py:attr:`~adafruit_tla202x.TLA2024.voltage` |
| | |
| | will return the latest measurement. |
+----------------------------+--------------------------------------------------------------+
| :py:const:`Mode.ONE_SHOT` | Setting the mode to :py:data:`~Mode.ONE_SHOT` takes a single |
| | |
| | measurement and then goes into a low power state. |
+----------------------------+--------------------------------------------------------------+
"""
Mode.add_values(
(
("CONTINUOUS", 0, "Continuous", None),
("ONE_SHOT", 1, "One Shot", None),
)
)
class Range(CV):
"""Options for :py:attr:`~adafruit_tla202x.TLA2024.range`, used to select the measurement range
by adjusting the gain of the internal amplifier
+--------------------------------+-------------------+------------+
| Range | Measurement Range | Resolution |
+================================+===================+============+
| :py:const:`Range.RANGE_6_144V` | ±6.144 V | 3 mV |
+--------------------------------+-------------------+------------+
| :py:const:`Range.RANGE_4_096V` | ±4.096 V | 2 mV |
+--------------------------------+-------------------+------------+
| :py:const:`Range.RANGE_2_048V` | ±2.048 V | 1 mV |
+--------------------------------+-------------------+------------+
| :py:const:`Range.RANGE_1_024V` | ±1.024 V | 0.5 mV |
+--------------------------------+-------------------+------------+
| :py:const:`Range.RANGE_0_512V` | ±0.512 V | 0.25 mV |
+--------------------------------+-------------------+------------+
"""
Range.add_values(
(
("RANGE_6_144V", 0x0, 6.144, 3),
("RANGE_4_096V", 0x1, 4.096, 2),
("RANGE_2_048V", 0x2, 2.048, 1),
("RANGE_1_024V", 0x3, 1.024, 0.5),
("RANGE_0_512V", 0x4, 0.512, 0.25),
("RANGE_0_256V", 0x5, 0.256, 0.125),
)
)
class Mux(CV):
"""Options for :py:attr:`~adafruit_tla202x.TLA2024.mux` to choose the inputs that voltage will
be measured across
+-------------------------------+--------------+--------------+
| Mux | Positive Pin | Negative Pin |
+===============================+==============+==============+
| :py:const:`Mux.MUX_AIN0_AIN1` | AIN 0 | AIN 1 |
+-------------------------------+--------------+--------------+
| :py:const:`Mux.MUX_AIN0_AIN3` | AIN 0 | AIN 3 |
+-------------------------------+--------------+--------------+
| :py:const:`Mux.MUX_AIN1_AIN3` | AIN 1 | AIN 3 |
+-------------------------------+--------------+--------------+
| :py:const:`Mux.MUX_AIN2_AIN3` | AIN 2 | AIN 3 |
+-------------------------------+--------------+--------------+
| :py:const:`Mux.MUX_AIN0_GND` | AIN 0 | GND |
+-------------------------------+--------------+--------------+
| :py:const:`Mux.MUX_AIN1_GND` | AIN 1 | GND |
+-------------------------------+--------------+--------------+
| :py:const:`Mux.MUX_AIN2_GND` | AIN 2 | GND |
+-------------------------------+--------------+--------------+
| :py:const:`Mux.MUX_AIN3_GND` | AIN 3 | GND |
+-------------------------------+--------------+--------------+
"""
Mux.add_values(
(
("MUX_AIN0_AIN1", 0x0, "AIN 0 to AIN 1", None),
("MUX_AIN0_AIN3", 0x1, "AIN 0 to AIN 3", None),
("MUX_AIN1_AIN3", 0x2, "AIN 1 to AIN 3", None),
("MUX_AIN2_AIN3", 0x3, "AIN 2 to AIN 3", None),
("MUX_AIN0_GND", 0x4, "AIN 0 to GND", None),
("MUX_AIN1_GND", 0x5, "AIN 1 to GND", None),
("MUX_AIN2_GND", 0x6, "AIN 2 to GND", None),
("MUX_AIN3_GND", 0x7, "AIN 3 to GND", None),
)
)
class TLA2024: # pylint:disable=too-many-instance-attributes
"""
I2C Interface for analog voltage measurements using the TI TLA2024 12-bit 4-channel ADC
:param i2c_bus: The I2C bus that the ADC is on.
    :param int address: The I2C address for the ADC. Defaults to 0x48
"""
_raw_adc_read = ROUnaryStruct(_DATA_REG, ">h")
_os = RWBit(_CONFIG_REG, 15, 2, lsb_first=False)
_mux = RWBits(3, _CONFIG_REG, 12, 2, lsb_first=False)
_pga = RWBits(3, _CONFIG_REG, 9, 2, lsb_first=False)
_mode = RWBit(_CONFIG_REG, 8, 2, lsb_first=False)
_data_rate = RWBits(3, _CONFIG_REG, 5, 2, lsb_first=False)
def __init__(self, i2c_bus, address=_TLA_DEFAULT_ADDRESS):
# pylint:disable=no-member
self.i2c_device = I2CDevice(i2c_bus, address)
self._last_one_shot = None
self.mode = Mode.CONTINUOUS
self.mux = Mux.MUX_AIN0_GND
# default to widest range and highest sample rate
self.data_rate = DataRate.RATE_3300SPS
self.range = Range.RANGE_6_144V
@property
def voltage(self):
"""The voltage between the two selected inputs"""
if self.mode == Mode.ONE_SHOT: # pylint:disable=no-member
return self._last_one_shot
return self._read_volts()
@property
def input_channel(self):
"""The channel to be sampled"""
return self._mux
@input_channel.setter
def input_channel(self, channel):
"""The input number to measure the voltage at, referenced to GND.
        :param channel: The channel number to switch to, from 0-3"""
if channel not in range(4):
raise AttributeError("input_channel must be set to a number from 0 to 3")
self._mux = 4 + channel
@property
def mode(self):
"""The measurement mode of the sensor. Must be a :py:const:`~Mode`. See the documentation
for :py:const:`~Mode` for more information"""
return self._mode
@mode.setter
def mode(self, mode):
if not Mode.is_valid(mode):
raise AttributeError("mode must be a valid Mode")
if mode == Mode.CONTINUOUS: # pylint:disable=no-member
self._mode = mode
return
# One Shot mode; switch mode, take a measurement and store it
self._mode = mode
self._os = True
while self._os:
pass
self._last_one_shot = self._read_volts()
@property
def range(self):
"""The measurement range of the ADC, changed by adjusting the Programmable Gain Amplifier
`range` must be a :py:const:`~Range`. See the documentation for :py:const:`~Range`
for more information"""
return self._pga
@range.setter
def range(self, measurement_range):
if not Range.is_valid(measurement_range):
raise AttributeError("range must be a valid Range")
self._pga = measurement_range
@property
def data_rate(self):
"""selects the rate at which measurement samples are taken. Must be a :py:const:`~DataRate`
. See the documentation for :py:const:`~DataRate` for more information"""
return self._data_rate
@data_rate.setter
def data_rate(self, rate):
if not DataRate.is_valid(rate): # pylint:disable=no-member
raise AttributeError("data_rate must be a valid DataRate")
self._data_rate = rate
@property
def mux(self):
"""selects the inputs that voltage will be measured between. Must be a
:py:const:`~adafruit_tla202x.Mux`. See the :py:const:`~adafruit_tla202x.Mux` documentation
for more information about the available options"""
return self._mux
@mux.setter
def mux(self, mux_connection):
if not Mux.is_valid(mux_connection): # pylint:disable=no-member
raise AttributeError("mux must be a valid Mux")
self._mux = mux_connection
def read(self, channel):
"""Switch to the given channel and take a single ADC reading in One Shot mode
:param channel: The channel number to switch to, from 0-3
"""
if not self.input_channel == channel:
self.input_channel = channel
self.mode = Mode.ONE_SHOT # pylint:disable=no-member
return self._read_adc()
def _read_volts(self):
value_lsb = self._read_adc()
return value_lsb * Range.lsb[self.range] / 1000.0
def _read_adc(self):
value_lsb = self._raw_adc_read
value_lsb >>= 4
if value_lsb & (1 << 11):
value_lsb |= 0xF000
else:
value_lsb &= ~0xF000
return value_lsb
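
# Hedged usage sketch (not part of the upstream driver). It assumes a CircuitPython
# board with the TLA2024 wired to the default I2C pins; the __main__ guard keeps
# importing this module side-effect free.
if __name__ == "__main__":
    import board

    tla = TLA2024(board.I2C())
    tla.range = Range.RANGE_2_048V  # roughly 1 mV per LSB, per the table above
    print("AIN0 continuous voltage:", tla.voltage, "V")  # constructor defaults to AIN0 vs GND
    for channel in range(4):
        # read() switches to one-shot mode, samples the channel and returns raw counts
        print("channel", channel, "->", tla.read(channel), "counts")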
``` |
{
"source": "jposada202020/Adafruit_CircuitPython_TSL2591",
"score": 2
} |
#### File: jposada202020/Adafruit_CircuitPython_TSL2591/adafruit_tsl2591.py
```python
from micropython import const
import adafruit_bus_device.i2c_device as i2c_device
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_TSL2591.git"
# Internal constants:
_TSL2591_ADDR = const(0x29)
_TSL2591_COMMAND_BIT = const(0xA0)
_TSL2591_ENABLE_POWEROFF = const(0x00)
_TSL2591_ENABLE_POWERON = const(0x01)
_TSL2591_ENABLE_AEN = const(0x02)
_TSL2591_ENABLE_AIEN = const(0x10)
_TSL2591_ENABLE_NPIEN = const(0x80)
_TSL2591_REGISTER_ENABLE = const(0x00)
_TSL2591_REGISTER_CONTROL = const(0x01)
_TSL2591_REGISTER_DEVICE_ID = const(0x12)
_TSL2591_REGISTER_CHAN0_LOW = const(0x14)
_TSL2591_REGISTER_CHAN1_LOW = const(0x16)
_TSL2591_LUX_DF = 408.0
_TSL2591_LUX_COEFB = 1.64
_TSL2591_LUX_COEFC = 0.59
_TSL2591_LUX_COEFD = 0.86
_TSL2591_MAX_COUNT_100MS = const(36863) # 0x8FFF
_TSL2591_MAX_COUNT = const(65535) # 0xFFFF
# User-facing constants:
GAIN_LOW = 0x00 # low gain (1x)
"""Low gain (1x)"""
GAIN_MED = 0x10 # medium gain (25x)
"""Medium gain (25x)"""
GAIN_HIGH = 0x20  # high gain (428x)
"""High gain (428x)"""
GAIN_MAX = 0x30 # max gain (9876x)
"""Max gain (9876x)"""
INTEGRATIONTIME_100MS = 0x00 # 100 millis
"""100 millis"""
INTEGRATIONTIME_200MS = 0x01 # 200 millis
"""200 millis"""
INTEGRATIONTIME_300MS = 0x02 # 300 millis
"""300 millis"""
INTEGRATIONTIME_400MS = 0x03 # 400 millis
"""400 millis"""
INTEGRATIONTIME_500MS = 0x04 # 500 millis
"""500 millis"""
INTEGRATIONTIME_600MS = 0x05 # 600 millis
"""600 millis"""
class TSL2591:
"""TSL2591 high precision light sensor.
:param ~busio.I2C i2c: The I2C bus the device is connected to
:param int address: The I2C device address. Defaults to :const:`0x29`
**Quickstart: Importing and using the device**
Here is an example of using the :class:`TSL2591` class.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
import adafruit_tsl2591
Once this is done you can define your `board.I2C` object and define your sensor object
.. code-block:: python
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_tsl2591.TSL2591(i2c)
Now you have access to the :attr:`lux`, :attr:`infrared`
:attr:`visible` and :attr:`full_spectrum` attributes
.. code-block:: python
lux = sensor.lux
infrared = sensor.infrared
visible = sensor.visible
full_spectrum = sensor.full_spectrum
"""
# Class-level buffer to reduce memory usage and allocations.
# Note this is NOT thread-safe or re-entrant by design.
_BUFFER = bytearray(2)
def __init__(self, i2c, address=_TSL2591_ADDR):
self._integration_time = 0
self._gain = 0
self._device = i2c_device.I2CDevice(i2c, address)
# Verify the chip ID.
if self._read_u8(_TSL2591_REGISTER_DEVICE_ID) != 0x50:
raise RuntimeError("Failed to find TSL2591, check wiring!")
# Set default gain and integration times.
self.gain = GAIN_MED
self.integration_time = INTEGRATIONTIME_100MS
# Put the device in a powered on state after initialization.
self.enable()
def _read_u8(self, address):
# Read an 8-bit unsigned value from the specified 8-bit address.
with self._device as i2c:
# Make sure to add command bit to read request.
self._BUFFER[0] = (_TSL2591_COMMAND_BIT | address) & 0xFF
i2c.write_then_readinto(self._BUFFER, self._BUFFER, out_end=1, in_end=1)
return self._BUFFER[0]
# Disable invalid name check since pylint isn't smart enough to know LE
# is an abbreviation for little-endian.
# pylint: disable=invalid-name
def _read_u16LE(self, address):
# Read a 16-bit little-endian unsigned value from the specified 8-bit
# address.
with self._device as i2c:
# Make sure to add command bit to read request.
self._BUFFER[0] = (_TSL2591_COMMAND_BIT | address) & 0xFF
i2c.write_then_readinto(self._BUFFER, self._BUFFER, out_end=1, in_end=2)
return (self._BUFFER[1] << 8) | self._BUFFER[0]
# pylint: enable=invalid-name
def _write_u8(self, address, val):
# Write an 8-bit unsigned value to the specified 8-bit address.
with self._device as i2c:
# Make sure to add command bit to write request.
self._BUFFER[0] = (_TSL2591_COMMAND_BIT | address) & 0xFF
self._BUFFER[1] = val & 0xFF
i2c.write(self._BUFFER, end=2)
def enable(self):
"""Put the device in a fully powered enabled mode."""
self._write_u8(
_TSL2591_REGISTER_ENABLE,
_TSL2591_ENABLE_POWERON
| _TSL2591_ENABLE_AEN
| _TSL2591_ENABLE_AIEN
| _TSL2591_ENABLE_NPIEN,
)
def disable(self):
"""Disable the device and go into low power mode."""
self._write_u8(_TSL2591_REGISTER_ENABLE, _TSL2591_ENABLE_POWEROFF)
@property
def gain(self):
"""Get and set the gain of the sensor. Can be a value of:
- ``GAIN_LOW`` (1x)
- ``GAIN_MED`` (25x)
- ``GAIN_HIGH`` (428x)
- ``GAIN_MAX`` (9876x)
"""
control = self._read_u8(_TSL2591_REGISTER_CONTROL)
return control & 0b00110000
@gain.setter
def gain(self, val):
assert val in (GAIN_LOW, GAIN_MED, GAIN_HIGH, GAIN_MAX)
# Set appropriate gain value.
control = self._read_u8(_TSL2591_REGISTER_CONTROL)
control &= 0b11001111
control |= val
self._write_u8(_TSL2591_REGISTER_CONTROL, control)
# Keep track of gain for future lux calculations.
self._gain = val
@property
def integration_time(self):
"""Get and set the integration time of the sensor. Can be a value of:
- ``INTEGRATIONTIME_100MS`` (100 millis)
- ``INTEGRATIONTIME_200MS`` (200 millis)
- ``INTEGRATIONTIME_300MS`` (300 millis)
- ``INTEGRATIONTIME_400MS`` (400 millis)
- ``INTEGRATIONTIME_500MS`` (500 millis)
- ``INTEGRATIONTIME_600MS`` (600 millis)
"""
control = self._read_u8(_TSL2591_REGISTER_CONTROL)
return control & 0b00000111
@integration_time.setter
def integration_time(self, val):
assert 0 <= val <= 5
# Set control bits appropriately.
control = self._read_u8(_TSL2591_REGISTER_CONTROL)
control &= 0b11111000
control |= val
self._write_u8(_TSL2591_REGISTER_CONTROL, control)
# Keep track of integration time for future reading delay times.
self._integration_time = val
@property
def raw_luminosity(self):
"""Read the raw luminosity from the sensor (both IR + visible and IR
only channels) and return a 2-tuple of those values. The first value
is IR + visible luminosity (channel 0) and the second is the IR only
(channel 1). Both values are 16-bit unsigned numbers (0-65535).
"""
# Read both the luminosity channels.
channel_0 = self._read_u16LE(_TSL2591_REGISTER_CHAN0_LOW)
channel_1 = self._read_u16LE(_TSL2591_REGISTER_CHAN1_LOW)
return (channel_0, channel_1)
@property
def full_spectrum(self):
"""Read the full spectrum (IR + visible) light and return its value
as a 32-bit unsigned number.
"""
channel_0, channel_1 = self.raw_luminosity
return (channel_1 << 16) | channel_0
@property
def infrared(self):
"""Read the infrared light and return its value as a 16-bit unsigned number."""
_, channel_1 = self.raw_luminosity
return channel_1
@property
def visible(self):
"""Read the visible light and return its value as a 32-bit unsigned number."""
channel_0, channel_1 = self.raw_luminosity
full = (channel_1 << 16) | channel_0
return full - channel_1
@property
def lux(self):
"""Read the sensor and calculate a lux value from both its infrared
and visible light channels.
.. note::
:attr:`lux` is not calibrated!
"""
channel_0, channel_1 = self.raw_luminosity
# Compute the atime in milliseconds
atime = 100.0 * self._integration_time + 100.0
# Set the maximum sensor counts based on the integration time (atime) setting
if self._integration_time == INTEGRATIONTIME_100MS:
max_counts = _TSL2591_MAX_COUNT_100MS
else:
max_counts = _TSL2591_MAX_COUNT
# Handle overflow.
if channel_0 >= max_counts or channel_1 >= max_counts:
message = (
"Overflow reading light channels!, Try to reduce the gain of\n "
+ "the sensor using adafruit_tsl2591.GAIN_LOW"
)
raise RuntimeError(message)
# Calculate lux using same equation as Arduino library:
# https://github.com/adafruit/Adafruit_TSL2591_Library/blob/master/Adafruit_TSL2591.cpp
again = 1.0
if self._gain == GAIN_MED:
again = 25.0
elif self._gain == GAIN_HIGH:
again = 428.0
elif self._gain == GAIN_MAX:
again = 9876.0
cpl = (atime * again) / _TSL2591_LUX_DF
lux1 = (channel_0 - (_TSL2591_LUX_COEFB * channel_1)) / cpl
lux2 = (
(_TSL2591_LUX_COEFC * channel_0) - (_TSL2591_LUX_COEFD * channel_1)
) / cpl
return max(lux1, lux2)
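
# Hedged usage sketch (not part of the upstream driver). It assumes a CircuitPython
# board with the TSL2591 on the default I2C pins; guarded so importing stays clean.
if __name__ == "__main__":
    import time
    import board

    sensor = TSL2591(board.I2C())
    sensor.gain = GAIN_LOW  # lowest gain is least likely to overflow in bright light
    while True:
        print("Lux: %.2f  IR: %d  Visible: %d" % (sensor.lux, sensor.infrared, sensor.visible))
        time.sleep(1.0)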
``` |
{
"source": "jposada202020/CircuitPython_Display_Frame",
"score": 3
} |
#### File: jposada202020/CircuitPython_Display_Frame/circuitpython_display_frame.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/foamyguy/Circuitpython_CircuitPython_Display_Frame.git"
import displayio
import terminalio
from adafruit_display_text import bitmap_label
from adafruit_display_shapes.roundrect import RoundRect
class Frame(displayio.Group):
# pylint: disable=too-many-arguments,too-many-locals
"""
A rounded rectangle frame with a text label at the top center.
:param int x: The x-position of the top left corner.
:param int y: The y-position of the top left corner.
:param int width: The width of the rounded-corner rectangle.
:param int height: The height of the rounded-corner rectangle.
:param int corner_radius: The radius of the rounded corner.
:param str text: Text to display
:param Font font: A font class that has ``get_bounding_box`` and ``get_glyph``.
Defaults to terminalio.FONT
:param outline: The outline of the rounded-corner rectangle. Can be a hex value for a color.
:param stroke: Used for the outline. Will not change the outer bound size set by ``width`` and
``height``.
"""
LABEL_ALIGN_RIGHT = 2
LABEL_ALIGN_CENTER = 1
LABEL_ALIGN_LEFT = 0
def __init__(
self,
x,
y,
width,
height,
corner_radius=10,
text="Frame",
font=terminalio.FONT,
outline=0xFFFFFF,
text_color=None,
background_color=0x0,
stroke=2,
top_label=True,
label_align=LABEL_ALIGN_LEFT,
):
super().__init__(x=x, y=y)
roundrect = RoundRect(
0,
0,
width,
height,
corner_radius,
fill=None,
outline=outline,
stroke=stroke,
)
self.append(roundrect)
if outline and not text_color:
text_color = outline
self.label = bitmap_label.Label(
font,
text=text,
color=text_color,
background_color=background_color,
padding_left=2,
padding_right=1,
)
self.label_align = label_align
self.top_label = top_label
if self.label.bounding_box[2] * 2 < width - (corner_radius * 2):
self.label.scale = 2
if top_label:
_anchored_pos_y = 0
else:
_anchored_pos_y = height - 6
if label_align == Frame.LABEL_ALIGN_CENTER:
_anchor_x = 0.5
_anchored_pos_x = width // 2
elif label_align == Frame.LABEL_ALIGN_RIGHT:
_anchor_x = 1.0
_anchored_pos_x = width - corner_radius
else: # label_align == Frame.LABEL_ALIGN_LEFT:
_anchor_x = 0
_anchored_pos_x = corner_radius
self.label.anchor_point = (_anchor_x, 0.5)
self.label.anchored_position = (_anchored_pos_x, _anchored_pos_y)
self.append(self.label)
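
# Hedged usage sketch (not part of the upstream module). It assumes a board with a
# built-in display exposed as ``board.DISPLAY``; guarded so importing stays clean.
if __name__ == "__main__":
    import board

    display = board.DISPLAY
    frame = Frame(10, 10, 120, 60, text="Sensors", label_align=Frame.LABEL_ALIGN_CENTER)
    group = displayio.Group()
    group.append(frame)
    display.show(group)
    while True:
        pass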
``` |
{
"source": "jposada202020/CircuitPython_equalizer",
"score": 2
} |
#### File: CircuitPython_equalizer/equalizer/equalizer.py
```python
import displayio
try:
import adafruit_fancyled.adafruit_fancyled as fancy
except ImportError:
pass
from adafruit_displayio_layout.widgets.widget import Widget
from equalizer import rectangle_helper
from equalizer import rgb
class Equalizer(Widget):
"""An equalizer widget. The origin is set using ``x`` and ``y``.
:param int x: x position of the plane origin
:param int y: y position of the plane origin
:param int width: requested width, in pixels.
:param int height: requested height, in pixels.
:param int background_color: background color to use defaults to black (0x000000)
    :param int number_bars: number of bars in the equalizer. Defaults to 1.
    :param bool bar_best_fit: when True, selects the best fit for the bars in the given width
    :param int bar_width: width in pixels of the equalizer bars. Defaults to 10.
    :param int pad_x: number of pixels to shift the bars to the right
    :param int number_segments: number of segments in each bar
    :param int segments_height: height in pixels of each equalizer bar segment
:param bool seg_best_fit: When True it will calculate segment height automatically
Defaults to True.
**Quickstart: Importing and using Equalizer**
Here is one way of importing the `Equalizer` class so you can use it as
the name ``Equal``:
.. code-block:: python
        from equalizer.equalizer import Equalizer as Equal
Now you can create an equalizer at pixel position x=20, y=30 using:
.. code-block:: python
my_equalizer=Equal(x=20, y=30) # instance the equalizer at x=20, y=30
Once you setup your display, you can now add ``my_equalizer`` to your display using:
.. code-block:: python
display.show(my_equalizer) # add the group to the display
If you want to have multiple display elements, you can create a group and then
append the plane and the other elements to the group. Then, you can add the full
group to the display as in this example:
.. code-block:: python
my_equalizer= Equal(20, 30) # instance the equalizer at x=20, y=30
my_group = displayio.Group(max_size=10) # make a group that can hold 10 items
my_group.append(my_equalizer) # Add my_equalizer to the group
#
# Append other display elements to the group
#
display.show(my_group) # add the group to the display
    **Summary: Equalizer Features and input variables**
    The `Equalizer` widget has some options for controlling its position, visible appearance,
and scale through a collection of input variables:
- **position**: ``x``, ``y``
- **size**: ``width`` and ``height``
- **color**: color is controlled internally, to ease the use of the widget
- **background color**: ``background_color``
- **range**: range is controlled internally to ease the use of the widget and is set
      to 100. To have other ranges, normalize your values first and then pass them to the
updater.
.. figure:: equalizer.gif
:scale: 100 %
:figwidth: 50%
:align: center
:alt: Picture of the equalizer widget in motion.
This shows the equalizer capabilities.
"""
def __init__(
self,
background_color: int = 0x000000,
number_bars: int = 1,
bar_best_fit: bool = True,
bar_width: int = 10,
pad_x: int = 0,
number_segments: int = 2,
segments_height: int = 10,
seg_best_fit: bool = True,
**kwargs,
) -> None:
# TODO Segment display [✓]
# TODO SEGMENT level logic [✓]
# TODO Animation function [✓]
# TODO Animated Gifs [✓]
# TODO SimpleTest example [✓]
# TODO Features docs [✓]
# TODO PNGs [✓]
# TODO Type Annotations [✓]
# TODO API example inclusion [✓]
# TODO API RST inclusion [✓]
# TODO Black Pylint example [✓]
# TODO Black Pylint __init__ [✓]
super().__init__(**kwargs, max_size=3)
self._background_color = background_color
if self.width < 42:
print("Equalizer minimum width is 40. Defaulting to 40")
self._width = 40
self._number_bars = number_bars
self._bar_width = bar_width
self._pad_x = pad_x
self._bar_best_fit = bar_best_fit
self._number_segments = number_segments
self._segments_height = segments_height
self._seg_best_fit = seg_best_fit
self._screen_bitmap = displayio.Bitmap(self.width, self.height, 5)
self._screen_bitmap.fill(10)
self._screen_palette = displayio.Palette(11)
self._screen_palette[10] = self._background_color
self._bar_inventory = list()
self._segment_inventory = list()
self._hor_bar_setup()
self._screen_tilegrid = displayio.TileGrid(
self._screen_bitmap,
pixel_shader=self._screen_palette,
x=0,
y=0,
)
self.append(self._screen_tilegrid)
def _hor_bar_setup(self):
if self._bar_best_fit:
self._bar_width = (
self.width - 2 * (self._number_bars + 1)
) // self._number_bars
else:
total_width = self._number_bars * self._bar_width + (
(self._number_bars + 1) * 2
)
if total_width > self.width:
print("Equalizer setup could not be displayed. Adjusting bar widths")
self._bar_width = (
self.width - ((self._number_bars + 1) * 2)
) // self._number_bars
widths_bars = self._number_bars * self._bar_width
width_free = self.width - widths_bars
separationx = width_free // (self._number_bars + 1)
x_local = separationx + self._pad_x
if self._seg_best_fit:
self._segments_height = (self.height - 2) // self._number_segments
else:
total_height = self._number_segments * self._segments_height + 6
if total_height > self.height:
print(
"Equalizer setup could not be displayed. Adjusting segments heights"
)
self._segments_height = (
self.height - ((self._number_segments + 1) * 2)
) // self._number_segments
heights_segs = self._number_segments * self._segments_height
height_free = self.height - heights_segs
self._separationy = height_free // (self._number_segments + 1)
for col in range(self._number_bars):
self._bar_inventory.append((col, x_local))
x_local = x_local + separationx + self._bar_width
y_local = self.height - self._separationy - self._segments_height
delta = 100 // self._number_segments
trigger_value = 0
for row in range(self._number_segments):
self._segment_inventory.append((row, y_local, trigger_value, 0))
y_local = y_local - self._separationy - self._segments_height
trigger_value = trigger_value + delta
for i, item in enumerate(self._segment_inventory):
prgb_color = rgb(item[1], 0, 100)
color_buffer = fancy.CRGB(prgb_color[0], prgb_color[1], prgb_color[2])
self._screen_palette[i] = color_buffer.pack()
def show_bars(self, values) -> None:
"""
        :param values: tuple of values used to update the equalizer bars
"""
for j, element in enumerate(self._segment_inventory):
for i, _ in enumerate(self._bar_inventory):
if element[2] < values[i]:
rectangle_helper(
self._bar_inventory[i][1],
self._segment_inventory[j][1],
self._segments_height,
self._bar_width,
self._screen_bitmap,
j,
self._screen_palette,
)
else:
rectangle_helper(
self._bar_inventory[i][1],
self._segment_inventory[j][1],
self._segments_height,
self._bar_width,
self._screen_bitmap,
10,
self._screen_palette,
)
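
# Hedged usage sketch (not part of the upstream module). It assumes a board with a
# built-in display exposed as ``board.DISPLAY`` and that the Widget base class accepts
# x/y/width/height keyword arguments; guarded so importing stays side-effect free.
if __name__ == "__main__":
    import time
    import random
    import board

    display = board.DISPLAY
    equalizer = Equalizer(x=10, y=10, width=100, height=80, number_bars=4, number_segments=8)
    display.show(equalizer)
    while True:
        # feed one 0-100 value per bar, as expected by show_bars()
        equalizer.show_bars(tuple(random.randint(0, 100) for _ in range(4)))
        time.sleep(0.2)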
``` |
{
"source": "jposada202020/CircuitPython_scales",
"score": 3
} |
#### File: jposada202020/CircuitPython_scales/scales.py
```python
import displayio
import terminalio
from adafruit_display_text.bitmap_label import Label
from vectorio import VectorShape, Polygon, Rectangle
try:
from typing import Tuple
except ImportError:
pass
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jposada202020/CircuitPython_scales.git"
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-few-public-methods
class Axes(displayio.Group):
"""
:param int x: pixel position. Defaults to :const:`0`
:param int y: pixel position. Defaults to :const:`0`
:param int,int limits: tuple of value range for the scale. Defaults to (0, 100)
:param int divisions: Divisions number
:param str direction: direction of the scale either :attr:`horizontal` or :attr:`vertical`
defaults to :attr:`horizontal`
:param int stroke: width in pixels of the scale axes. Defaults to :const:`3`
:param int length: scale length in pixels. Defaults to :const:`100`
:param int color: 24-bit hex value axes line color, Defaults to Purple :const:`0x990099`
"""
def __init__(
self,
x: int = 0,
y: int = 0,
limits: Tuple[int, int] = (0, 100),
divisions: int = 10,
direction: str = "horizontal",
stroke: int = 3,
length: int = 100,
color: int = 0x990099,
):
super().__init__(max_size=1)
self.x = x
self.y = y
self.limits = limits
self.divisions = divisions
if direction == "horizontal":
self.direction = True
else:
self.direction = False
self.stroke = stroke
self.length = length
self._palette = displayio.Palette(2)
self._palette.make_transparent(0)
self._palette[1] = color
self._tick_length = None
self._tick_stroke = None
self.ticks = None
self.text_ticks = None
def _draw_line(self):
"""Private function to draw the Axe.
:return: None
"""
if self.direction:
self.append(rectangle_draw(0, 0, self.stroke, self.length, self._palette))
else:
self.append(
rectangle_draw(0, -self.length, self.length, self.stroke, self._palette)
)
# pylint: disable=invalid-unary-operand-type
def _draw_ticks(self, tick_length: int = 10, tick_stroke: int = 4):
"""Private function to draw the ticks
:param int tick_length: tick length in pixels
:param int tick_stroke: tick thickness in pixels
:return: None
"""
self._tick_length = tick_length
self._tick_stroke = tick_stroke
self._conversion()
if self.direction:
for val in self.ticks[:-1]:
self.append(
rectangle_draw(
val - 1, -self._tick_length, self._tick_length, 3, self._palette
)
)
else:
for val in self.ticks[:-1]:
self.append(
rectangle_draw(0, -val, 3, self._tick_length, self._palette)
)
def _conversion(self):
"""Private function that creates the ticks distance and text.
:return: None
"""
self.ticks = list()
self.text_ticks = list()
espace = round(self.length / self.divisions)
rang_discrete = self.limits[1] - self.limits[0]
factorp = self.length / rang_discrete
for i in range(espace, self.length + 1, espace):
self.ticks.append(i)
self.text_ticks.append(str(int(self.limits[0] + i * 1 / factorp)))
def _draw_text(self):
"""Private function to draw the text, uses values found in ``_conversion``
:return: None
"""
index = 0
separation = 20
font_width = 12
if self.direction:
for tick_text in self.text_ticks[:-1]:
dist_x = self.ticks[index] - font_width // 2
dist_y = separation // 2
tick_label = Label(terminalio.FONT, text=tick_text, x=dist_x, y=dist_y)
self.append(tick_label)
index = index + 1
else:
for tick_text in self.text_ticks[:-1]:
dist_x = -separation
dist_y = -self.ticks[index]
tick_label = Label(terminalio.FONT, text=tick_text, x=dist_x, y=dist_y)
self.append(tick_label)
index = index + 1
class Scale(Axes):
"""
:param int x: pixel position. Defaults to :const:`0`
:param int y: pixel position. Defaults to :const:`0`
:param str direction: direction of the scale either :attr:`horizontal` or :attr:`vertical`
defaults to :attr:`horizontal`
:param int stroke: width in pixels of the scale axes. Defaults to 3
    :param int length: scale length in pixels. Defaults to 100
:param int color: 24-bit hex value axes line color, Defaults to purple :const:`0x990099`
:param int width: scale width in pixels. Defaults to :const:`50`
:param limits: tuple of value range for the scale. Defaults to :const:`(0, 100)`
:param int divisions: Divisions number
:param int back_color: 24-bit hex value axes line color.
Defaults to Light Blue :const:`0x9FFFFF`
:param int tick_length: Scale tick length in pixels. Defaults to :const:`10`
:param int tick_stroke: Scale tick width in pixels. Defaults to :const:`4`
**Quickstart: Importing and using Scales**
Here is one way of importing the `Scale` class so you can use it as
the name ``my_scale``:
.. code-block:: python
from Graphics import Scale
    Now you can create a vertical Scale at pixel position x=50, y=180 with 3 divisions and a range
of 0 to 80 using:
.. code-block:: python
my_scale = Scale(x=50, y=180, direction="vertical", divisions=3, limits=(0, 80))
Once you setup your display, you can now add ``my_scale`` to your display using:
.. code-block:: python
display.show(my_scale)
If you want to have multiple display elements, you can create a group and then
append the scale and the other elements to the group. Then, you can add the full
group to the display as in this example:
.. code-block:: python
my_scale= Scale(x=20, y=30)
my_group = displayio.Group(max_size=10) # make a group that can hold 10 items
my_group.append(my_scale) # Add my_slider to the group
#
# Append other display elements to the group
#
display.show(my_group) # add the group to the display
    **Summary: Scale Features and input variables**
The `Scale` class has some options for controlling its position, visible appearance,
and value through a collection of input variables:
- **position**: :attr:`x``, :attr:`y`
- **size**: :attr:`length` and :attr:`width`
- **color**: :attr:`color`, :attr:`back_color`
- **linewidths**: :attr:`stroke` and :attr:`tick_stroke`
- **value**: Set :attr:`value` to the initial value (`True` or `False`)
- **range and divisions**: :attr:`limits` and :attr:`divisions`
.. figure:: scales.png
:scale: 100 %
:align: center
:alt: Diagram of scales
Diagram showing a simple scale.
"""
def __init__(
self,
x: int = 0,
y: int = 0,
direction: str = "horizontal",
stroke: int = 3,
length: int = 100,
color: int = 0x990099,
width: int = 50,
limits: Tuple[int, int] = (0, 100),
divisions: int = 10,
back_color: int = 0x9FFFFF,
tick_length: int = 10,
tick_stroke: int = 4,
):
super().__init__(
x=x,
y=y,
direction=direction,
stroke=stroke,
length=length,
limits=limits,
divisions=divisions,
color=color,
)
self._width = width
self._back_color = back_color
self._draw_background()
self._draw_line()
self._draw_ticks()
self._tick_length = tick_length
self._tick_stroke = tick_stroke
self.pointer = None
self._draw_text()
self._draw_pointer()
def _draw_background(self):
"""Private function to draw the background for the scale
:return: None
"""
back_palette = displayio.Palette(2)
back_palette.make_transparent(0)
back_palette[1] = self._back_color
if self.direction:
self.append(
rectangle_draw(0, -self._width, self._width, self.length, back_palette)
)
else:
self.append(
rectangle_draw(0, -self.length, self.length, self._width, back_palette)
)
def _draw_pointer(
self,
color: int = 0xFF0000,
val_ini: int = 15,
space: int = 3,
pointer_length: int = 20,
pointer_stroke: int = 6,
):
"""Private function to initial draw the pointer.
:param int color: 24-bit hex value axes line color. Defaults to red :const:`0xFF0000`
:param int val_ini: initial value to draw the pointer
:param int space: separation in pixels from the ticker to the pointer.
Defaults to :const:`3`
:param int pointer_length: length in pixels for the point. Defaults to :const:`20`
:param int pointer_stroke: pointer thickness in pixels. Defaults to :const:`6`
:return: None
"""
pointer_palette = displayio.Palette(2)
pointer_palette.make_transparent(0)
pointer_palette[1] = color
self._pointer_length = pointer_length
self._space = space
self._pointer_stroke = pointer_stroke
if self.direction:
self.pointer = Polygon(
points=[
(
self.x - self._pointer_stroke // 2 + val_ini,
self.y - self.stroke - self._tick_length - self._space,
),
(
self.x - self._pointer_stroke // 2 + val_ini,
self.y
- self.stroke
- self._tick_length
- self._space
- self._pointer_length,
),
(
self.x + self._pointer_stroke // 2 + val_ini,
self.y
- self.stroke
- self._tick_length
- self._space
- self._pointer_length,
),
(
self.x + self._pointer_stroke // 2 + val_ini,
self.y - self.stroke - self._tick_length - self._space,
),
]
)
else:
self.pointer = Polygon(
points=[
(
self.stroke + self._tick_length + space,
self.y + self._pointer_stroke // 2 - val_ini,
),
(
self.stroke
+ self._tick_length
+ self._space
+ self._pointer_length,
self.y + self._pointer_stroke // 2 - val_ini,
),
(
self.stroke
+ self._tick_length
+ self._space
+ self._pointer_length,
self.y - self._pointer_stroke // 2 - val_ini,
),
(
self.stroke + self._tick_length + self._space,
self.y - self._pointer_stroke // 2 - val_ini,
),
]
)
pointer_shape = VectorShape(
shape=self.pointer,
pixel_shader=pointer_palette,
x=0,
y=-self.y,
)
self.append(pointer_shape)
def animate_pointer(self, value):
"""Public function to animate the pointer
:param value: value to draw the pointer
:return: None
"""
if self.direction:
self.pointer.points = [
(
self.x - self._pointer_stroke // 2 + value,
self.y - self.stroke - self._tick_length - self._space,
),
(
self.x - self._pointer_stroke // 2 + value,
self.y
- self.stroke
- self._tick_length
- self._space
- self._pointer_length,
),
(
self.x + self._pointer_stroke // 2 + value,
self.y
- self.stroke
- self._tick_length
- self._space
- self._pointer_length,
),
(
self.x + self._pointer_stroke // 2 + value,
self.y - self.stroke - self._tick_length - self._space,
),
]
else:
self.pointer.points = [
(
self.stroke + self._tick_length + self._space,
self.y + self._pointer_stroke // 2 - value,
),
(
self.stroke
+ self._tick_length
+ self._space
+ self._pointer_length,
self.y + self._pointer_stroke // 2 - value,
),
(
self.stroke
+ self._tick_length
+ self._space
+ self._pointer_length,
self.y - self._pointer_stroke // 2 - value,
),
(
self.stroke + self._tick_length + self._space,
self.y - self._pointer_stroke // 2 - value,
),
]
# pylint: disable=invalid-name
def rectangle_draw(x0: int, y0: int, height: int, width: int, palette):
"""rectangle_draw function
    Draws a rectangle using `vectorio.Rectangle`
    :param int x0: rectangle lower corner x position
    :param int y0: rectangle lower corner y position
    :param int height: rectangle height in pixels
    :param int width: rectangle width in pixels
:param `~displayio.Palette` palette: palette object to be used to draw the rectangle
"""
rect = Rectangle(width, height)
return VectorShape(shape=rect, pixel_shader=palette, x=x0, y=y0)
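
# Hedged usage sketch (not part of the upstream module). It assumes a board with a
# built-in display exposed as ``board.DISPLAY`` and that animate_pointer() takes a
# pixel offset along the scale length; guarded so importing stays side-effect free.
if __name__ == "__main__":
    import time
    import board

    display = board.DISPLAY
    my_scale = Scale(x=50, y=180, direction="vertical", divisions=5, limits=(0, 80))
    display.show(my_scale)
    while True:
        for value in range(0, my_scale.length + 1, 5):
            my_scale.animate_pointer(value)
            time.sleep(0.1)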
``` |
{
"source": "jposada202020/CircuitPython_slider",
"score": 2
} |
#### File: jposada202020/CircuitPython_slider/slider.py
```python
from adafruit_display_shapes.roundrect import RoundRect
from adafruit_displayio_layout.widgets.widget import Widget
from adafruit_displayio_layout.widgets.control import Control
from adafruit_displayio_layout.widgets.easing import quadratic_easeinout as easing
try:
from typing import Tuple
except ImportError:
pass
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_DisplayIO_Layout.git"
class Slider(Widget, Control):
"""
:param int x: pixel position, defaults to 0
:param int y: pixel position, defaults to 0
    :param int width: width of the slider in pixels. It is recommended to use 100;
     the height will auto-size relative to the width. Defaults to 100
:param int height: height of the slider in pixels, defaults to 40 pixels
:param int touch_padding: the width of an additional border surrounding the switch
that extends the touch response boundary, defaults to 0
:param anchor_point: starting point for the annotation line, where ``anchor_point`` is
an (A,B) tuple in relative units of the size of the widget, for example (0.0, 0.0) is
the upper left corner, and (1.0, 1.0) is the lower right corner of the widget.
If ``anchor_point`` is `None`, then ``anchored_position`` is used to set the
annotation line starting point, in widget size relative units (default is (0.0, 0.0)).
:type anchor_point: Tuple[float, float]
:param anchored_position: pixel position starting point for the annotation line
where ``anchored_position`` is an (x,y) tuple in pixel units relative to the
upper left corner of the widget, in pixel units (default is None).
:type anchored_position: Tuple[int, int]
:param fill_color: (*RGB tuple or 24-bit hex value*) slider fill color, default
is ``(66, 44, 66)`` gray.
:param outline_color: (*RGB tuple or 24-bit hex value*) slider outline color,
default is ``(30, 30, 30)`` dark gray.
:param background_color: (*RGB tuple or 24-bit hex value*) background color,
default is ``(255, 255, 255)`` white
:param int switch_stroke: outline stroke width for the switch and background, in pixels,
default is 2
:param Boolean value: the initial value for the switch, default is False
**Quickstart: Importing and using Slider**
Here is one way of importing the ``Slider`` class so you can use it as
the name ``Slider``:
.. code-block:: python
from adafruit_displayio_layout.widgets.slider import Slider
Now you can create an Slider at pixel position x=20, y=30 using:
.. code-block:: python
my_slider=Slider(x=20, y=30)
Once you setup your display, you can now add ``my_slider`` to your display using:
.. code-block:: python
display.show(my_slider) # add the group to the display
If you want to have multiple display elements, you can create a group and then
append the slider and the other elements to the group. Then, you can add the full
group to the display as in this example:
.. code-block:: python
my_slider= Slider(20, 30)
my_group = displayio.Group(max_size=10) # make a group that can hold 10 items
my_group.append(my_slider) # Add my_slider to the group
#
# Append other display elements to the group
#
display.show(my_group) # add the group to the display
**Summary: Slider Features and input variables**
The ``Slider`` widget has some options for controlling its position, visible appearance,
and value through a collection of input variables:
- **position**: ``x``, ``y`` or ``anchor_point`` and ``anchored_position``
- **size**: ``width`` and ``height`` (recommend to leave ``height`` = None to use
preferred aspect ratio)
- **switch color**: ``fill_color``, ``outline_color``
- **background color**: ``background_color``
- **linewidths**: ``switch_stroke``
- **value**: Set ``value`` to the initial value (True or False)
- **touch boundaries**: ``touch_padding`` defines the number of additional pixels
surrounding the switch that should respond to a touch. (Note: The ``touch_padding``
variable updates the ``touch_boundary`` Control class variable. The definition of
the ``touch_boundary`` is used to determine the region on the Widget that returns
`True` in the `when_inside` function.)
**The Slider Widget**
.. figure:: slider.png
:scale: 100 %
:figwidth: 80%
:align: center
:alt: Diagram of the slider widget.
This is a diagram of a slider with component parts
"""
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
def __init__(
self,
x: int = 0,
y: int = 0,
        width: int = 100,  # recommended default
height: int = 40,
touch_padding: int = 0,
anchor_point: Tuple[int, int] = None,
anchored_position: Tuple[int, int] = None,
fill_color: Tuple[int, int, int] = (66, 44, 66),
outline_color: Tuple[int, int, int] = (30, 30, 30),
background_color: Tuple[int, int, int] = (255, 255, 255),
value: bool = False,
**kwargs,
):
Widget.__init__(
self, x=x, y=y, height=height, width=width, **kwargs, max_size=4
)
Control.__init__(self)
self._knob_width = height // 2
self._knob_height = height
self._knob_x = self._knob_width
self._knob_y = self._knob_height
self._slider_height = height // 5
self._height = self.height
        # pylint: disable=access-member-before-definition
if self._width is None:
self._width = 50
else:
self._width = self.width
self._fill_color = fill_color
self._outline_color = outline_color
self._background_color = background_color
self._switch_stroke = 2
self._touch_padding = touch_padding
self._value = value
self._anchor_point = anchor_point
self._anchored_position = anchored_position
self._create_slider()
def _create_slider(self):
# The main function that creates the switch display elements
self._x_motion = self._width
self._y_motion = 0
self._frame = RoundRect(
x=0,
y=0,
width=self.width,
height=self.height,
r=4,
fill=0x990099,
outline=self._outline_color,
stroke=self._switch_stroke,
)
self._switch_handle = RoundRect(
x=0,
y=0,
width=self._knob_width,
height=self._knob_height,
r=4,
fill=self._fill_color,
outline=self._outline_color,
stroke=self._switch_stroke,
)
self._switch_roundrect = RoundRect(
x=2,
y=self.height // 2 - self._slider_height // 2,
r=2,
width=self._width - 4,
height=self._slider_height,
fill=self._background_color,
outline=self._background_color,
stroke=self._switch_stroke,
)
self._bounding_box = [
0,
0,
self.width,
self._knob_height,
]
self.touch_boundary = [
self._bounding_box[0] - self._touch_padding,
self._bounding_box[1] - self._touch_padding,
self._bounding_box[2] + 2 * self._touch_padding,
self._bounding_box[3] + 2 * self._touch_padding,
]
self._switch_initial_x = self._switch_handle.x
self._switch_initial_y = self._switch_handle.y
for _ in range(len(self)):
self.pop()
self.append(self._frame)
self.append(self._switch_roundrect)
self.append(self._switch_handle)
self._update_position()
def _get_offset_position(self, position):
x_offset = int(self._x_motion * position // 2)
y_offset = int(self._y_motion * position)
return x_offset, y_offset
def _draw_position(self, position):
# apply the "easing" function to the requested position to adjust motion
position = easing(position)
# Get the position offset from the motion function
x_offset, y_offset = self._get_offset_position(position)
# Update the switch and text x- and y-positions
self._switch_handle.x = self._switch_initial_x + x_offset
self._switch_handle.y = self._switch_initial_y + y_offset
def when_selected(self, touch_point):
"""
Manages internal logic when widget is selected
"""
touch_x = touch_point[0] - self.x
touch_y = touch_point[1] - self.y
self._switch_handle.x = touch_x
super().selected((touch_x, touch_y, 0))
return self._switch_handle.x
def when_inside(self, touch_point):
"""Checks if the Widget was touched.
:param touch_point: x,y location of the screen, in absolute display coordinates.
:return: Boolean
"""
touch_x = (
touch_point[0] - self.x
) # adjust touch position for the local position
touch_y = touch_point[1] - self.y
return super().contains((touch_x, touch_y, 0))
@property
def value(self):
"""The current switch value (Boolean).
:return: Boolean
"""
return self._value
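
# Hedged usage sketch (not part of the upstream module). It assumes a PyPortal-style
# board with ``board.DISPLAY`` and a touchscreen driver providing (x, y, pressure)
# touch points; guarded so importing stays side-effect free.
if __name__ == "__main__":
    import board
    import displayio

    display = board.DISPLAY
    my_slider = Slider(x=20, y=30)
    group = displayio.Group()
    group.append(my_slider)
    display.show(group)
    # With a touchscreen object ``ts`` (e.g. adafruit_touchscreen.Touchscreen), the
    # widget would be driven roughly like this:
    #     while True:
    #         point = ts.touch_point
    #         if point and my_slider.when_inside(point):
    #             my_slider.when_selected(point)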
``` |
{
"source": "jposada202020/microplot",
"score": 3
} |
#### File: microplot/adafruit/bittest.py
```python
import board
import digitalio
import busio
import adafruit_sdcard
import storage
from adafruit_bitmapsaver import save_pixels
def save():
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
cs = digitalio.DigitalInOut(board.SD_CS)
sdcard = adafruit_sdcard.SDCard(spi, cs)
vfs = storage.VfsFat(sdcard)
storage.mount(vfs, "/sd")
print("Taking Screenshot...")
save_pixels("/sd/screenshot.bmp")
print("Screenshot taken")
```
#### File: microplot/adafruit/scatter-save.py
```python
from plotter import Plotter
from plots import ScatterPlot
import board
import digitalio
import busio
import adafruit_sdcard
import storage
from adafruit_bitmapsaver import save_pixels
def plot():
data = [[(20, 30), (40, 50), (10, 90), (60, 60)], [(10, 25), (45, 65)], [(33, 70)]]
splot = ScatterPlot(data, 'Scatter Plot')
plotter = Plotter()
splot.plot(plotter)
def save():
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
cs = digitalio.DigitalInOut(board.SD_CS)
sdcard = adafruit_sdcard.SDCard(spi, cs)
vfs = storage.VfsFat(sdcard)
storage.mount(vfs, "/sd")
save_pixels("/sd/splot.bmp")
plot()
save()
print('done')
```
#### File: microplot/shared/abstract_plotter.py
```python
from bmp import MonoBitmapWriter
class Frame:
def __init__(self, *specs):
self.width, self.height, self.tm, self.bm, self.lm, self.rm = specs
def bottom(self):
return self.height - self.bm
def y_span(self):
return self.bottom() - self.tm
def right(self):
return self.width - self.rm
class AbstractPlotter:
def __init__(self, frame: Frame = None):
self.frame = frame if frame else self.default_frame()
self.pen = None
def line(self, x1, y1, x2, y2, color=None):
if color is not None:
self.set_pen(color)
x1 = round(x1)
y1 = round(y1)
x2 = round(x2)
y2 = round(y2)
for (x, y) in self.bresenham(x1, y1, x2, y2):
self.display_pixel(x, y)
def width(self) -> int:
return self.frame.width
def height(self) -> int:
return self.frame.height
def text(self, x, y, text):
pass
def default_frame(self) -> Frame:
pass
def show(self):
pass
def get_pixel(self, x, y) -> tuple:
pass
def set_pen(self, color: tuple):
self.pen = color
def circle(self, x, y, r, color):
pass
def display_pixel(self, x, y):
pass
def write_mono_bitmap(self, file_name):
with MonoBitmapWriter(file_name, self.frame.width, self.frame.height) as mbw:
bytes_in_row = self.frame.width // 8
row_bytes = bytearray(bytes_in_row)
for i in range(self.frame.height):
for j in range(bytes_in_row):
row_bytes[j] = 0
for k in range(8):
x = k + 8 * j
y = self.frame.height - (i + 1)
bit = ((0,0,0) != self.get_pixel(x, y))
row_bytes[j] |= bit << (7 - k)
mbw.add_row(row_bytes)
"""Implementation of Bresenham's line drawing algorithm
See en.wikipedia.org/wiki/Bresenham's_line_algorithm
Code from https://github.com/encukou/bresenham
"""
def bresenham(self, x0, y0, x1, y1):
"""Yield integer coordinates on the line from (x0, y0) to (x1, y1).
Input coordinates should be integers.
The result will contain both the start and the end point.
"""
dx = x1 - x0
dy = y1 - y0
xsign = 1 if dx > 0 else -1
ysign = 1 if dy > 0 else -1
dx = abs(dx)
dy = abs(dy)
if dx > dy:
xx, xy, yx, yy = xsign, 0, 0, ysign
else:
dx, dy = dy, dx
xx, xy, yx, yy = 0, ysign, xsign, 0
D = 2 * dy - dx
y = 0
for x in range(dx + 1):
yield x0 + x * xx + y * yx, y0 + x * xy + y * yy
if D >= 0:
y += 1
D -= 2 * dx
D += 2 * dy
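
# Hedged illustration (not in the original module): the Bresenham generator above can
# be exercised without any display hardware, since it only yields integer coordinates.
if __name__ == "__main__":
    _plotter = AbstractPlotter(Frame(64, 64, 4, 4, 4, 4))
    print(list(_plotter.bresenham(0, 0, 5, 3)))
    # expected output: [(0, 0), (1, 1), (2, 1), (3, 2), (4, 2), (5, 3)]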
```
#### File: microplot/shared/bmp.py
```python
import math, struct, os
BF_TYPE = b'BM'
"""
Create and write out a Monochrome Bitmap File.
File is built from BITMAPFILEHEADER, BITMAPINFOHEADER, RGBQUAD table, scanlines
"""
def mult(m, n):
return m * ((n+m-1) // m)
def padding_length(w):
return (-w) % 4
class BitmapWriter:
def __init__(self, file_name, width=240, height=240):
self._bmf = None
self.file_name = file_name
self.width = width
self.height = height
def __enter__(self):
if self.file_name in os.listdir():
os.remove(self.file_name)
self._bmf = open(self.file_name,'wb')
self.write_header(self.width, self.height)
width_in_bytes = self.width // 8
self.padding = bytearray([0] * padding_length(width_in_bytes))
return self
def add_row(self, row):
self._bmf.write(row+self.padding)
def __exit__(self, et, val, tb):
self._bmf.close()
def write_header(self, width, height, biBitCount=b'\x01\x00', bfOffBits=b'\x20\x00\x00\x00'):
n = mult(4,width) * height + 0x20
self.write_bitmap_file_header(bfOffBits, n)
self.write_bitmap_info_header(biBitCount, height, width)
self.write_mono_rgb_triples()
def write_mono_rgb_triples(self):
        # palette entry 0 is white (0xFFFFFF), entry 1 is black (0x000000)
        rgbWhite = b'\xff\xff\xff'
        rgbBlack = b'\x00\x00\x00'
        self.write(rgbWhite,
                   rgbBlack)
def write_bitmap_info_header(self, biBitCount, height, width):
self.write(b'\x0C\x00\x00\x00',
struct.pack("<H", width),
struct.pack("<H", height),
b'\x01\x00',
biBitCount)
def write_bitmap_file_header(self, bfOffBits, n):
self.write(b"BM",
struct.pack("<I", n),
b'\x00\x00',
b'\x00\x00',
bfOffBits)
def write(self, *items):
for item in items:
self._bmf.write(item)
class MonoBitmapWriter(BitmapWriter):
pass
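
# Hedged usage sketch (not part of the original module): write a tiny 16x8 monochrome
# bitmap where every bit is 0, i.e. every pixel uses palette entry 0.
if __name__ == "__main__":
    with MonoBitmapWriter("tiny.bmp", width=16, height=8) as writer:
        for _ in range(8):
            writer.add_row(bytearray([0x00, 0x00]))  # two bytes = 16 pixels per row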
```
#### File: microplot/shared/demo_multi.py
```python
import math
from plotter import Plotter
from plots import LinePlot
from bitmap_saver import save_pixels
def row(i):
offset = [0, 45, 90, 135, 180][i]
return list(math.sin(math.radians(x + offset))
for x in range(0, 361, 5))
data = list(row(i) for i in range(5))
plot = LinePlot(data, 'Multi-line plot')
plotter = Plotter()
plot.plot(plotter)
``` |
{
"source": "jposada202020/SnT-Badge",
"score": 3
} |
#### File: SnT-Badge/src/ICM20602.py
```python
import adafruit_bus_device.i2c_device as i2c_device
def twos_comp(val, bits):
"""compute the 2's complement of int value val"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits) # compute negative value
return val
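# Examples (sanity checks): twos_comp(0x00FF, 16) == 255, twos_comp(0xFFFF, 16) == -1,
# twos_comp(0x8000, 16) == -32768 -- the raw sensor registers below are 16-bit two's complement.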
class ICM20602:
_BUFFER = bytearray(6)
def __init__(self, i2c, addr):
self._sensor = i2c_device.I2CDevice(i2c, addr)
print("Initializing")
self._write_u8(0x6B, 0x01) #self._sensor.write(b'\x6B\x01')
print("Config")
self._write_u8(0x1A, 0x01) #self._sensor.write(b'\x1A\x01')
@property
def gyro(self):
xhigh = self._read_u8(0x43)
xlow = self._read_u8(0x44)
valx = xhigh << 8 | xlow
valx = twos_comp(valx, 16)
valx = valx / 131 # 131 LSB per deg/s at the default +/-250 dps full-scale range
yhigh = self._read_u8(0x45)
ylow = self._read_u8(0x46)
valy = yhigh << 8 | ylow
valy = twos_comp(valy, 16)
valy = valy / 131
zhigh = self._read_u8(0x47)
zlow = self._read_u8(0x48)
valz = zhigh << 8 | zlow
valz = twos_comp(valz, 16)
valz = valz / 131
return (valx, valy, valz)
@property
def acceleration(self):
xhigh = self._read_u8(0x3b)
xlow = self._read_u8(0x3c)
yhigh = self._read_u8(0x3d)
ylow = self._read_u8(0x3e)
zhigh = self._read_u8(0x3f)
zlow = self._read_u8(0x40)
valx = xhigh << 8 | xlow
valx = twos_comp(valx, 16)
valx = valx / 16384 * 9.8 # 16384 LSB/g at the default +/-2 g range, scaled to m/s^2
valy = yhigh << 8 | ylow
valy = twos_comp(valy, 16)
valy = valy / 16384 * 9.8
valz = zhigh << 8 | zlow
valz = twos_comp(valz, 16)
valz = valz / 16384 * 9.8
return (valx, valy, valz)
def _read_u8(self, address):
with self._sensor as i2c:
self._BUFFER[0] = address & 0xFF
i2c.write_then_readinto(
self._BUFFER, self._BUFFER, out_end=1, in_start=1, in_end=2
)
return self._BUFFER[1]
def _read_bytes(self, address, count, buf):
with self._sensor as i2c:
buf[0] = address & 0xFF
i2c.write_then_readinto(buf, buf, out_end=1, in_end=count)
def _write_u8(self, address, val):
with self._sensor as i2c:
self._BUFFER[0] = address & 0xFF
self._BUFFER[1] = val & 0xFF
i2c.write(self._BUFFER, end=2)
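# Usage sketch (CircuitPython; the wiring and I2C address are assumptions -- the ICM-20602
# typically answers at 0x68, or 0x69 with AD0 pulled high):
# import board, busio
# imu = ICM20602(busio.I2C(board.SCL, board.SDA), 0x69)
# print(imu.acceleration) # (x, y, z) in m/s^2
# print(imu.gyro) # (x, y, z) in degrees per second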
``` |
{
"source": "jposchel/trusty-brusher-python",
"score": 4
} |
#### File: jposchel/trusty-brusher-python/food.py
```python
class Food:
"""
A Food eaten by a Person.
self.food_type: string value indicating type of food
self.decay_value: int value indicating propensity of food to cause tooth decay
Higher value indicates higher propensity; lower value indicates lower propensity
self.cleaning_value: int value indicating propensity of food to prevent tooth decay
Higher value indicates higher propensity; lower value indicates lower propensity
"""
def __init__(self, init_food_type, init_decay_value, init_cleaning_value):
self.food_type = init_food_type
self.decay_value = init_decay_value
self.cleaning_value = init_cleaning_value
def get_food_type(self):
return self.food_type
def set_food_type(self, new_food_type):
self.food_type = new_food_type
def get_decay_value(self):
return self.decay_value
def set_decay_value(self, new_decay_value):
self.decay_value = new_decay_value
def get_cleaning_value(self):
return self.cleaning_value
def set_cleaning_value(self, new_cleaning_value):
self.cleaning_value = new_cleaning_value
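# Example (illustrative values): Food("apple", 1, 3) has get_decay_value() == 1
# and get_cleaning_value() == 3.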
```
#### File: jposchel/trusty-brusher-python/germ.py
```python
class Germ:
"""
A Germ, often on a Tooth, in the Mouth of a Person.
self.location: int value indicating location of Germ in the Mouth
self.health: int value indicating the current health of Germ
"""
def __init__(self, init_location, init_health):
self.location = init_location
self.health = init_health
def get_location(self):
return self.location
def set_location(self, new_location):
self.location = new_location
def get_health(self):
return self.health
def set_health(self, new_health):
self.health = new_health
def increment_health(self):
self.health = self.health + 1
def decrement_health(self):
if self.health > 0:
self.health = self.health - 1
```
#### File: jposchel/trusty-brusher-python/trusty_brusher_game_world.py
```python
from calendar_clock import Calendar_Clock
from food import Food
from germ import Germ
from person import Person
from tooth import Tooth
class Trusty_Brusher_Game_World:
def __init__(self, init_person_name, init_person_gender, healthy_food_info, unhealthy_food_info):
self.master_clock = Calendar_Clock()
self.main_person = Person(init_person_name, init_person_gender)
self.teeth = [Tooth(counter, False, 0) for counter in range(26)]
self.healthy_food = [Food(specs[0], specs[1], specs[2]) for specs in healthy_food_info]
self.unhealthy_food = [Food(specs[0], specs[1], specs[2]) for specs in unhealthy_food_info]
self.current_food = None
self.food_pointer = None
self.germs = []
self.toothbrush = False
self.toothpaste = False
self.floss = False
self.game_paused = False
self.game_over = False
def is_game_paused(self):
return self.game_paused
def pause_game(self, new_pause_status):
self.game_paused = new_pause_status
def is_game_over(self):
return self.game_over
def get_toothbrush_status(self):
return self.toothbrush
def set_toothbrush_status(self, new_brush_status):
self.toothbrush = new_brush_status
def get_toothpaste_status(self):
return self.toothpaste
def set_toothpaste_status(self, new_paste_status):
self.toothpaste = new_paste_status
def add_healthy_food(self, food_item_specs):
self.healthy_food.append(Food(food_item_specs[0], food_item_specs[1], food_item_specs[2]))
def remove_healthy_food(self):
if len(self.healthy_food) > 0:
self.healthy_food.pop()
def add_unhealthy_food(self, food_item_specs):
self.unhealthy_food.append(Food(food_item_specs[0], food_item_specs[1], food_item_specs[2]))
def remove_unhealthy_food(self):
if len(self.unhealthy_food) > 0:
self.unhealthy_food.pop()
def _advance_food(self):
if self.food_pointer == None:
self.current_food = self.healthy_food[0]
self.food_pointer = [1, 0]
elif self.food_pointer[0] == 0:
self.current_food = self.unhealthy_food[self.food_pointer[1]]
if self.food_pointer[1] == len(self.unhealthy_food) - 1:
self.food_pointer = [1, 0]
else:
self.food_pointer = [1, self.food_pointer[1] + 1]
elif self.food_pointer[0] == 1:
self.current_food = self.healthy_food[self.food_pointer[1]]
if self.food_pointer[1] == len(self.healthy_food) - 1:
self.food_pointer = [0, 0]
else:
self.food_pointer = [0, self.food_pointer[1] + 1]
def eat(self):
# Put awesomeness here
# Eat well
self._advance_food()
self.main_person.eat()
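# Construction sketch (hypothetical food specs; each spec is a (type, decay, cleaning) triple
# matching the Food constructor):
# world = Trusty_Brusher_Game_World("Alex", "F", [("apple", 1, 3)], [("candy", 5, 0)])
# world.eat() # advances current_food and lets the person eat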
``` |
{
"source": "JPoser/giantbomb-cli",
"score": 3
} |
#### File: JPoser/giantbomb-cli/giantbombcli.py
```python
import json, requests, datetime
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
class giantbomb_cli:
def __init__(self):
#initialises the cli class
self.headers = {'User-Agent': 'giantbomb-test-cli 0.1', 'From': '<EMAIL>'}
# read config file and set api key class variable
self.config = ConfigParser()
self.config.read('config.ini')
self.api_key = self.config.get('default' , 'api_key')
def search(self, search_term):
# create dictionary for results
output_games = {}
# search the api for the term in the arguments
url = 'http://www.giantbomb.com/api/search/?api_key=%s&format=json&query="%s"&resources=game' % (self.api_key, search_term)
# call api using requests library with headers
api_search = requests.get(url, headers=self.headers).json()
#loop through dict
result_dict = api_search['results']
for game in result_dict:
output_games[game.get('name')] = game.get('api_detail_url')
#return dict with game name as key and api_urls as val
return output_games
def game_details(self, game):
# get details of specific game
# call search
search_results = self.search(game)
api_url = ""
# loop through keys to see if there is a specific match
for return_game, return_url in search_results.items():
if return_game.lower() == game.lower():
game_name = return_game
api_url = return_url
if api_url == "":
return "error game not found"
# set api_url to include extra arguments to use api key and return json
api_url = api_url + "?api_key=" + self.api_key + "&format=json"
# call api
game_get = requests.get(api_url, headers=self.headers).json()
# return game data and name
output_game_data = game_get['results']
return [game_name, output_game_data]
def output_search(self, games):
print("Search Results:")
for g in games:
print(g)
def output_game(self, game, dlc_flag):
#print(game)
game_name = game[0]
game_data = game[1]
print("Name: " + game_name)
print("Description: " + game_data['deck'])
print("Developed by: ", end='')
for devs in game_data['developers']:
print(devs['name'], end=', ')
print('')
print("Genre: ", end='')
for genre in game_data['genres']:
print(genre['name'], end=', ')
if dlc_flag == True:
dlcs = self.get_dlcs(self.game_details(game_name))
if len(dlcs) > 0:
try:
sorted_dlcs = sorted(dlcs, key=lambda p: datetime.datetime.strptime(p['release_date'], "%Y-%m-%d %H:%M:%S"))
except TypeError:
print("Error: release date not found - printing in ")
sorted_dlcs = dlcs
print("\nDLC list: ")
for dlc in sorted_dlcs:
print("DLC name: " + dlc['name'])
if dlc['deck'] != None:
print("DLC description: " + dlc['deck'])
print("Platform: " + dlc['platform'].get('name'))
if dlc['release_date'] != None:
print("Release Date: " + dlc['release_date'])
print("")
else:
print("")
print("No DLC found")
def get_dlcs(self, game):
# Get the list of a game's DLC entries from the API
# game is the [name, data] pair returned by game_details()
game_data = game[1]
dlc_list = []
results_list = []
# collect the DLC api_detail_url values from the game data (if any)
try:
for dlc in game_data['dlcs']:
dlc_list.append(dlc['api_detail_url'])
except KeyError:
results_list = []
for url in dlc_list:
url = url + "?api_key=" + self.api_key + "&format=json"
results_list.append(requests.get(url, headers=self.headers).json()['results'])
return results_list
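# Usage sketch (requires a config.ini with an api_key under [default]; "Doom" is just an example query):
# cli = giantbomb_cli()
# cli.output_search(cli.search("Doom"))
# cli.output_game(cli.game_details("Doom"), dlc_flag=True)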
``` |
{
"source": "JPoser/jacket-server",
"score": 3
} |
#### File: JPoser/jacket-server/app.py
```python
from flask import Flask, jsonify
import tweepy, configparser
config = configparser.ConfigParser()
config.read("config.ini")
consumer_key = config['twitter']['twitter_key']
consumer_secret = config['twitter']['twitter_secret']
access_token = config['twitter']['access_token']
access_token_secret = config['twitter']['access_token_secret']
app = Flask(__name__)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
twitter_api = tweepy.API(auth)
colour = ""
def check_tweet(tweet_text):
tweet_text = tweet_text.lower()
if "red" in tweet_text:
return "red"
elif "orange" in tweet_text:
return "orange"
elif "yellow" in tweet_text:
return "yellow"
elif "chartreuse" in tweet_text:
return "chartreuse"
elif "green" in tweet_text:
return "green"
elif "spring" in tweet_text:
return "spring"
elif "cyan" in tweet_text:
return "cyan"
elif "azure" in tweet_text:
return "azure"
elif "blue" in tweet_text:
return "blue"
elif "violet" in tweet_text:
return "violet"
elif "violet" in tweet_text:
return "violet"
elif "magenta" in tweet_text:
return "magenta"
elif "rose" in tweet_text:
return "rose"
else:
return "white"
def get_latest_mention():
tweets = twitter_api.mentions_timeline()
for tweet in tweets:
return (tweet.text)
@app.route('/', methods=['GET'])
def index():
return "Welcome to Jacket Server, turn back or suffer your DOOM"
@app.route('/api/v1.0/get_tweets', methods=['GET'])
def get_tweets():
if check_tweet(get_latest_mention()) != None:
colour = check_tweet(get_latest_mention())
return jsonify({'colour': colour, 'tweets': get_latest_mention()})
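# Example request against a local dev server (Flask defaults to port 5000):
# curl http://127.0.0.1:5000/api/v1.0/get_tweets
# -> {"colour": "blue", "tweets": "..."} (colour depends on the latest mention's text)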
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "jposes22/APNS_FCM_TESTER",
"score": 2
} |
#### File: jposes22/APNS_FCM_TESTER/apnx.py
```python
import socket, ssl, json, struct, sys
from datetime import datetime
## Constants never change
fcmUrl = 'https://fcm.googleapis.com/fcm/send'
fcmContentType = 'Content-Type: application/json'
apnsSandBoxHostSSL = ( 'gateway.sandbox.push.apple.com', 2195 )
apnsProductionHostSSL = ( 'gateway.push.apple.com', 2195)
apnsSandBoxHostTLS = ( 'api.development.push.apple.com', 443 )
apnsProductionHostTLS = ( 'api.push.apple.com', 443)
def showAllInfo():
print('-h to show help')
def mainWithArguments():
print('au')
def mainOptionsMenu():
print('au')
if __name__ == "__main__":
print("#### Running apnx pusher ####")
print(sys.argv[0])
showAllInfo()
mainWithArguments()
#originally from: http://stackoverflow.com/questions/1052645/apple-pns-push-notification-services-sample-code
# device token returned when the iPhone application
# registers to receive alerts
deviceToken = '<KEY>'
thePayLoad = {
'aps': {
'alert':'OMG! Push\'s works fine! with date: ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'sound':'k1DiveAlarm.caf',
'badge':42,
},
'test_data': { 'foo': 'bar' },
}
# Certificate issued by apple and converted to .pem format with openSSL
# Per Apple's Push Notification Guide (end of chapter 3), first export the cert in p12 format
# openssl pkcs12 -in cert.p12 -out cert.pem -nodes
# when prompted "Enter Import Password:" hit return
#
theCertfile = 'apns-dev-cert.pem'
#
data = json.dumps( thePayLoad )
# Clear out spaces in the device token and convert to hex
deviceToken = deviceToken.replace(' ','')
if sys.version_info[0] == 3:
byteToken = bytes.fromhex( deviceToken ) # Python 3
text = input("Choose and option:") # Python 3
else:
byteToken = deviceToken.decode('hex') # Python 2
text = raw_input("Choose and option:") # Python 2
theFormat = '!BH32sH%ds' % len(data)
theNotification = struct.pack( theFormat, 0, 32, byteToken, len(data), data )
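# The '!BH32sH%ds' format above packs the legacy APNs binary frame, big-endian:
# command (1 byte, 0) | token length (2 bytes, 32) | device token (32 bytes)
# | payload length (2 bytes) | JSON payload bytes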
# Create our connection using the certfile saved locally
ssl_sock = ssl.wrap_socket( socket.socket( socket.AF_INET, socket.SOCK_STREAM ), certfile = theCertfile )
ssl_sock.connect( apnsSandBoxHostSSL )
# Write out our data
ssl_sock.write( theNotification )
print "successfully"
# Close the connection -- apple would prefer that we keep
# a connection open and push data as needed.
ssl_sock.close()
``` |
{
"source": "jposi/regulations-parser",
"score": 3
} |
#### File: tree/xml_parser/reg_text.py
```python
import re
import logging
from lxml import etree
from regparser import content
from regparser.tree.struct import Node
from regparser.tree.paragraph import p_level_of, p_levels
from regparser.tree.xml_parser.appendices import build_non_reg_text
from regparser.tree import reg_text
from regparser.tree.xml_parser import tree_utils
def determine_level(c, current_level, next_marker=None):
""" Regulation paragraphs are hierarchical. This determines which level
the paragraph is at. Convert between p_level indexing and depth here by
adding one"""
potential = p_level_of(c)
if len(potential) > 1 and next_marker: # resolve ambiguity
following = p_level_of(next_marker)
# Add character index
potential = [(level, p_levels[level].index(c)) for level in potential]
following = [(level, p_levels[level].index(next_marker))
for level in following]
# Check if we can be certain using the following marker
for pot_level, pot_idx in potential:
for next_level, next_idx in following:
if ( # E.g. i followed by A or i followed by 1
(next_idx == 0 and next_level == pot_level + 1)
or # E.g. i followed by ii
(next_level == pot_level and next_idx > pot_idx)
or # E.g. i followed by 3
(next_level < pot_level and next_idx > 0)):
return pot_level + 1
logging.warning("Ambiguous marker (%s) not followed by something "
+ "disambiguating (%s)", c, next_marker)
return potential[0][0] + 1
else:
return potential[0] + 1
def get_reg_part(reg_doc):
"""
The CFR Part number for a regulation is contained within
an EAR tag, for a Federal Register notice it's in a REGTEXT tag. Get the
part number of the regulation.
"""
#FR notice
reg_text_xml = reg_doc.xpath('//REGTEXT')
if reg_text_xml:
return reg_text_xml[0].attrib['PART']
#e-CFR XML
reg_ear = reg_doc.xpath('//PART/EAR')
if reg_ear:
return reg_ear[0].text.split('Pt.')[1].strip()
def get_title(reg_doc):
""" Extract the title of the regulation. """
parent = reg_doc.xpath('//PART/HD')[0]
title = parent.text
return title
def preprocess_xml(xml):
"""This transforms the read XML through macros. Each macro consists of
an xpath and a replacement xml string"""
for path, replacement in content.Macros():
replacement = etree.fromstring('<ROOT>' + replacement + '</ROOT>')
for node in xml.xpath(path):
parent = node.getparent()
idx = parent.index(node)
parent.remove(node)
for repl in replacement:
parent.insert(idx, repl)
idx += 1
def build_tree(reg_xml):
doc = etree.fromstring(reg_xml)
preprocess_xml(doc)
reg_part = get_reg_part(doc)
title = get_title(doc)
tree = Node("", [], [reg_part], title)
part = doc.xpath('//PART')[0]
subpart_xmls = [c for c in part.getchildren() if c.tag == 'SUBPART']
if len(subpart_xmls) > 0:
subparts = [build_subpart(reg_part, s) for s in subpart_xmls]
tree.children = subparts
else:
section_xmls = [c for c in part.getchildren() if c.tag == 'SECTION']
sections = []
for section_xml in section_xmls:
sections.extend(build_from_section(reg_part, section_xml))
empty_part = reg_text.build_empty_part(reg_part)
empty_part.children = sections
tree.children = [empty_part]
non_reg_sections = build_non_reg_text(doc, reg_part)
tree.children += non_reg_sections
return tree
def get_subpart_title(subpart_xml):
hds = subpart_xml.xpath('./HD')
return [hd.text for hd in hds][0]
def build_subpart(reg_part, subpart_xml):
subpart_title = get_subpart_title(subpart_xml)
subpart = reg_text.build_subpart(subpart_title, reg_part)
sections = []
for ch in subpart_xml.getchildren():
if ch.tag == 'SECTION':
sections.extend(build_from_section(reg_part, ch))
subpart.children = sections
return subpart
def get_markers(text):
""" Extract all the paragraph markers from text. Do some checks on the
collapsed markers."""
markers = tree_utils.get_paragraph_markers(text)
collapsed_markers = tree_utils.get_collapsed_markers(text)
# Check that the collapsed markers make sense (i.e. are at least one
# level below the initial marker)
if markers and collapsed_markers:
initial_marker_levels = p_level_of(markers[-1])
final_collapsed_markers = []
for collapsed_marker in collapsed_markers:
collapsed_marker_levels = p_level_of(collapsed_marker)
if any(c > f for f in initial_marker_levels
for c in collapsed_marker_levels):
final_collapsed_markers.append(collapsed_marker)
collapsed_markers = final_collapsed_markers
markers_list = [m for m in markers] + [m for m in collapsed_markers]
return markers_list
def get_markers_and_text(node, markers_list):
node_text = tree_utils.get_node_text(node, add_spaces=True)
text_with_tags = tree_utils.get_node_text_tags_preserved(node)
if len(markers_list) > 1:
actual_markers = ['(%s)' % m for m in markers_list]
plain_markers = [m.replace('<E T="03">', '').replace('</E>', '')
for m in actual_markers]
node_texts = tree_utils.split_text(node_text, plain_markers)
tagged_texts = tree_utils.split_text(text_with_tags, actual_markers)
node_text_list = zip(node_texts, tagged_texts)
elif markers_list:
node_text_list = [(node_text, text_with_tags)]
return zip(markers_list, node_text_list)
def next_marker(xml_node, remaining_markers):
"""Try to determine the marker following the current xml_node. Remaining
markers is a list of other marks *within* the xml_node. May return
None"""
# More markers in this xml node
if remaining_markers:
return remaining_markers[0][0]
# Check the next xml node; skip over stars
sib = xml_node.getnext()
while sib is not None and sib.tag in ('STARS', 'PRTPAGE'):
sib = sib.getnext()
if sib is not None:
next_text = tree_utils.get_node_text(sib)
next_markers = get_markers(next_text)
if next_markers:
return next_markers[0]
def build_from_section(reg_part, section_xml):
p_level = 1
m_stack = tree_utils.NodeStack()
section_texts = []
for ch in (ch for ch in section_xml.getchildren() if ch.tag == 'P'):
text = tree_utils.get_node_text(ch, add_spaces=True)
tagged_text = tree_utils.get_node_text_tags_preserved(ch)
markers_list = get_markers(tagged_text)
if not markers_list:
section_texts.append((text, tagged_text))
else:
markers_and_text = get_markers_and_text(ch, markers_list)
# Easier to reason if we view the list as a stack
markers_and_text = list(reversed(markers_and_text))
while markers_and_text:
m, node_text = markers_and_text.pop()
m_sans_markup = m.replace('<E T="03">', '').replace('</E>', '')
n = Node(node_text[0], [], [str(m_sans_markup)],
source_xml=ch)
n.tagged_text = unicode(node_text[1])
new_p_level = determine_level(
m, p_level, next_marker(ch, markers_and_text))
last = m_stack.peek()
if len(last) == 0:
m_stack.push_last((new_p_level, n))
else:
m_stack.add(new_p_level, n)
p_level = new_p_level
section_no = section_xml.xpath('SECTNO')[0].text
subject_xml = section_xml.xpath('SUBJECT')
if not subject_xml:
subject_xml = section_xml.xpath('RESERVED')
subject_text = subject_xml[0].text
nodes = []
section_nums = []
for match in re.finditer(r'%s\.(\d+)' % reg_part, section_no):
section_nums.append(int(match.group(1)))
# Span of section numbers
if u'§§' == section_no[:2] and '-' in section_no:
first, last = section_nums
section_nums = []
for i in range(first, last + 1):
section_nums.append(i)
for section_number in section_nums:
section_number = str(section_number)
plain_sect_texts = [s[0] for s in section_texts]
tagged_sect_texts = [s[1] for s in section_texts]
section_text = ' '.join([section_xml.text] + plain_sect_texts)
tagged_section_text = ' '.join([section_xml.text] + tagged_sect_texts)
section_title = u"§ " + reg_part + "." + section_number
if subject_text:
section_title += " " + subject_text
sect_node = Node(
section_text, label=[reg_part, section_number],
title=section_title)
sect_node.tagged_text = tagged_section_text
m_stack.add_to_bottom((1, sect_node))
while m_stack.size() > 1:
m_stack.unwind()
nodes.append(m_stack.pop()[0][1])
return nodes
```
#### File: tree/xml_parser/tree_utils.py
```python
import HTMLParser
from itertools import chain
import re
from lxml import etree
from pyparsing import Literal, Optional, Regex, StringStart, Suppress
from regparser.citations import remove_citation_overlaps
from regparser.grammar.unified import any_depth_p
from regparser.tree.paragraph import p_levels
from regparser.tree.priority_stack import PriorityStack
def prepend_parts(parts_prefix, n):
""" Recursively preprend parts_prefix to the parts of the node
n. Parts is a list of markers that indicates where you are in the
regulation text. """
n.label = parts_prefix + n.label
for c in n.children:
prepend_parts(parts_prefix, c)
return n
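# Example: prepend_parts(['1111', 'A'], n) turns a node labelled ['1'] into ['1111', 'A', '1'],
# and applies the same prefix to every descendant label.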
class NodeStack(PriorityStack):
""" The NodeStack aids our construction of a struct.Node tree. We process
xml one paragraph at a time; using a priority stack allows us to insert
items at their proper depth and unwind the stack (collecting children) as
necessary"""
def unwind(self):
""" Unwind the stack, collapsing sub-paragraphs that are on the stack
into the children of the previous level. """
children = self.pop()
parts_prefix = self.peek_last()[1].label
children = [prepend_parts(parts_prefix, c[1]) for c in children]
self.peek_last()[1].children = children
def split_text(text, tokens):
"""
Given a body of text that contains tokens,
splice the text along those tokens.
"""
starts = [text.find(t) for t in tokens]
slices = zip(starts, starts[1:])
texts = [text[i[0]:i[1]] for i in slices] + [text[starts[-1]:]]
return texts
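# Example: split_text("(a) Text (b) More", ["(a)", "(b)"]) -> ["(a) Text ", "(b) More"]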
_first_markers = []
for idx, level in enumerate(p_levels):
marker = (Suppress(Regex(u',|\.|-|—|>'))
+ Suppress('(')
+ Literal(level[0])
+ Suppress(')'))
for inner_idx in range(idx + 1, len(p_levels)):
inner_level = p_levels[inner_idx]
marker += Optional(Suppress('(')
+ Literal(inner_level[0])
+ Suppress(')'))
_first_markers.append(marker)
def get_collapsed_markers(text):
"""Not all paragraph markers are at the beginning of of the text. This
grabs inner markers like (1) and (i) here:
(c) cContent —(1) 1Content (i) iContent"""
matches = []
for parser in _first_markers:
matches.extend(parser.scanString(text))
# remove matches at the beginning
if matches and matches[0][1] == 0:
matches = matches[1:]
# remove any that overlap with citations
matches = [m for m, _, _ in remove_citation_overlaps(text, matches)]
# get the letters; poor man's flatten
return reduce(lambda lhs, rhs: list(lhs) + list(rhs), matches, [])
def get_paragraph_markers(text):
""" From a body of text that contains paragraph markers, extract the
initial markers. """
for citation, start, end in any_depth_p.scanString(text):
if start == 0:
markers = [citation.p1, citation.p2, citation.p3, citation.p4,
citation.p5, citation.p6]
if markers[4]:
markers[4] = '<E T="03">' + markers[4] + '</E>'
if markers[5]:
markers[5] = '<E T="03">' + markers[5] + '</E>'
return list(filter(bool, markers))
return []
def _should_add_space(prev_text, next_text):
"""Logic to determine where to add spaces to XML. Generally this is just
as matter of checking for space characters, but there are some
outliers"""
prev_text, next_text = prev_text[-1:], next_text[:1]
return (not prev_text.isspace() and not next_text.isspace()
and next_text
and prev_text not in '([/<'
and next_text not in ').;,]>/')
def get_node_text(node, add_spaces=False):
""" Extract all the text from an XML node (including the
text of it's children). """
parts = [node.text] +\
list(chain(*([c.text, c.tail] for c in node.getchildren()))) +\
[node.tail]
if add_spaces:
final_text = ''
for part in filter(bool, parts):
if _should_add_space(final_text, part):
final_text += " " + part
else:
final_text += part
return final_text.strip()
else:
return ''.join(filter(None, parts))
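# Example (illustrative XML): for <P>Hi <E>there</E> friend</P>,
# get_node_text(node) -> "Hi there friend"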
def get_node_text_tags_preserved(node):
""" Given an XML node, generate text from the node, skipping the PRTPAGE
tag. """
html_parser = HTMLParser.HTMLParser()
if node.text:
node_text = node.text
else:
node_text = ''
for c in node:
if c.tag == 'E':
#xlmns non-sense makes me do this.
e_tag = '<E T="03">%s</E>' % c.text
node_text += e_tag
if c.tail is not None:
node_text += c.tail
node_text = html_parser.unescape(node_text)
return node_text
```
#### File: regulations-parser/tests/grammar_delays_tests.py
```python
from datetime import date
from unittest import TestCase
from regparser.grammar.delays import *
class GrammarDelaysTests(TestCase):
def test_date_parser(self):
result = date_parser.parseString("February 7, 2012")
self.assertEqual(date(2012, 2, 7), result[0])
result = date_parser.parseString("April 21, 1987")
self.assertEqual(date(1987, 4, 21), result[0])
```
#### File: regulations-parser/tests/notice_build_interp_tests.py
```python
from unittest import TestCase
from lxml import etree
from mock import patch
from regparser.notice import build_interp
from regparser.notice.diff import Amendment
class NoticeBuildInterpTest(TestCase):
@patch('regparser.notice.build_interp.interpretations')
def test_process_with_headers(self, interpretations):
xml_str1 = """
<REGTEXT>
<EXTRACT>
<P>Something</P>
<STARS />
<HD>Supplement I</HD>
<HD>A</HD>
<T1>a</T1>
<P>b</P>
</EXTRACT>
</REGTEXT>"""
xml_str2 = """
<REGTEXT>
<P>Something</P>
<STARS />
<SUBSECT><HD>Supplement I</HD></SUBSECT>
<HD>A</HD>
<T1>a</T1>
<P>b</P>
</REGTEXT>"""
xml_str3 = """
<REGTEXT>
<AMDPAR>1. In Supplement I to part 111, under...</AMDPAR>
<P>Something</P>
<STARS />
<HD>SUPPLEMENT I</HD>
<HD>A</HD>
<T1>a</T1>
<P>b</P>
</REGTEXT>"""
xml_str4 = """
<REGTEXT>
<AMDPAR>1. In Supplement I to part 111, under...</AMDPAR>
<P>Something</P>
<STARS />
<APPENDIX>
<HD>SUPPLEMENT I</HD>
</APPENDIX>
<HD>A</HD>
<T1>a</T1>
<P>b</P>
<PRTPAGE />
</REGTEXT>"""
for xml_str in (xml_str1, xml_str2, xml_str3, xml_str4):
build_interp.process_with_headers('111', etree.fromstring(xml_str))
root, nodes = interpretations.parse_from_xml.call_args[0]
self.assertEqual(root.label, ['111', 'Interp'])
self.assertEqual(['HD', 'T1', 'P'], [n.tag for n in nodes])
def test_process_with_headers_subpart_confusion(self):
xml_str = u"""
<REGTEXT>
<AMDPAR>
1. In Supplement I to part 111, under Section 33,
paragraph 5 is added.
</AMDPAR>
<HD>Supplement I</HD>
<SUBPART>
<SECTION>
<SECTNO>§ 111.33</SECTNO>
<SUBJECT>Stubby Subby</SUBJECT>
<STARS />
<P>5. Some Content</P>
</SECTION>
</SUBPART>
</REGTEXT>"""
xml = etree.fromstring(xml_str)
interp = build_interp.process_with_headers('111', xml)
self.assertEqual(1, len(interp.children))
c33 = interp.children[0]
self.assertEqual(c33.label, ['111', '33', 'Interp'])
self.assertEqual(1, len(c33.children))
c335 = c33.children[0]
self.assertEqual(c335.label, ['111', '33', 'Interp', '5'])
def test_process_without_headers(self):
xml = """
<REGTEXT>
<AMDPAR>Adding comment 33(c)-5, 34(b)-5, and 34(b)-6</AMDPAR>
<P>5. five five five</P>
<P>i. eye eye eye</P>
<P>5. five five five2</P>
<P>6. six six six</P>
</REGTEXT>"""
amended_labels = [Amendment('POST', '111-Interpretations-33-(c)-5'),
Amendment('POST', '111-Interpretations-34-(b)-5'),
Amendment('POST', '111-Interpretations-34-(b)-6')]
interp = build_interp.process_without_headers(
'111', etree.fromstring(xml), amended_labels)
self.assertEqual(2, len(interp.children))
c, b = interp.children
self.assertEqual(c.label, ['111', '33', 'c', 'Interp'])
self.assertEqual(1, len(c.children))
c5 = c.children[0]
self.assertEqual('5. five five five', c5.text.strip())
self.assertEqual(c5.label, ['111', '33', 'c', 'Interp', '5'])
self.assertEqual(1, len(c5.children))
c5i = c5.children[0]
self.assertEqual('i. eye eye eye', c5i.text.strip())
self.assertEqual(c5i.label, ['111', '33', 'c', 'Interp', '5', 'i'])
self.assertEqual([], c5i.children)
b5, b6 = b.children
self.assertEqual('5. five five five2', b5.text.strip())
self.assertEqual(b5.label, ['111', '34', 'b', 'Interp', '5'])
self.assertEqual([], b5.children)
self.assertEqual('6. six six six', b6.text.strip())
self.assertEqual(b6.label, ['111', '34', 'b', 'Interp', '6'])
self.assertEqual([], b6.children)
```
#### File: regulations-parser/tests/tree_xml_parser_appendices_tests.py
```python
from unittest import TestCase
from lxml import etree
from lxml import html
from regparser.tree.struct import Node
from regparser.tree.xml_parser import appendices, tree_utils
class AppendicesTest(TestCase):
def test_process_appendix(self):
"""Integration test for appendices"""
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<P>Intro text</P>
<HD SOURCE="HD1">Header 1</HD>
<P>Content H1-1</P>
<P>Content H1-2</P>
<HD SOURCE="HD2">Subheader</HD>
<P>Subheader content</P>
<HD SOURCE="HD1">Header <E T="03">2</E></HD>
<P>Final <E T="03">Content</E></P>
<GPH>
<PRTPAGE P="650" />
<GID>MYGID</GID>
</GPH>
<GPOTABLE CDEF="s50,15,15" COLS="3" OPTS="L2">
<BOXHD>
<CHED H="1">For some reason <LI>lis</LI></CHED>
<CHED H="2">column two</CHED>
<CHED H="2">a third column</CHED>
</BOXHD>
<ROW>
<ENT I="01">0</ENT>
<ENT/>
<ENT>Content3</ENT>
</ROW>
<ROW>
<ENT>Cell 1</ENT>
<ENT>Cell 2</ENT>
<ENT>Cell 3</ENT>
</ROW>
</GPOTABLE>
<FP SOURCE="FR-1">A-3 Some header here</FP>
<P>Content A-3</P>
<P>A-4 Another header</P>
<P>Content A-4</P>
</APPENDIX>
"""
appendix = appendices.process_appendix(etree.fromstring(xml), 1111)
self.assertEqual(5, len(appendix.children))
intro, h1, h2, a3, a4 = appendix.children
self.assertEqual([], intro.children)
self.assertEqual("Intro text", intro.text.strip())
self.assertEqual(3, len(h1.children))
self.assertEqual('Header 1', h1.title)
c1, c2, sub = h1.children
self.assertEqual([], c1.children)
self.assertEqual('Content H1-1', c1.text.strip())
self.assertEqual([], c2.children)
self.assertEqual('Content H1-2', c2.text.strip())
self.assertEqual(1, len(sub.children))
self.assertEqual('Subheader', sub.title)
self.assertEqual('Subheader content', sub.children[0].text.strip())
self.assertEqual(3, len(h2.children))
self.assertEqual('Header 2', h2.title)
self.assertEqual('Final Content', h2.children[0].text.strip())
self.assertEqual('', h2.children[1].text.strip())
table_lines = h2.children[2].text.strip().split('\n')
self.assertEqual('|For some reason lis|column two|a third column|',
table_lines[0])
self.assertEqual('|---|---|---|', table_lines[1])
self.assertEqual('|0||Content3|', table_lines[2])
self.assertEqual('|Cell 1|Cell 2|Cell 3|', table_lines[3])
self.assertEqual('A-3 Some header here', a3.title)
self.assertEqual('A-4 Another header', a4.title)
def test_process_appendix_header_depth(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<P>1. Some content</P>
<HD SOURCE="HD3">An Interruption</HD>
<P>Moo</P>
<P>2. More content</P>
</APPENDIX>"""
appendix = appendices.process_appendix(etree.fromstring(xml), 1111)
self.assertEqual(2, len(appendix.children))
a1, a2 = appendix.children
self.assertEqual(['1111', 'A', '1'], a1.label)
self.assertEqual(1, len(a1.children))
self.assertEqual('1. Some content', a1.text.strip())
self.assertEqual(['1111', 'A', '2'], a2.label)
self.assertEqual(0, len(a2.children))
self.assertEqual('2. More content', a2.text.strip())
def test_process_appendix_header_is_paragraph(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<HD SOURCE="HD2">A-1 - First kind of awesome</HD>
<HD SOURCE="HD3">(A) First Subkind</HD>
<P>1. Content</P>
<HD SOURCE="HD3">(B) Next Subkind</HD>
<P>1. Moar Contents</P>
<HD SOURCE="HD3">I. Remains Header</HD>
<P>1. Content tent</P>
</APPENDIX>"""
appendix = appendices.process_appendix(etree.fromstring(xml), 1111)
self.assertEqual(1, len(appendix.children))
a1 = appendix.children[0]
self.assertEqual(['1111', 'A', '1'], a1.label)
self.assertEqual(2, len(a1.children))
self.assertEqual('A-1 - First kind of awesome', a1.title.strip())
a1a, a1B = a1.children
self.assertEqual(['1111', 'A', '1', 'A'], a1a.label)
self.assertEqual(1, len(a1a.children))
self.assertEqual('(A) First Subkind', a1a.text.strip())
self.assertEqual('1. Content', a1a.children[0].text.strip())
self.assertEqual(['1111', 'A', '1', 'B'], a1B.label)
self.assertEqual(1, len(a1B.children))
self.assertEqual('(B) Next Subkind', a1B.text.strip())
self.assertEqual('1. Moar Contents', a1B.children[0].text.strip())
self.assertEqual(1, len(a1B.children))
a1B1 = a1B.children[0]
self.assertEqual(1, len(a1B1.children))
a1B1h = a1B1.children[0]
self.assertEqual(a1B1h.title.strip(), 'I. Remains Header')
self.assertEqual(1, len(a1B1h.children))
self.assertEqual(a1B1h.children[0].text.strip(), '1. Content tent')
def test_header_ordering(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<HD SOURCE="HD1">A-1 Content</HD>
<HD SOURCE="HD3">Level 1</HD>
<HD SOURCE="HD2">Level 2</HD>
<P>Paragraph</P>
<HD SOURCE="HD1">A-1(A) More Content</HD>
<P>A1A Paragraph</P>
</APPENDIX>"""
appendix = appendices.process_appendix(etree.fromstring(xml), 1111)
self.assertEqual(2, len(appendix.children))
a1, a1A = appendix.children
self.assertEqual(1, len(a1A.children))
self.assertEqual(['1111', 'A', '1'], a1.label)
self.assertEqual(1, len(a1.children))
a1_1 = a1.children[0]
self.assertEqual(['1111', 'A', '1', 'h1'], a1_1.label)
self.assertEqual(1, len(a1_1.children))
a1_1_1 = a1_1.children[0]
self.assertEqual(['1111', 'A', '1', 'h1', 'h2'], a1_1_1.label)
self.assertEqual(1, len(a1_1_1.children))
def test_process_same_sub_level(self):
xml = u"""
<APPENDIX>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<P>1. 1 1 1</P>
<P>a. 1a 1a 1a</P>
<P>b. 1b 1b 1b</P>
<P>c. 1c 1c 1c</P>
<P>d. 1d 1d 1d</P>
<P>e. 1e 1e 1e</P>
<P>f. 1f 1f 1f</P>
<P>2. 2 2 2</P>
<P>a. 2a 2a 2a</P>
<P>i. 2ai 2ai 2ai</P>
<P>ii. 2aii 2aii 2aii</P>
<P>a. 2aiia 2aiia 2aiia</P>
<P>b. 2aiib 2aiib 2aiib</P>
<P>c. 2aiic 2aiic 2aiic</P>
<P>d. 2aiid 2aiid 2aiid</P>
<P>b. 2b 2b 2b</P>
</APPENDIX>"""
appendix = appendices.process_appendix(etree.fromstring(xml), 1111)
self.assertEqual(['1111', 'A'], appendix.label)
self.assertEqual(2, len(appendix.children))
a1, a2 = appendix.children
self.assertEqual(['1111', 'A', '1'], a1.label)
self.assertEqual(6, len(a1.children))
for i in range(6):
self.assertEqual(['1111', 'A', '1', chr(i + ord('a'))],
a1.children[i].label)
self.assertEqual(['1111', 'A', '2'], a2.label)
self.assertEqual(2, len(a2.children))
a2a, a2b = a2.children
self.assertEqual(['1111', 'A', '2', 'a'], a2a.label)
self.assertEqual(2, len(a2a.children))
a2ai, a2aii = a2a.children
self.assertEqual(['1111', 'A', '2', 'a', 'i'], a2ai.label)
self.assertEqual(0, len(a2ai.children))
self.assertEqual(['1111', 'A', '2', 'a', 'ii'], a2aii.label)
self.assertEqual(4, len(a2aii.children))
for i in range(4):
self.assertEqual(['1111', 'A', '2', 'a', 'ii', chr(i + ord('a'))],
a2aii.children[i].label)
self.assertEqual(['1111', 'A', '2', 'b'], a2b.label)
self.assertEqual(0, len(a2b.children))
def test_process_notes(self):
xml = u"""
<APPENDIX>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<NOTE>
<P>Par</P>
<E>Emem</E>
<P>Parparpar</P>
</NOTE>
</APPENDIX>"""
appendix = appendices.process_appendix(etree.fromstring(xml), 1111)
self.assertEqual(['1111', 'A'], appendix.label)
self.assertEqual(1, len(appendix.children))
note = appendix.children[0]
text = '```note\nPar\nEmem\nParparpar\n```'
self.assertEqual(note.text, text)
def test_initial_marker(self):
self.assertEqual(("i", "i."), appendices.initial_marker("i. Hi"))
self.assertEqual(("iv", "iv."), appendices.initial_marker("iv. Hi"))
self.assertEqual(("A", "A."), appendices.initial_marker("A. Hi"))
self.assertEqual(("3", "3."), appendices.initial_marker("3. Hi"))
self.assertEqual(("i", "(i)"), appendices.initial_marker("(i) Hi"))
self.assertEqual(("iv", "(iv)"), appendices.initial_marker("(iv) Hi"))
self.assertEqual(("A", "(A)"), appendices.initial_marker("(A) Hi"))
self.assertEqual(("3", "(3)"), appendices.initial_marker("(3) Hi"))
def test_remove_toc(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<FP>A-1 Awesome</FP>
<FP>A-2 More Awesome</FP>
<FP>A-1 Incorrect TOC</FP>
<P>A-3 The End of Awesome</P>
<HD>A-1Awesomer</HD>
<P>Content content</P>
</APPENDIX>"""
# Note that the title isn't identical
xml = etree.fromstring(xml)
appendices.remove_toc(xml, 'A')
self.assertEqual(['EAR', 'HD', 'HD', 'P'], [t.tag for t in xml])
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<FP>A-1 Awesome</FP>
<FP>A-2 More Awesome</FP>
<FP>A-1 Incorrect TOC</FP>
<P>A-3 The End of Awesome</P>
<GPH><GID>GIDGID</GID></GPH>
<HD>A-3Awesomer</HD>
<P>Content content</P>
</APPENDIX>"""
# Note that the title isn't identical
xml = etree.fromstring(xml)
appendices.remove_toc(xml, 'A')
self.assertEqual(['EAR', 'HD', 'GPH', 'HD', 'P'], [t.tag for t in xml])
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<FP>A-1 Awesome</FP>
<P>Good Content here</P>
<FP>A-2 More Awesome</FP>
<P>More Content</P>
<HD>A-11 Crank It Up</HD>
<P>Content content</P>
</APPENDIX>"""
xml = etree.fromstring(xml)
appendices.remove_toc(xml, 'A')
self.assertEqual(['EAR', 'HD', 'FP', 'P', 'FP', 'P', 'HD', 'P'],
[t.tag for t in xml])
def test_title_label_pair(self):
title = u'A-1—Model Clauses'
self.assertEqual(('1', 2), appendices.title_label_pair(title, 'A'))
title = u'Part III—Construction Period'
self.assertEqual(('III', 2), appendices.title_label_pair(title, 'A'))
def test_title_label_pair_parens(self):
title = u'G-13(A)—Has No parent'
self.assertEqual(('13(A)', 2), appendices.title_label_pair(title, 'G'))
title = u'G-13(C)(1) - Some Title'
self.assertEqual(('13(C)(1)', 2),
appendices.title_label_pair(title, 'G'))
title = u'G-13A - Some Title'
self.assertEqual(('13A', 2), appendices.title_label_pair(title, 'G'))
title = u'G-13And Some Smashed Text'
self.assertEqual(('13', 2), appendices.title_label_pair(title, 'G'))
class AppendixProcessorTest(TestCase):
def setUp(self):
self.ap = appendices.AppendixProcessor()
self.ap.paragraph_counter = 0
self.ap.depth = 0
self.ap.m_stack = tree_utils.NodeStack()
def result(self):
return self.ap.m_stack.peek_last()
def test_paragraph_no_marker(self):
self.ap.paragraph_no_marker("Paragraph Text")
lvl, node = self.result()
self.assertEqual(node.text, 'Paragraph Text')
self.assertEqual(0, lvl)
self.assertEqual(node.label, ['p1'])
# If a header was before the paragraph, increment the level 1
self.ap.m_stack.add(0, Node(label=['h1'], title='Some section'))
self.ap.paragraph_no_marker("Paragraph Text")
lvl, node = self.result()
self.assertEqual(node.text, 'Paragraph Text')
self.assertEqual(1, lvl)
self.assertEqual(node.label, ['p2'])
def test_paragraph_with_marker(self):
self.ap.paragraph_with_marker("(a) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(a) A paragraph')
self.assertEqual(lvl, 1)
self.assertEqual(node.label, ['a'])
self.ap.paragraph_with_marker("(b) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(b) A paragraph')
self.assertEqual(lvl, 1)
self.assertEqual(node.label, ['b'])
self.ap.paragraph_with_marker("(1) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(1) A paragraph')
self.assertEqual(lvl, 2)
self.assertEqual(node.label, ['1'])
self.ap.paragraph_with_marker("(2) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(2) A paragraph')
self.assertEqual(lvl, 2)
self.assertEqual(node.label, ['2'])
self.ap.paragraph_with_marker("(c) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(c) A paragraph')
self.assertEqual(lvl, 1)
self.assertEqual(node.label, ['c'])
self.ap.paragraph_no_marker("Some text")
lvl, node = self.result()
self.assertEqual(node.text, 'Some text')
self.assertEqual(lvl, 1) # Stay on the same level
self.assertEqual(node.label, ['p1'])
self.ap.paragraph_with_marker("(d) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(d) A paragraph')
self.assertEqual(lvl, 1)
self.assertEqual(node.label, ['d'])
def test_paragraph_period(self):
self.ap.paragraph_with_marker("1. A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '1. A paragraph')
self.assertEqual(lvl, 1)
self.assertEqual(node.label, ['1'])
self.ap.paragraph_with_marker("(b) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(b) A paragraph')
self.assertEqual(lvl, 2)
self.assertEqual(node.label, ['b'])
self.ap.paragraph_with_marker("A. A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, 'A. A paragraph')
self.assertEqual(lvl, 3)
self.assertEqual(node.label, ['A'])
self.ap.paragraph_no_marker("code . is here")
lvl, node = self.result()
self.assertEqual(node.text, 'code . is here')
self.assertEqual(lvl, 3) # Stay on the same level
self.assertEqual(node.label, ['p1'])
def test_paragraph_roman(self):
self.ap.paragraph_with_marker("(1) A paragraph", "(b) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(1) A paragraph')
self.assertEqual(lvl, 1)
self.assertEqual(node.label, ['1'])
self.ap.paragraph_with_marker("(b) A paragraph", "(i) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(b) A paragraph')
self.assertEqual(lvl, 2)
self.assertEqual(node.label, ['b'])
self.ap.paragraph_with_marker("(i) A paragraph", "(ii) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(i) A paragraph')
self.assertEqual(lvl, 3)
self.assertEqual(node.label, ['i'])
self.ap.paragraph_with_marker("(ii) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(ii) A paragraph')
self.assertEqual(lvl, 3)
self.assertEqual(node.label, ['ii'])
self.ap.paragraph_with_marker("(v) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(v) A paragraph')
self.assertEqual(lvl, 3)
self.assertEqual(node.label, ['v'])
def test_split_paragraph_text(self):
res = self.ap.split_paragraph_text("(a) Paragraph. (1) Next paragraph")
self.assertEqual(['(a) Paragraph. ', '(1) Next paragraph', ''], res)
res = self.ap.split_paragraph_text("(a) (Keyterm) (1) Next paragraph")
self.assertEqual(['(a) (Keyterm) ', '(1) Next paragraph', ''], res)
res = self.ap.split_paragraph_text("(a) Mentions one (1) comment")
self.assertEqual(['(a) Mentions one (1) comment', ''], res)
def test_paragraph_double_depth(self):
self.ap.paragraph_with_marker("(a) A paragraph", "(1) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(a) A paragraph')
self.assertEqual(lvl, 1)
self.assertEqual(node.label, ['a'])
self.ap.paragraph_with_marker("(1) A paragraph", "(i) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(1) A paragraph')
self.assertEqual(lvl, 2)
self.assertEqual(node.label, ['1'])
self.ap.paragraph_with_marker("(i) A paragraph", "(A) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(i) A paragraph')
self.assertEqual(lvl, 3)
self.assertEqual(node.label, ['i'])
self.ap.paragraph_with_marker("(A) A paragraph")
lvl, node = self.result()
self.assertEqual(node.text, '(A) A paragraph')
self.assertEqual(lvl, 4)
self.assertEqual(node.label, ['A'])
def test_process_part_cap(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<HD SOURCE="HD1">Part I - Stuff</HD>
<P>A. Content</P>
</APPENDIX>
"""
appendix = self.ap.process(etree.fromstring(xml), 1111)
self.assertEqual(1, len(appendix.children))
aI = appendix.children[0]
self.assertEqual(1, len(aI.children))
def test_process_depth_look_forward(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<P>(a) aaaaa</P>
<P>(i) iiiii</P>
<P>Text text</P>
<P>(ii) ii ii ii</P>
</APPENDIX>
"""
appendix = self.ap.process(etree.fromstring(xml), 1111)
self.assertEqual(1, len(appendix.children))
Aa = appendix.children[0]
child_labels = [child.label for child in Aa.children]
self.assertTrue(['1111', 'A', 'a', 'i'] in child_labels)
self.assertTrue(['1111', 'A', 'a', 'ii'] in child_labels)
def test_process_header_depth(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<HD SOURCE="HD1">Title 1</HD>
<P>(1) Content 1</P>
<P>(2) Content 2</P>
<HD SOURCE="HD1">Title 2</HD>
<P>A. Content</P>
</APPENDIX>
"""
appendix = self.ap.process(etree.fromstring(xml), 1111)
self.assertEqual(2, len(appendix.children))
a1, a2 = appendix.children
self.assertEqual(2, len(a1.children))
self.assertEqual(1, len(a2.children))
def test_process_roman(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<HD SOURCE="HD1">Part I - Something</HD>
<P>(a) Something</P>
<GPH><GID>Contains (b)(i) - (iv)</GID></GPH>
<P>(v) Something else</P>
<P>(vi) Something more</P>
</APPENDIX>
"""
appendix = self.ap.process(etree.fromstring(xml), 1111)
self.assertEqual(1, len(appendix.children))
aI = appendix.children[0]
self.assertEqual(2, len(aI.children))
aIa, aIb = aI.children
self.assertEqual(2, len(aIb.children))
aIv, aIvi = aIb.children
self.assertEqual(['1111', 'A', 'I', 'a'], aIa.label)
self.assertEqual(['1111', 'A', 'I', 'p1'], aIb.label)
self.assertEqual(['1111', 'A', 'I', 'p1', 'v'], aIv.label)
self.assertEqual(['1111', 'A', 'I', 'p1', 'vi'], aIvi.label)
def test_process_collapsed(self):
xml = u"""
<APPENDIX>
<EAR>Pt. 1111, App. A</EAR>
<HD SOURCE="HED">Appendix A to Part 1111—Awesome</HD>
<HD SOURCE="HD1">Part I - Something</HD>
<P>(a) Something referencing § 999.2(a)(1). (1) Content</P>
<P>(2) Something else</P>
</APPENDIX>
"""
appendix = self.ap.process(etree.fromstring(xml), 1111)
self.assertEqual(1, len(appendix.children))
aI = appendix.children[0]
self.assertEqual(1, len(aI.children))
aIa = aI.children[0]
self.assertEqual(2, len(aIa.children))
aIa1, aIa2 = aIa.children
self.assertEqual(['1111', 'A', 'I', 'a', '1'], aIa1.label)
self.assertEqual('(1) Content', aIa1.text)
self.assertEqual(['1111', 'A', 'I', 'a', '2'], aIa2.label)
self.assertEqual('(2) Something else', aIa2.text)
``` |
{
"source": "jpostigo1/NLP_Project1",
"score": 4
} |
#### File: jpostigo1/NLP_Project1/langDemo2.py
```python
import random,operator,nltk
from nltk.corpus import udhr
#ratio of train:test documents
trainRatio = 3
#minimum word length we want to consider
ml = 3
#languages we are interested in (over 300 available in the udhr corpus) MUST be "Latin1" coding
languages = ['Spanish_Espanol', 'Welsh_Cymraeg', 'Afrikaans','Basque_Euskara','Danish_Dansk','Dutch_Nederlands','Finnish_Suomi','French_Francais','German_Deutsch','English','Italian']
wordsToUseAsFeatures = []
#Define a function that produces features from a given object, in this case one word
#Three string features are extracted per word: first two letters, last letter and last three letters
#Note that featuresets are dictionaries. That's what the classifier takes as input
def langFeatures(word):
features = {'first_two':word[:2], 'last_letter':word[-1],'last_three':word[-3:]}
if word in wordsToUseAsFeatures:
features['word-'+word]=True
return features
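# Example: langFeatures("hello") -> {'first_two': 'he', 'last_letter': 'o', 'last_three': 'llo'}
# (plus a 'word-hello' flag if "hello" ends up in wordsToUseAsFeatures)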
# a function operating on training words only, that could help us get more features
# in this case, we are finding the most frequent whole words in the training set for each language
def getMoreFeatures(trainWords):
moreFeatures = []
for l in languages:
langWords = [w for (w, lang) in trainWords if lang == l]
fdist = nltk.FreqDist(langWords)
for w in list(fdist.keys()): # could be trimmed to the most frequent, e.g. [:fdist.N() // 5]
moreFeatures.append(w)
return moreFeatures
#use Python's functional features to get a big list of (word, Language) pairs for the languages we are interested in
#words = reduce(operator.add, map(lambda L: ([(w.lower(),L) for w in udhr.words(L+'-Latin1') if len(w) >= ml]),languages),[])
words = []
allLists = [[(w.lower(),L) for w in udhr.words(L+'-Latin1') if len(w) >= ml] for L in languages]
for L in allLists:
words.extend(L)
#engWords, afrWords, itaWords = udhr.words('English-Latin1'), udhr.words('Afrikaans-Latin1'), udhr.words('Italian-Latin1')
#words = [(w,'English') for w in engWords] + [(w,'Afrikaans') for w in afrWords] + [(w,'Italian') for w in itaWords]
#words = [(w,l) for (w,l) in words if len(w) >= ml]
#(word, Language) tuples are still in file access order. This randomizes them
random.shuffle(words)
#split into training and test words still just (w,l) tuples.
splitPoint = len(words)//trainRatio
testWords, trainWords = words[:splitPoint],words[splitPoint:]
#Analysis on training set (you are not allowed to learn anything from the test set)
wordsToUseAsFeatures.extend(getMoreFeatures(trainWords))
#convert the (word,L) -> (features(word),L) for both training and test sets
test = [(langFeatures(w),l) for (w,l) in testWords]
train = [(langFeatures(w),l) for (w,l) in trainWords]
#NLTK's built-in implementation of the Naive Bayes classifier is trained
classifier = nltk.NaiveBayesClassifier.train(train)
#Other classifiers easily available from NLTK: Decision Trees and MaxEnt
#classifier = nltk.MaxentClassifier.train(train,max_iter=5)
#classifier = nltk.DecisionTreeClassifier.train(train,entropy_cutoff=0.1)
#now, it is tested on the test set and the accuracy reported
print("Accuracy: ",nltk.classify.accuracy(classifier,test))
#this is a nice function that reports the top most impactful features the NB classifier found
#It works for Maxent too, but it is not defined for DecisionTrees. So comment it out for DTs.
classifier.show_most_informative_features(20) # prints its report itself; wrapping it in print() only adds a stray 'None'
```
#### File: jpostigo1/NLP_Project1/textModifiers.py
```python
from nltk.corpus import stopwords
from nltk import sent_tokenize, word_tokenize, FreqDist, pos_tag
from nltk.stem.snowball import SnowballStemmer
def RemoveStopwords(text):
# Returns a list of words in text excluding stopwords
# where text is a single string
stop = set(stopwords.words("english"))
return [word for word in word_tokenize(text) if word not in stop]
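# Example (assumes the NLTK 'stopwords' and 'punkt' data are installed):
# RemoveStopwords("the cat sat on the mat") -> ['cat', 'sat', 'mat']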
def GetComparativeFreqs(words1, words2):
# words1 and words2 are dictionaries of {word: freq} entries
# return two dictionaries with the frequencies adjusted against each other
# for example, if words1 is {"dog": 5} and words2 is {"dog": 2},
# then words1 is returned as {"dog": 3} and words2 as {}
new_words1 = {}
for word, freq in words1.items():
if word in words2.keys():
new_freq = freq - words2[word]
if new_freq > 0:
new_words1[word] = new_freq
else:
new_words1[word] = freq
new_words2 = {}
for word, freq in words2.items():
if word in words1.keys():
new_freq = freq - words1[word]
if new_freq > 0:
new_words2[word] = new_freq
else:
new_words2[word] = freq
return new_words1, new_words2
def GetMostCommonWordFreqs(words, n=10):
return sorted(words.items(), key=lambda t: -t[1])[:n]
def SplitOnOverallRating(set):
# Return two sets of dictionaries split from `set` based on overall ratings
set_0 = []
set_1 = []
for review in set:
if "rating" in review and float(review["rating"]) <= 3:
set_0.append(review)
else:
set_1.append(review)
return set_0, set_1
def GetReviewText(review):
# Returns just the paragraphs from a given review as a single string.
allParas = ""
for key in review.keys():
# get all paragraphs regardless of how many
if "para" in key:
allParas += "\n" + review[key]
return allParas
def GetSentimentWords(set, n=5):
# Given a set of review dictionaries (such as test or train),
# return the `n` most frequent words that distinguish low-rated (1-3) reviews from higher-rated (4-5) ones
set_0, set_1 = SplitOnOverallRating(set)
words_0 = [word.lower() for review in set_0 for word in RemoveStopwords(GetReviewText(review))]
freqDict0 = dict(FreqDist([word.lower() for word in words_0]))
words_1 = [word.lower() for review in set_1 for word in RemoveStopwords(GetReviewText(review))]
freqDict1 = dict(FreqDist([word.lower() for word in words_1]))
comparedFreqs0, comparedFreqs1 = GetComparativeFreqs(freqDict0, freqDict1)
most_common0 = GetMostCommonWordFreqs(comparedFreqs0, n)
most_common1 = GetMostCommonWordFreqs(comparedFreqs1, n)
return most_common0, most_common1
``` |
{
"source": "jpothoof/goldschmidt-factor",
"score": 3
} |
#### File: jpothoof/goldschmidt-factor/ABX_functions.py
```python
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def ABX(A, B, X, A_dictionary, B_dictionary, X_dictionary):
A_radii = A_dictionary.get(A[0])
B_radii = B_dictionary.get(B[0])
X_radii = X_dictionary.get(X[0])
t_effective = (A_radii + X_radii) / (math.sqrt(2) * (B_radii + X_radii))
return t_effective
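# Usage sketch with illustrative radii (in angstroms; the dictionaries are assumed to map
# ion names to effective ionic radii):
# A_dict, B_dict, X_dict = {"MA": 2.17}, {"Pb": 1.19}, {"I": 2.20}
# ABX(["MA"], ["Pb"], ["I"], A_dict, B_dict, X_dict) # ~0.91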
def ABX2(A, B, X, X_ratio, A_dictionary, B_dictionary, X_dictionary):
if X_ratio == None:
X_radii = []
for ions in X:
X_radii.append(X_dictionary.get(ions))
A_radii = A_dictionary.get(A[0])
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
r_effective = ratio * X_radii[0] + (1 - ratio) * X_radii[1]
t_effective = (A_radii + r_effective) / (math.sqrt(2) * (B_radii + r_effective))
ones = np.ones(ratio.shape)
eights = ones*0.8
nines = ones*0.9
plt.plot(ratio, t_effective, color='red', lw=2)
plt.plot(ratio, ones, c='black')
plt.plot(ratio, eights, c='black')
plt.ylim(0.6, 1.2)
plt.xlim(0,1)
title = plt.title("$%s%s_x%s_{3x}%s_{3(1-x)}$ tolerance factor as a function of %s molar fraction" % (A[0], B[0], X[0], X[1], X[0]))
title.set_position([0.5, 1.05])
plt.ylabel("Tolerance Factor t", fontsize=14)
plt.xlabel("%s Molar Ratio" % X[0], fontsize=14)
plt.fill_between(ratio, nines, eights, color='yellow', alpha=0.5)
plt.fill_between(ratio, nines, ones, color='green', alpha=0.5)
plt.show()
df = pd.DataFrame(np.round(ratio, 2), columns=['%s Ratio' % X[0]])
df['%s Ratio' % X[1]] = np.round(1-ratio, 2)
df['Tolerance Factor'] = t_effective
return df
else:
if sum(X_ratio) == 1:
X_radii = []
for ions in X:
X_radii.append(X_dictionary.get(ions))
A_radii = A_dictionary.get(A[0])
B_radii = B_dictionary.get(B[0])
r_effective = X_ratio[0] * X_radii[0] + X_ratio[1] * X_radii[1]
t_effective = (A_radii + r_effective) / (math.sqrt(2) * (B_radii + r_effective))
return t_effective
else:
print('Error: The sum of X_ratio is not equal to 1.')
def ABX3(A, B, X, X_ratio, A_dictionary, B_dictionary, X_dictionary):
if X_ratio == None:
X_radii = []
for ions in X:
X_radii.append(X_dictionary.get(ions))
A_radii = A_dictionary.get(A[0])
B_radii = B_dictionary.get(B[0])
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
r_effective = x_ratio * X_radii[0] + y_ratio * X_radii[1] + z_ratio * X_radii[2]
t_effective = (A_radii + r_effective) / (math.sqrt(2) * (B_radii + r_effective))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
img = ax.scatter(x_ratio, y_ratio, z_ratio, c=t_effective, cmap=plt.jet())
fig.colorbar(img)
ax.set_xlabel("%s Molar Ratio" % X[0])
ax.set_ylabel("%s Molar Ratio" % X[1])
ax.set_zlabel("%s Molar Ratio" % X[2])
title = plt.title("$%s%s%s_x%s_y%s_z$ tolerance factor as a function of halide composition" % (A[0], B[0], X[0], X[1], X[2]))
title.set_position([0.5,1.05])
plt.show()
df = pd.DataFrame(x_ratio, columns =['%s Ratio' % X[0]])
df['%s Ratio' % X[1]] = y_ratio
df['%s Ratio' % X[2]] = z_ratio
df['Tolerance Factor'] = t_effective
return df
else:
if sum(X_ratio) == 1:
X_radii = []
for ions in X:
X_radii.append(X_dictionary.get(ions))
A_radii = A_dictionary.get(A[0])
B_radii = B_dictionary.get(B[0])
r_effective = X_ratio[0] * X_radii[0] + X_ratio[1] * X_radii[1] + X_ratio[2] * X_radii[2]
t_effective = (A_radii + r_effective) / (math.sqrt(2) * (B_radii + r_effective))
return t_effective
else:
print('Error: The sum of X_ratio is not equal to 1.')
def AB2X(A, B, X, B_ratio, A_dictionary, B_dictionary, X_dictionary):
if B_ratio == None:
B_radii = []
for ions in B:
B_radii.append(B_dictionary.get(ions))
A_radii = A_dictionary.get(A[0])
X_radii = X_dictionary.get(X[0])
ratio = np.linspace(0, 1, num=11)
r_effective = ratio * B_radii[0] + (1 - ratio) * B_radii[1]
t_effective = (A_radii + X_radii) / (math.sqrt(2) * (r_effective + X_radii))
ones = np.ones(ratio.shape)
eights = ones*0.8
nines = ones*0.9
plt.plot(ratio, t_effective, color='red', lw=2)
plt.plot(ratio, ones, c='black')
plt.plot(ratio, eights, c='black')
plt.ylim(0.6, 1.2)
plt.xlim(0,1)
title = plt.title("$%s%s_x%s_{1-x}%s_3$ tolerance factor as a function of %s molar fraction" % (A[0], B[0], B[1], X[0], B[0]))
title.set_position([0.5, 1.05])
plt.ylabel("Tolerance Factor t", fontsize=14)
plt.xlabel("%s Molar Ratio" % B[0], fontsize=14)
plt.fill_between(ratio, nines, eights, color='yellow', alpha=0.5)
plt.fill_between(ratio, nines, ones, color='green', alpha=0.5)
plt.show()
df = pd.DataFrame(np.round(ratio, 2), columns=['%s Ratio' % B[0]])
df['%s Ratio' % B[1]] = np.round(1-ratio, 2)
df['Tolerance Factor'] = t_effective
return df
else:
if sum(B_ratio) == 1:
B_radii = []
for ions in B:
B_radii.append(B_dictionary.get(ions))
A_radii = A_dictionary.get(A[0])
X_radii = X_dictionary.get(X[0])
r_effective = B_ratio[0] * B_radii[0] + B_ratio[1] * B_radii[1]
t_effective = (A_radii + X_radii) / (math.sqrt(2) * (r_effective + X_radii))
return t_effective
else:
print('Error: The sum of B_ratio is not equal to 1.')
def A2BX(A, B, X, A_ratio, A_dictionary, B_dictionary, X_dictionary):
if A_ratio == None:
A_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
X_radii = X_dictionary.get(X[0])
ratio = np.linspace(0, 1, num=11)
r_effective = ratio * A_radii[0] + (1 - ratio) * A_radii[1]
t_effective = (r_effective + X_radii) / (math.sqrt(2) * (B_radii + X_radii))
ones = np.ones(ratio.shape)
eights = ones*0.8
nines = ones*0.9
plt.plot(ratio, t_effective, color='red', lw=2)
plt.plot(ratio, ones, c='black')
plt.plot(ratio, eights, c='black')
plt.ylim(0.6, 1.2)
plt.xlim(0,1)
title = plt.title("$%s_x%s_{1-x}%s%s_3$ tolerance factor as a function of %s molar fraction" % (A[0], A[1], B[0], X[0], A[0]))
title.set_position([0.5, 1.05])
plt.ylabel("Tolerance Factor t", fontsize=14)
plt.xlabel("%s Molar Ratio" % A[0], fontsize=14)
plt.fill_between(ratio, nines, eights, color='yellow', alpha=0.5)
plt.fill_between(ratio, nines, ones, color='green', alpha=0.5)
plt.show()
df = pd.DataFrame(np.round(ratio, 2), columns=['%s Ratio' % A[0]])
df['%s Ratio' % A[1]] = np.round(1-ratio, 2)
df['Tolerance Factor'] = t_effective
return df
else:
if sum(A_ratio) == 1:
A_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
X_radii = X_dictionary.get(X[0])
r_effective = A_ratio[0] * A_radii[0] + A_ratio[1] * A_radii[1]
t_effective = (r_effective + X_radii) / (math.sqrt(2) * (B_radii + X_radii))
return t_effective
else:
print('Error: The sum of A_ratio is not equal to 1.')
def A3BX(A, B, X, A_ratio, A_dictionary, B_dictionary, X_dictionary):
if A_ratio == None:
A_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
X_radii = X_dictionary.get(X[0])
B_radii = B_dictionary.get(B[0])
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
r_effective = x_ratio * A_radii[0] + y_ratio * A_radii[1] + z_ratio * A_radii[2]
t_effective = (r_effective + X_radii) / (math.sqrt(2) * (B_radii + X_radii))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
img = ax.scatter(x_ratio, y_ratio, z_ratio, c=t_effective, cmap=plt.jet())
fig.colorbar(img)
ax.set_xlabel("%s Molar Ratio" % A[0])
ax.set_ylabel("%s Molar Ratio" % A[1])
ax.set_zlabel("%s Molar Ratio" % A[2])
title = plt.title("$%s%s%s_x%s_y%s_z$ tolerance factor as a function of A-site cation composition" % (A[0], A[1], A[2], B[0], X[0]))
title.set_position([0.5,1.05])
plt.show()
df = pd.DataFrame(x_ratio, columns =['%s Ratio' % A[0]])
df['%s Ratio' % A[1]] = y_ratio
df['%s Ratio' % A[2]] = z_ratio
df['Tolerance Factor'] = t_effective
return df
else:
if sum(A_ratio) == 1:
A_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
X_radii = X_dictionary.get(X[0])
B_radii = B_dictionary.get(B[0])
r_effective = A_ratio[0] * A_radii[0] + A_ratio[1] * A_radii[1] + A_ratio[2] * A_radii[2]
t_effective = (r_effective + X_radii) / (math.sqrt(2) * (B_radii + X_radii))
return t_effective
else:
print('Error: The sum of A_ratio is not equal to 1.')
def A2BX2(A, B, X, A_ratio, X_ratio, A_dictionary, B_dictionary, X_dictionary):
if (A_ratio == None and X_ratio == None):
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
A_effective = ratio * A_radii[0] + (1-ratio) * A_radii[1]
X_effective = ratio * X_radii[0] + (1-ratio) * X_radii[1]
t_effective = []
for i in A_effective:
t_effective.append((i + X_effective) / (math.sqrt(2) * (B_radii + X_effective)))
df = pd.DataFrame(ratio, columns =['%s Ratio' % A[0]])
df['%s Ratio' % A[1]] = 1-ratio
#df['Tolerance Factor'] = t_effective
i_count = 0
ratio = np.round(ratio, decimals=2)
for i in ratio:
df['%s' % i] = t_effective[i_count]
i_count += 1
df = df.rename(columns = {'0.0' : '%s Ratio : 0.0' % X[0]})
return df
elif ((A_ratio == None and X_ratio != None) or (A_ratio != None and X_ratio == None)):
print('Warning: Provide lists of ratios for both A_ratio and X_ratio to calculate a specific tolerance factor')
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
A_effective = ratio * A_radii[0] + (1-ratio) * A_radii[1]
X_effective = ratio * X_radii[0] + (1-ratio) * X_radii[1]
t_effective = []
for i in A_effective:
t_effective.append((i + X_effective) / (math.sqrt(2) * (B_radii + X_effective)))
df = pd.DataFrame(ratio, columns =['%s Ratio' % A[0]])
df['%s Ratio' % A[1]] = 1-ratio
#df['Tolerance Factor'] = t_effective
i_count = 0
ratio = np.round(ratio, decimals=2)
for i in ratio:
df['%s' % i] = t_effective[i_count]
i_count += 1
df = df.rename(columns = {'0.0' : '%s Ratio : 0.0' % X[0]})
return df
elif (A_ratio != None and X_ratio != None):
if (sum(A_ratio) == 1 and sum(X_ratio) == 1):
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
A_effective = A_ratio[0] * A_radii[0] + A_ratio[1] * A_radii[1]
X_effective = X_ratio[0] * X_radii[0] + X_ratio[1] * X_radii[1]
t_effective = (A_effective + X_effective) / (math.sqrt(2) * (B_radii + X_effective))
return t_effective
else:
print('Error: Either the sum of A_ratio or X_ratio is not equal to 1.')
def A3BX2(A, B, X, A_ratio, X_ratio, A_dictionary, B_dictionary, X_dictionary):
if (A_ratio == None and X_ratio == None):
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
A_effective = x_ratio * A_radii[0] + y_ratio * A_radii[1] + z_ratio * A_radii[2]
X_effective = ratio * X_radii[0] + (1-ratio) * X_radii[1]
t_effective = []
for i in A_effective:
t_effective.append((i + X_effective) / (math.sqrt(2) * (B_radii + X_effective)))
df = pd.DataFrame(x_ratio, columns=['%s Ratio' % A[0]])
df['%s Ratio' % A[1]] = y_ratio
df['%s Ratio' % A[2]] = z_ratio
df['A_effective'] = A_effective
df2 = pd.DataFrame(t_effective, columns = np.round(ratio,2))
df_merged = pd.merge(df,df2,left_index=True,right_index=True)
df_merged = df_merged.rename(columns = {0.0 : '%s Ratio : 0.0' % X[0]})
return df_merged
elif ((A_ratio == None and X_ratio != None) or (A_ratio != None and X_ratio == None)):
print('Warning: Provide lists of ratios for both A_ratio and X_ratio to calculate a specific tolerance factor')
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
A_effective = x_ratio * A_radii[0] + y_ratio * A_radii[1] + z_ratio * A_radii[2]
X_effective = ratio * X_radii[0] + (1-ratio) * X_radii[1]
t_effective = []
for i in A_effective:
t_effective.append((i + X_effective) / (math.sqrt(2) * (B_radii + X_effective)))
df = pd.DataFrame(x_ratio, columns=['%s Ratio' % A[0]])
df['%s Ratio' % A[1]] = y_ratio
df['%s Ratio' % A[2]] = z_ratio
df['A_effective'] = A_effective
df2 = pd.DataFrame(t_effective, columns = np.round(ratio,2))
df_merged = pd.merge(df,df2,left_index=True,right_index=True)
df_merged = df_merged.rename(columns = {0.0 : '%s Ratio : 0.0' % X[0]})
return df_merged
elif (A_ratio != None and X_ratio != None):
if (sum(A_ratio) == 1 and sum(X_ratio) == 1):
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
A_effective = A_ratio[0] * A_radii[0] + A_ratio[1] * A_radii[1] + A_ratio[2] * A_radii[2]
X_effective = X_ratio[0] * X_radii[0] + X_ratio[1] * X_radii[1]
t_effective = (A_effective + X_effective) / (math.sqrt(2) * (B_radii + X_effective))
return t_effective
else:
print('Error: Either the sum of A_ratio or X_ratio is not equal to 1.')
def A2BX3(A, B, X, A_ratio, X_ratio, A_dictionary, B_dictionary, X_dictionary):
if (A_ratio == None and X_ratio == None):
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
X_effective = x_ratio * X_radii[0] + y_ratio * X_radii[1] + z_ratio * X_radii[2]
A_effective = ratio * A_radii[0] + (1-ratio) * A_radii[1]
t_effective = []
for i in X_effective:
t_effective.append((A_effective + i) / (math.sqrt(2) * (B_radii + i)))
df = pd.DataFrame(x_ratio, columns=['%s Ratio' % X[0]])
df['%s Ratio' % X[1]] = y_ratio
df['%s Ratio' % X[2]] = z_ratio
df['X_effective'] = X_effective
df2 = pd.DataFrame(t_effective, columns = np.round(ratio,2))
df_merged = pd.merge(df,df2,left_index=True,right_index=True)
df_merged = df_merged.rename(columns = {0.0 : '%s Ratio : 0.0' % A[0]})
return df_merged
elif ((A_ratio == None and X_ratio != None) or (A_ratio != None and X_ratio == None)):
print('Warning: Provide lists of ratios for both A_ratio and X_ratio to calculate a specific tolerance factor')
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
X_effective = x_ratio * X_radii[0] + y_ratio * X_radii[1] + z_ratio * X_radii[2]
A_effective = ratio * A_radii[0] + (1-ratio) * A_radii[1]
t_effective = []
for i in X_effective:
t_effective.append((A_effective + i) / (math.sqrt(2) * (B_radii + i)))
df = pd.DataFrame(x_ratio, columns=['%s Ratio' % X[0]])
df['%s Ratio' % X[1]] = y_ratio
df['%s Ratio' % X[2]] = z_ratio
df['X_effective'] = X_effective
df2 = pd.DataFrame(t_effective, columns = np.round(ratio,2))
df_merged = pd.merge(df,df2,left_index=True,right_index=True)
df_merged = df_merged.rename(columns = {0.0 : '%s Ratio : 0.0' % A[0]})
return df_merged
elif (A_ratio != None and X_ratio != None):
if sum(A_ratio) == 1 and sum(X_ratio) == 1:
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
A_effective = A_ratio[0] * A_radii[0] + A_ratio[1] * A_radii[1]
X_effective = X_ratio[0] * X_radii[0] + X_ratio[1] * X_radii[1] + X_ratio[2] * X_radii[2]
t_effective = (A_effective + X_effective) / (math.sqrt(2) * (B_radii + X_effective))
return t_effective
else:
print('Error: Either the sum of A_ratio or X_ratio is not equal to 1.')
def A3BX3(A, B, X, A_ratio, X_ratio, A_dictionary, B_dictionary, X_dictionary):
if (A_ratio == None and X_ratio == None):
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
X_effective = x_ratio * X_radii[0] + y_ratio * X_radii[1] + z_ratio * X_radii[2]
A_effective = x_ratio * A_radii[0] + y_ratio * A_radii[1] + z_ratio * A_radii[2]
t_effective = np.zeros(shape=[len(X_effective), len(A_effective)])
i_count = 0
for i in X_effective:
j_count = 0
for j in A_effective:
t_effective[i_count][j_count] = (j + i) / (math.sqrt(2) * (B_radii + i))
j_count += 1
i_count += 1
X_labels = []
A_labels = []
for i in range(len(x_ratio)):
X_labels.append("%s: %s, %s: %s, %s: %s" % (X[0], np.round(x_ratio[i],2), X[1], np.round(y_ratio[i],2), X[2], np.round(z_ratio[i],2)))
A_labels.append("%s: %s, %s: %s, %s: %s" % (A[0], np.round(x_ratio[i],2), A[1], np.round(y_ratio[i],2), A[2], np.round(z_ratio[i],2)))
df = pd.DataFrame(t_effective, columns=A_labels)
df['X Ratio Index'] = X_labels
df = df.set_index('X Ratio Index')
return df
elif ((A_ratio == None and X_ratio != None) or (A_ratio != None and X_ratio == None)):
print('Warning: Provide lists of ratios for both A_ratio and X_ratio to calculate a specific tolerance factor')
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
ratio = np.linspace(0, 1, num=11)
x_ratio = []
y_ratio = []
z_ratio = []
x = np.linspace(0,1,11)
y = np.linspace(0,1,11)
xx, yy = np.meshgrid(x, y)
z = -xx -yy +1
for i in range(len(x)):
for j in range(len(y)):
if z[i][j] >= 0:
x_ratio.append(x[i])
y_ratio.append(y[j])
z_ratio.append(z[i][j])
else:
continue
x_ratio = np.array(x_ratio)
y_ratio = np.array(y_ratio)
z_ratio = np.array(z_ratio)
X_effective = x_ratio * X_radii[0] + y_ratio * X_radii[1] + z_ratio * X_radii[2]
A_effective = x_ratio * A_radii[0] + y_ratio * A_radii[1] + z_ratio * A_radii[2]
t_effective = np.zeros(shape=[len(X_effective), len(A_effective)])
i_count = 0
for i in X_effective:
j_count = 0
for j in A_effective:
t_effective[i_count][j_count] = (j + i) / (math.sqrt(2) * (B_radii + i))
j_count += 1
i_count += 1
X_labels = []
A_labels = []
for i in range(len(x_ratio)):
X_labels.append("%s: %s, %s: %s, %s: %s" % (X[0], np.round(x_ratio[i],2), X[1], np.round(y_ratio[i],2), X[2], np.round(z_ratio[i],2)))
A_labels.append("%s: %s, %s: %s, %s: %s" % (A[0], np.round(x_ratio[i],2), A[1], np.round(y_ratio[i],2), A[2], np.round(z_ratio[i],2)))
df = pd.DataFrame(t_effective, columns=A_labels)
df['X Ratio Index'] = X_labels
df = df.set_index('X Ratio Index')
return df
elif (A_ratio != None and X_ratio != None):
if sum(A_ratio) == 1 and sum(X_ratio) == 1:
A_radii = []
X_radii = []
for ions in A:
A_radii.append(A_dictionary.get(ions))
for ions in X:
X_radii.append(X_dictionary.get(ions))
B_radii = B_dictionary.get(B[0])
A_effective = A_ratio[0] * A_radii[0] + A_ratio[1] * A_radii[1] + A_ratio[2] * A_radii[2]
X_effective = X_ratio[0] * X_radii[0] + X_ratio[1] * X_radii[1] + X_ratio[2] * X_radii[2]
t_effective = (A_effective + X_effective) / (math.sqrt(2) * (B_radii + X_effective))
return t_effective
else:
print('Error: Either the sum of A_ratio or X_ratio is not equal to 1.')
```
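Every branch above evaluates the same Goldschmidt tolerance factor; the branches differ only in which site is given a composition-weighted effective radius and in whether a single value, a 2D scan, or a 3D scan is produced. In the notation of the code:

```latex
t \;=\; \frac{r_A^{\mathrm{eff}} + r_X^{\mathrm{eff}}}
             {\sqrt{2}\,\left(r_B^{\mathrm{eff}} + r_X^{\mathrm{eff}}\right)},
\qquad
r_S^{\mathrm{eff}} \;=\; \sum_i x_i\, r_{S,i},
\qquad
\sum_i x_i = 1
```

where S stands for whichever of the A, B or X sites is mixed and the x_i are the molar fractions (either passed in explicitly or swept over a grid when no ratio is supplied).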
#### File: jpothoof/goldschmidt-factor/goldschmidt.py
```python
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import ABX_functions
# https://onlinelibrary-wiley-com.offcampus.lib.washington.edu/doi/epdf/10.1002/aenm.201902467
def goldschmidt(A, B, X, A_ratio=None, B_ratio=None, X_ratio=None):
'''
Compute the Goldschmidt tolerance factor for a (possibly mixed) ABX3 perovskite.
A, B and X are lists of ion labels (e.g. ["MA"], ["Pb"], ["I", "Br"]).
The optional A_ratio/B_ratio/X_ratio lists give molar fractions for mixed
sites and must each sum to 1; if a mixed site has no ratio, its composition
is swept instead and a plot and/or DataFrame of tolerance factors is returned.
'''
A_dictionary = {"MA" : 2.16, "FA" : 2.53, "EA" : 2.74, "Cs" : 1.67}
B_dictionary = {"Pb" : 1.19, "Sn" : 1.15}
X_dictionary = {"I" : 2.20, "Br" : 1.96, "Cl" : 1.84}
if len(A) == len(B) == len(X) == 1:
return ABX_functions.ABX(A, B, X, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 2 and len(B) == 1 and len(X) == 1:
return ABX_functions.A2BX(A, B, X, A_ratio=A_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 1 and len(B) == 2 and len(X) == 1:
return ABX_functions.AB2X(A, B, X, B_ratio=B_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 1 and len(B) == 1 and len(X) == 2:
return ABX_functions.ABX2(A, B, X, X_ratio=X_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 1 and len(B) == 1 and len(X) == 3:
return ABX_functions.ABX3(A, B, X, X_ratio=X_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 3 and len(B) == 1 and len(X) == 1:
return ABX_functions.A3BX(A, B, X, A_ratio=A_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 2 and len(B) == 1 and len(X) == 2:
return ABX_functions.A2BX2(A, B, X, A_ratio=A_ratio, X_ratio=X_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 3 and len(B) == 1 and len(X) == 2:
return ABX_functions.A3BX2(A, B, X, A_ratio=A_ratio, X_ratio=X_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 2 and len(B) == 1 and len(X) == 3:
return ABX_functions.A2BX3(A, B, X, A_ratio=A_ratio, X_ratio=X_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
elif len(A) == 3 and len(B) == 1 and len(X) == 3:
return ABX_functions.A3BX3(A, B, X, A_ratio=A_ratio, X_ratio=X_ratio, A_dictionary=A_dictionary, B_dictionary=B_dictionary, X_dictionary=X_dictionary)
``` |
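A minimal usage sketch of the wrapper above, assuming the module is importable as `goldschmidt` and using the ionic radii hard-coded in its dictionaries; the exact numbers and plots depend on those values:

```python
# Usage sketch for goldschmidt(); assumes goldschmidt.py is on the path and
# uses the ionic radii from the dictionaries defined inside the function.
from goldschmidt import goldschmidt

# Pure composition, e.g. MAPbI3: returns a single tolerance factor
t_mapbi3 = goldschmidt(A=["MA"], B=["Pb"], X=["I"])
print(t_mapbi3)  # roughly 0.91 with r_MA=2.16, r_Pb=1.19, r_I=2.20

# Mixed halide without a ratio: sweeps x, shows a plot, returns a DataFrame
df_scan = goldschmidt(A=["MA"], B=["Pb"], X=["I", "Br"])

# Mixed halide at a fixed ratio (must sum to 1): returns a single value
t_half = goldschmidt(A=["MA"], B=["Pb"], X=["I", "Br"], X_ratio=[0.5, 0.5])
```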
{
"source": "jpot/Instanssi.org",
"score": 2
} |
#### File: Instanssi/admin_arkisto/forms.py
```python
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder
from Instanssi.arkisto.models import OtherVideo, OtherVideoCategory
from Instanssi.common.misc import parse_youtube_video_id
class VideoForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
# Initialize
self.event = kwargs.pop('event', None)
super(VideoForm, self).__init__(*args, **kwargs)
# Set choices
if self.event:
cats = []
for cat in OtherVideoCategory.objects.filter(event=self.event):
cats.append((cat.id, cat.name))
self.fields['category'].choices = cats
# Set form
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset(
'Muu Video',
'name',
'category',
'description',
'youtube_url',
ButtonHolder(
Submit('submit', 'Tallenna')
)
)
)
def clean_youtube_url(self):
# Make sure field has content
if not self.cleaned_data['youtube_url']:
return self.cleaned_data['youtube_url']
# Parse video id
video_id = parse_youtube_video_id(self.cleaned_data['youtube_url'])
# Warn if something is wrong
if not video_id:
raise forms.ValidationError('Osoitteesta ei löytynyt videotunnusta.')
# Return a new video url
return 'https://www.youtube.com/v/{}'.format(video_id)
class Meta:
model = OtherVideo
fields = ('category', 'name', 'description', 'youtube_url')
class VideoCategoryForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(VideoCategoryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset(
'Kategoria',
'name',
ButtonHolder(
Submit('submit', 'Tallenna')
)
)
)
class Meta:
model = OtherVideoCategory
fields = ('name',)
```
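`clean_youtube_url` above leans on `parse_youtube_video_id` from `Instanssi.common.misc`, which is not included in this excerpt. The sketch below is a hypothetical, regex-based stand-in meant only to illustrate the expected contract (URL in, video id or `None` out); the project's real helper may be implemented differently:

```python
import re
from typing import Optional

def parse_youtube_video_id_sketch(url: str) -> Optional[str]:
    """Hypothetical stand-in for Instanssi.common.misc.parse_youtube_video_id."""
    # Cover the common URL shapes: watch?v=<id>, youtu.be/<id> and /v/<id>
    for pattern in (r"[?&]v=([\w-]+)", r"youtu\.be/([\w-]+)", r"/v/([\w-]+)"):
        match = re.search(pattern, url)
        if match:
            return match.group(1)
    return None  # nothing that looks like a video id

# Example: both URL forms normalize to the same id
assert parse_youtube_video_id_sketch("https://youtu.be/abc123XYZ_-") == "abc123XYZ_-"
assert parse_youtube_video_id_sketch("https://www.youtube.com/watch?v=abc123XYZ_-") == "abc123XYZ_-"
```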
#### File: Instanssi/admin_arkisto/views.py
```python
from Instanssi.common.http import Http403
from Instanssi.common.auth import staff_access_required
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from Instanssi.kompomaatti.models import *
from Instanssi.arkisto.models import OtherVideo, OtherVideoCategory
from Instanssi.admin_arkisto.forms import VideoForm, VideoCategoryForm
from Instanssi.admin_arkisto.misc import utils
from Instanssi.admin_base.misc.custom_render import admin_render
# Logging related
import logging
logger = logging.getLogger(__name__)
@staff_access_required
def index(request, sel_event_id):
# Render response
return admin_render(request, "admin_arkisto/index.html", {
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def removeoldvotes(request, sel_event_id):
# Check rights
if not request.user.has_perm('kompomaatti.delete_vote'):
raise Http403
# Don't proceed if the event is still ongoing
event = get_object_or_404(Event, pk=int(sel_event_id))
if utils.is_event_ongoing(event):
raise Http404
# Find compos belonging to this event
compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
# Don't allow removing votes if votes haven't yet been consolidated to entry rows (prevent data loss)
if utils.is_votes_unoptimized(compo_ids):
raise Http404
# Delete votes belonging to compos in this event
for group in VoteGroup.objects.filter(compo__in=compo_ids):
group.delete_votes()
group.delete()
# Log it
logger.info('Event old votes removed.', extra={'user': request.user, 'event': event})
# All done, redirect
return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def transferrights(request, sel_event_id):
# Check rights
if not request.user.has_perm('kompomaatti.change_entry'):
raise Http403
# Don't allow this function if the event is still ongoing
event = get_object_or_404(Event, pk=int(sel_event_id))
if utils.is_event_ongoing(event):
raise Http404
# Get archive user, compo id's and competition id's
archiveuser = get_object_or_404(User, username="arkisto")
compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
competition_ids = Competition.objects.filter(event_id=int(sel_event_id)).values('pk')
# Transfer all user rights on entries and competition participations belonging to this event
Entry.objects.filter(compo__in=compo_ids).update(user=archiveuser)
CompetitionParticipation.objects.filter(competition__in=competition_ids).update(user=archiveuser)
# Log it
logger.info('Event rights transferred.', extra={'user': request.user, 'event': event})
# All done, redirect
return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def optimizescores(request, sel_event_id):
# Check rights
if not request.user.has_perm('kompomaatti.change_entry'):
raise Http403
# Don't allow this function if the event is still ongoing
event = get_object_or_404(Event, pk=int(sel_event_id))
if utils.is_event_ongoing(event):
raise Http404
# Get compo id's
compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
# Set score and rank to database, instead of having to calculate it every time we need it
entries = Entry.objects.filter(compo__in=compo_ids)
for entry in entries:
entry.archive_rank = entry.get_rank()
entry.archive_score = entry.get_score()
entry.save()
# Log it
logger.info('Event scores optimized.', extra={'user': request.user, 'event': event})
return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def archiver(request, sel_event_id):
# Get event information
event = get_object_or_404(Event, pk=sel_event_id)
# Get archive user information for future use
archiveuser = get_object_or_404(User, username="arkisto")
# Get Compo id's belonging to this event for future use
compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
# Check if there are any compo entries that are not owned by the archive user
untransferred = False
entries = Entry.objects.filter(compo__in=compo_ids)
for entry in entries:
if entry.user != archiveuser:
untransferred = True
break
# Check if there are any participations that are not owned by the archive user
if not untransferred:
competition_ids = Competition.objects.filter(event_id=int(sel_event_id)).values('pk')
participations = CompetitionParticipation.objects.filter(competition__in=competition_ids)
for part in participations:
if part.user != archiveuser:
untransferred = True
break
# Check if voting results need to be optimized
votes_unoptimized = utils.is_votes_unoptimized(compo_ids)
# Check if event is still ongoing
ongoing_activity = utils.is_event_ongoing(event)
# See if there are any old votes left
old_votes_found = Vote.objects.filter(compo__in=compo_ids).exists()
# Render response
return admin_render(request, "admin_arkisto/archiver.html", {
'selected_event_id': int(sel_event_id),
'is_archived': event.archived,
'untransferred': untransferred,
'ongoing_activity': ongoing_activity,
'votes_unoptimized': votes_unoptimized,
'old_votes_found': old_votes_found,
})
@staff_access_required
def show(request, sel_event_id):
# Check rights
if not request.user.has_perm('kompomaatti.change_event'):
raise Http403
# Mark event as archived
event = get_object_or_404(Event, pk=sel_event_id)
event.archived = True
event.save()
# Log it
logger.info('Event set as visible in archive.', extra={'user': request.user, 'event': event})
return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def hide(request, sel_event_id):
# Check rights
if not request.user.has_perm('kompomaatti.change_event'):
raise Http403
# Mark event as NOT archived
event = get_object_or_404(Event, pk=sel_event_id)
event.archived = False
event.save()
# Log it
logger.info('Event set as hidden in archive.', extra={'user': request.user, 'event': event})
return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def vids(request, sel_event_id):
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Handle form
if request.method == "POST":
# Check for permissions
if not request.user.has_perm('arkisto.add_othervideo'):
raise Http403
# Handle form
vidform = VideoForm(request.POST, event=event)
if vidform.is_valid():
video = vidform.save()
logger.info('Added archive video {}'.format(video.name),
extra={'user': request.user, 'event': event})
return HttpResponseRedirect(reverse('manage-arkisto:vids', args=(sel_event_id,)))
else:
vidform = VideoForm(event=event)
# Get videos belonging to selected event
categories = OtherVideoCategory.objects.filter(event_id=int(sel_event_id))
videos = []
for cat in categories:
vlist = OtherVideo.objects.filter(category=cat)
for video in vlist:
videos.append(video)
# Render response
return admin_render(request, "admin_arkisto/vids.html", {
'videos': videos,
'vidform': vidform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def editvid(request, sel_event_id, video_id):
# Check for permissions
if not request.user.has_perm('arkisto.change_othervideo'):
raise Http403
# Get Video
video = get_object_or_404(OtherVideo, pk=video_id)
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Handle form
if request.method == "POST":
vidform = VideoForm(request.POST, instance=video, event=event)
if vidform.is_valid():
r_video = vidform.save()
logger.info('Edited archive video {}'.format(r_video.name),
extra={'user': request.user, 'event': event})
return HttpResponseRedirect(reverse('manage-arkisto:vids', args=(sel_event_id,)))
else:
vidform = VideoForm(instance=video, event=event)
# Render response
return admin_render(request, "admin_arkisto/editvid.html", {
'vidform': vidform,
'vid': video,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def deletevid(request, sel_event_id, video_id):
# Check for permissions
if not request.user.has_perm('arkisto.delete_othervideo'):
raise Http403
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Attempt to delete video
try:
video = OtherVideo.objects.get(id=video_id)
video.delete()
logger.info('Deleted archive video {}'.format(video.name),
extra={'user': request.user, 'event': event})
except OtherVideo.DoesNotExist:
pass
# Redirect
return HttpResponseRedirect(reverse('manage-arkisto:vids', args=(sel_event_id,)))
@staff_access_required
def cats(request, sel_event_id):
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Handle form
if request.method == "POST":
# Check for permissions
if not request.user.has_perm('arkisto.add_othervideocategory'):
raise Http403
# Handle form
catform = VideoCategoryForm(request.POST)
if catform.is_valid():
cat = catform.save(commit=False)
cat.event = event
cat.save()
logger.info('Added archive video category {}'.format(cat.name),
extra={'user': request.user, 'event': event})
return HttpResponseRedirect(reverse('manage-arkisto:vidcats', args=(sel_event_id,)))
else:
catform = VideoCategoryForm()
# Get videos belonging to selected event
categories = OtherVideoCategory.objects.filter(event_id=int(sel_event_id))
# Render response
return admin_render(request, "admin_arkisto/cats.html", {
'categories': categories,
'catform': catform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def editcat(request, sel_event_id, category_id):
# Check for permissions
if not request.user.has_perm('arkisto.change_othervideocategory'):
raise Http403
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Get category
category = get_object_or_404(OtherVideoCategory, pk=category_id, event=event)
# Handle form
if request.method == "POST":
catform = VideoCategoryForm(request.POST, instance=category)
if catform.is_valid():
r_cat = catform.save()
logger.info('Edited archive video category {}'.format(r_cat.name),
extra={'user': request.user, 'event': event})
return HttpResponseRedirect(reverse('manage-arkisto:vidcats', args=(sel_event_id,)))
else:
catform = VideoCategoryForm(instance=category)
# Render response
return admin_render(request, "admin_arkisto/editcat.html", {
'catform': catform,
'cat': category,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def deletecat(request, sel_event_id, category_id):
# Check for permissions
if not request.user.has_perm('arkisto.delete_othervideocategory'):
raise Http403
event = get_object_or_404(Event, pk=sel_event_id)
# Attempt to delete category
try:
cat = OtherVideoCategory.objects.get(id=category_id, event=event)
cat.delete()
logger.info('Deleted archive video category {}'.format(cat.name),
extra={'user': request.user, 'event': event})
except OtherVideoCategory.DoesNotExist:
pass
# Redirect
return HttpResponseRedirect(reverse('manage-arkisto:vidcats', args=(sel_event_id,)))
```
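A side note on the permission checks used throughout these views: Django's `User.has_perm()` takes a single `"app_label.codename"` string, while `User.has_perms()` expects an iterable of such strings, so single-permission checks like the ones above belong with `has_perm()`. A minimal sketch of the two call styles, reusing permission names that appear in this file:

```python
# Minimal sketch of Django's permission-check API, not project code.
def can_run_archiver(user) -> bool:
    # One permission: has_perm with a single "app_label.codename" string
    if not user.has_perm("kompomaatti.delete_vote"):
        return False
    # Several permissions at once: has_perms with an iterable of strings
    return user.has_perms(["kompomaatti.change_entry", "kompomaatti.change_event"])
```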
#### File: Instanssi/admin_blog/views.py
```python
from Instanssi.common.http import Http403
from Instanssi.common.auth import staff_access_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from Instanssi.ext_blog.models import BlogEntry
from Instanssi.admin_blog.forms import BlogEntryForm, BlogEntryEditForm
from Instanssi.admin_base.misc.custom_render import admin_render
# Logging related
import logging
logger = logging.getLogger(__name__)
@staff_access_required
def index(request, sel_event_id):
# Post
if request.method == 'POST':
# Check for permissions
if not request.user.has_perm('ext_blog.add_blogentry'):
raise Http403
# Handle form
form = BlogEntryForm(request.POST)
if form.is_valid():
entry = form.save(commit=False)
entry.event_id = int(sel_event_id)
entry.date = timezone.now()
entry.user = request.user
entry.save()
logger.info('Blog entry "'+entry.title+'" added.', extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-blog:index', args=(sel_event_id,)))
else:
form = BlogEntryForm()
# Get events
entries = BlogEntry.objects.filter(event_id = sel_event_id)
# Render response
return admin_render(request, "admin_blog/index.html", {
'entries': entries,
'selected_event_id': int(sel_event_id),
'addform': form,
})
@staff_access_required
def edit(request, sel_event_id, entry_id):
# Check for permissions
if not request.user.has_perm('ext_blog.change_blogentry'):
raise Http403
# Get old entry
entry = get_object_or_404(BlogEntry, pk=entry_id)
# Go ahead and edit
if request.method == 'POST':
form = BlogEntryEditForm(request.POST, instance=entry)
if form.is_valid():
entry = form.save()
logger.info('Blog entry "'+entry.title+'" edited.', extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-blog:index', args=(sel_event_id,)))
else:
form = BlogEntryEditForm(instance=entry)
# Render response
return admin_render(request, "admin_blog/edit.html", {
'editform': form,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def delete(request, sel_event_id, entry_id):
# Check for permissions
if not request.user.has_perm('ext_blog.delete_blogentry'):
raise Http403
# Delete entry
try:
entry = BlogEntry.objects.get(id=entry_id)
entry.delete()
logger.info('Blog entry "'+entry.title+'" deleted.', extra={'user': request.user, 'event_id': sel_event_id})
except BlogEntry.DoesNotExist:
pass
return HttpResponseRedirect(reverse('manage-blog:index', args=(sel_event_id,)))
```
#### File: Instanssi/admin_kompomaatti/views.py
```python
import logging
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.template import loader
from Instanssi.kompomaatti.models import VoteCodeRequest, TicketVoteCode, Compo, Event, Entry, Competition,\
CompetitionParticipation
from Instanssi.admin_kompomaatti.forms import AdminCompoForm, AdminCompetitionForm, AdminCompetitionScoreForm,\
AdminEntryAddForm, AdminEntryEditForm, AdminParticipationEditForm, CloneCompoForm
from Instanssi.kompomaatti.misc import entrysort
from Instanssi.admin_base.misc.custom_render import admin_render
from Instanssi.common.http import Http403
from Instanssi.common.auth import staff_access_required
from Instanssi.kompomaatti import tasks
logger = logging.getLogger(__name__)
@staff_access_required
def index(request, sel_event_id):
# Render response
return admin_render(request, "admin_kompomaatti/index.html", {
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def entries_csv(request, sel_event_id):
entries = []
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Find all compos and their top 3 entries
compos = Compo.objects.filter(event=event)
# Get the entries
for compo in compos:
compo_results = entrysort.sort_by_score(Entry.objects.filter(compo=compo))
if len(compo_results) > 3:
entries = entries + compo_results[:3]
else:
entries = entries + compo_results
# Assign placement numerals (I, II, III) to the top entries
for entry in entries:
m = entry.get_rank()
if m == 1:
entry.placement = 'I'
if m == 2:
entry.placement = 'II'
if m == 3:
entry.placement = 'III'
# Respond with entries CSV (text/csv)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="instanssi_entries.csv"'
t = loader.get_template('admin_kompomaatti/entries_csv.txt')
response.write(t.render({
'entries': entries,
}))
return response
@staff_access_required
def competition_score(request, sel_event_id, competition_id):
# Get competition
competition = get_object_or_404(Competition, pk=competition_id)
# Handle form
if request.method == 'POST':
# Check permissions
if not request.user.has_perm('kompomaatti.change_competitionparticipation'):
raise Http403
# Handle form
scoreform = AdminCompetitionScoreForm(request.POST, competition=competition)
if scoreform.is_valid():
scoreform.save()
logger.info('Competition scores set.', extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:competitions', args=(sel_event_id,)))
else:
scoreform = AdminCompetitionScoreForm(competition=competition)
# Render response
return admin_render(request, "admin_kompomaatti/competition_score.html", {
'competition': competition,
'scoreform': scoreform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def competition_participations(request, sel_event_id, competition_id):
# Get participations for the competition
participants = CompetitionParticipation.objects.filter(competition_id=int(competition_id))
# Render response
return admin_render(request, "admin_kompomaatti/competition_participations.html", {
'participants': participants,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def competition_participation_edit(request, sel_event_id, competition_id, pid):
# Check for permissions
if not request.user.has_perm('kompomaatti.change_competitionparticipation'):
raise Http403
# Get competition, participation
competition = get_object_or_404(Competition, pk=int(competition_id))
participant = get_object_or_404(CompetitionParticipation, pk=int(pid))
# Handle form
if request.method == 'POST':
pform = AdminParticipationEditForm(request.POST, instance=participant)
if pform.is_valid():
pform.save()
logger.info('Competition participation information edited.',
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:participations',
args=(sel_event_id, competition_id,)))
else:
pform = AdminParticipationEditForm(instance=participant)
# Render response
return admin_render(request, "admin_kompomaatti/participation_edit.html", {
'pform': pform,
'selected_event_id': int(sel_event_id),
'competition': competition,
})
@staff_access_required
def competitions_browse(request, sel_event_id):
# Get competitions
competitions = Competition.objects.filter(event_id=int(sel_event_id))
# Form handling
if request.method == "POST":
# Check for permissions
if not request.user.has_perm('kompomaatti.add_competition'):
raise Http403
# Handle form
competitionform = AdminCompetitionForm(request.POST)
if competitionform.is_valid():
data = competitionform.save(commit=False)
data.event_id = int(sel_event_id)
data.save()
logger.info('Competition "{}" added.'.format(data.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:competitions', args=(sel_event_id,)))
else:
competitionform = AdminCompetitionForm()
# Render response
return admin_render(request, "admin_kompomaatti/competitions.html", {
'competitions': competitions,
'competitionform': competitionform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def competition_edit(request, sel_event_id, competition_id):
# Check for permissions
if not request.user.has_perm('kompomaatti.change_competition'):
raise Http403
# Get competition
competition = get_object_or_404(Competition, pk=competition_id)
# Handle form
if request.method == "POST":
competitionform = AdminCompetitionForm(request.POST, instance=competition)
if competitionform.is_valid():
c = competitionform.save()
logger.info('Competition "{}" edited.'.format(c.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:competitions', args=(sel_event_id,)))
else:
competitionform = AdminCompetitionForm(instance=competition)
# Render response
return admin_render(request, "admin_kompomaatti/competition_edit.html", {
'competition': competition,
'competitionform': competitionform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def competition_delete(request, sel_event_id, competition_id):
# Check for permissions
if not request.user.has_perm('kompomaatti.delete_competition'):
raise Http403
# Delete competition
try:
c = Competition.objects.get(pk=competition_id)
c.delete()
logger.info('Competition "{}" deleted.'.format(c.name),
extra={'user': request.user, 'event_id': sel_event_id})
except Competition.DoesNotExist:
pass
# Redirect
return HttpResponseRedirect(reverse('manage-kompomaatti:competitions', args=(sel_event_id,)))
@staff_access_required
def compo_browse(request, sel_event_id):
# Get compos
compos = Compo.objects.filter(event_id=int(sel_event_id))
if request.method == "POST" and 'submit-clone' in request.POST:
if not request.user.has_perm('kompomaatti.add_compo'):
raise Http403
clonecompoform = CloneCompoForm(request.POST)
if clonecompoform.is_valid():
clonecompoform.save(event_id=sel_event_id)
logger.info('Compos from other event cloned.',
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:compos', args=(sel_event_id,)))
else:
clonecompoform = CloneCompoForm()
# Form handling
if request.method == "POST" and 'submit-compo' in request.POST:
# Check for permissions
if not request.user.has_perm('kompomaatti.add_compo'):
raise Http403
# Handle form
compoform = AdminCompoForm(request.POST)
if compoform.is_valid():
data = compoform.save(commit=False)
data.event_id = int(sel_event_id)
data.save()
logger.info('Compo "{}" added.'.format(data.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:compos', args=(sel_event_id,)))
else:
compoform = AdminCompoForm()
# Render response
return admin_render(request, "admin_kompomaatti/compo_browse.html", {
'compos': compos,
'compoform': compoform,
'clonecompoform': clonecompoform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def compo_edit(request, sel_event_id, compo_id):
# Check for permissions
if not request.user.has_perm('kompomaatti.change_compo'):
raise Http403
# Get compo
compo = get_object_or_404(Compo, pk=compo_id)
# Handle form
if request.method == "POST":
editform = AdminCompoForm(request.POST, instance=compo)
if editform.is_valid():
c = editform.save()
logger.info('Compo "{}" edited.'.format(c.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:compos', args=(sel_event_id,)))
else:
editform = AdminCompoForm(instance=compo)
# Render response
return admin_render(request, "admin_kompomaatti/compo_edit.html", {
'compo': compo,
'editform': editform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def compo_delete(request, sel_event_id, compo_id):
# Check for permissions
if not request.user.has_perm('kompomaatti.delete_compo'):
raise Http403
# Delete competition
try:
c = Compo.objects.get(pk=compo_id)
c.delete()
logger.info('Compo "{}" deleted.'.format(c.name),
extra={'user': request.user, 'event_id': sel_event_id})
except Compo.DoesNotExist:
pass
# Redirect
return HttpResponseRedirect(reverse('manage-kompomaatti:compos', args=(sel_event_id,)))
@staff_access_required
def entry_browse(request, sel_event_id):
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Form handling
if request.method == "POST":
# Check for permissions
if not request.user.has_perm('kompomaatti.add_entry'):
raise Http403
# Handle form
entryform = AdminEntryAddForm(request.POST, request.FILES, event=event)
if entryform.is_valid():
e = entryform.save()
logger.info('Compo entry "{}" added.'.format(e.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:entries', args=(sel_event_id,)))
else:
entryform = AdminEntryAddForm(event=event)
# Get Entries
compos = Compo.objects.filter(event=int(sel_event_id))
entries = Entry.objects.filter(compo__in=compos)
# Render response
return admin_render(request, "admin_kompomaatti/entry_browse.html", {
'entries': entries,
'entryform': entryform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def entry_edit(request, sel_event_id, entry_id):
# Check for permissions
if not request.user.has_perm('kompomaatti.change_entry'):
raise Http403
# Check ID
entry = get_object_or_404(Entry, pk=entry_id)
# Get event
event = get_object_or_404(Event, pk=sel_event_id)
# Handle form
if request.method == "POST":
editform = AdminEntryEditForm(request.POST, request.FILES, instance=entry, event=event)
if editform.is_valid():
e = editform.save()
logger.info('Compo entry "{}" edited.'.format(e.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-kompomaatti:entries', args=(sel_event_id,)))
else:
editform = AdminEntryEditForm(instance=entry, event=event)
# Render response
return admin_render(request, "admin_kompomaatti/entry_edit.html", {
'entry': entry,
'editform': editform,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def entry_delete(request, sel_event_id, entry_id):
# Check for permissions
if not request.user.has_perm('kompomaatti.delete_entry'):
raise Http403
# Get entry
entry = get_object_or_404(Entry, pk=entry_id)
# Delete entry
entry.entryfile.delete()
if entry.sourcefile:
entry.sourcefile.delete()
if entry.imagefile_original:
entry.imagefile_original.delete()
entry.delete()
logger.info('Compo entry "{}" deleted.'.format(entry.name),
extra={'user': request.user, 'event_id': sel_event_id})
# Redirect
return HttpResponseRedirect(reverse('manage-kompomaatti:entries', args=(sel_event_id,)))
@staff_access_required
def generate_result_package(request, sel_event_id, compo_id):
tasks.rebuild_collection.delay(compo_id)
return HttpResponseRedirect(reverse('manage-kompomaatti:results', args=(sel_event_id,)))
@staff_access_required
def results(request, sel_event_id):
# Get compos and competitions
compos = Compo.objects.filter(event_id=int(sel_event_id))
competitions = Competition.objects.filter(event_id=int(sel_event_id))
# Get the entries
compo_results = {}
for compo in compos:
compo_results[compo] = entrysort.sort_by_score(Entry.objects.filter(compo=compo))
# Get competition participations
competition_results = {}
for competition in competitions:
rankby = '-score'
if competition.score_sort == 1:
rankby = 'score'
competition_results[competition.name] = \
CompetitionParticipation.objects.filter(competition=competition).order_by(rankby)
# Render response
return admin_render(request, "admin_kompomaatti/results.html", {
'compo_results': compo_results,
'competition_results': competition_results,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def ticket_votecodes(request, sel_event_id):
# Get tokens
tokens = TicketVoteCode.objects.filter(event_id=sel_event_id)
# Render response
return admin_render(request, "admin_kompomaatti/ticketvotecodes.html", {
'tokens': tokens,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def votecoderequests(request, sel_event_id):
# Get all requests
requests = VoteCodeRequest.objects.filter(event_id=int(sel_event_id))
# Render response
return admin_render(request, "admin_kompomaatti/vcrequests.html", {
'requests': requests,
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def votecoderequests_accept(request, sel_event_id, vcrid):
# Check for permissions
if not request.user.has_perm('kompomaatti.change_votecode'):
raise Http403
# Get the request and change status to accepted
vcr = get_object_or_404(VoteCodeRequest, pk=vcrid)
logger.info('Votecode request from "{}" accepted.'.format(vcr.user.username),
extra={'user': request.user, 'event_id': sel_event_id})
vcr.status = 1
vcr.save()
# Return to admin page
return HttpResponseRedirect(reverse('manage-kompomaatti:votecoderequests', args=(sel_event_id,)))
@staff_access_required
def votecoderequests_reject(request, sel_event_id, vcrid):
# Check for permissions
if not request.user.has_perm('kompomaatti.change_votecode'):
raise Http403
# Get the request and change status to rejected
vcr = get_object_or_404(VoteCodeRequest, pk=vcrid)
logger.info('Votecode request from "{}" rejected.'.format(vcr.user.username),
extra={'user': request.user, 'event_id': sel_event_id})
vcr.status = 2
vcr.save()
# Return to admin page
return HttpResponseRedirect(reverse('manage-kompomaatti:votecoderequests', args=(sel_event_id,)))
```
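In `generate_result_package` above, `tasks.rebuild_collection.delay(compo_id)` hands the work to a Celery worker instead of doing it inside the request. The task module itself is not part of this excerpt; a hypothetical declaration of such a task is sketched below, and the real `rebuild_collection` in `Instanssi.kompomaatti.tasks` may look different:

```python
# Hypothetical sketch of a Celery task like rebuild_collection; the real
# Instanssi.kompomaatti.tasks implementation is not shown in this excerpt.
from celery import shared_task

@shared_task
def rebuild_collection(compo_id: int) -> None:
    # Placeholder body: the real task gathers the compo's entries and
    # rebuilds the downloadable result package for the archive.
    pass

# A view enqueues it asynchronously instead of calling it inline:
# rebuild_collection.delay(compo_id)
```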
#### File: Instanssi/admin_screenshow/views.py
```python
from Instanssi.common.http import Http403
from Instanssi.common.auth import staff_access_required
from django.http import HttpResponseRedirect
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.urls import reverse
from Instanssi.admin_base.misc.custom_render import admin_render
from Instanssi.admin_screenshow.forms import *
import os
# Logging related
import logging
logger = logging.getLogger(__name__)
@staff_access_required
def index(request, sel_event_id):
return admin_render(request, "admin_screenshow/index.html", {
'selected_event_id': int(sel_event_id),
})
@staff_access_required
def config(request, sel_event_id):
# Try to get configuration for event
conf = None
try:
conf = ScreenConfig.objects.get(event_id=sel_event_id)
except ScreenConfig.DoesNotExist:
pass
# Handle post data
if request.method == 'POST':
# Check for permissions
if not request.user.has_perm('screenshow.change_screenconfig'):
raise Http403
# Handle form
configform = ScreenConfigForm(request.POST, instance=conf)
if configform.is_valid():
data = configform.save(commit=False)
data.event_id = sel_event_id
data.save()
logger.info('Screenshow configuration changed.',
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:config', args=(sel_event_id,)))
else:
configform = ScreenConfigForm(instance=conf)
# Dump template contents
return admin_render(request, "admin_screenshow/config.html", {
'selected_event_id': int(sel_event_id),
'configform': configform,
})
@staff_access_required
def playlist(request, sel_event_id):
# Check for form data
if request.method == 'POST':
# Check for permissions
if not request.user.has_perm('screenshow.add_playlistvideo'):
raise Http403
# Handle data
playlistform = PlaylistVideoForm(request.POST)
if playlistform.is_valid():
data = playlistform.save(commit=False)
data.event_id = sel_event_id
data.save()
logger.info('Video "{}" added to playlist.'.format(data.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:playlist', args=(sel_event_id,)))
else:
playlistform = PlaylistVideoForm()
# Get messages
videos = PlaylistVideo.objects.filter(event_id=sel_event_id).order_by('-index')
# Dump template
return admin_render(request, "admin_screenshow/playlist.html", {
'selected_event_id': int(sel_event_id),
'videos': videos,
'playlistform': playlistform,
})
@staff_access_required
def playlist_edit(request, sel_event_id, video_id):
# Check for permissions
if not request.user.has_perm('screenshow.change_playlistvideo'):
raise Http403
# Get initial data
playlist = get_object_or_404(PlaylistVideo, pk=video_id)
# Check for form data
if request.method == 'POST':
playlistform = PlaylistVideoForm(request.POST, instance=playlist)
if playlistform.is_valid():
v = playlistform.save()
logger.info('Video "{}" edited on playlist.'.format(v.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:playlist', args=(sel_event_id,)))
else:
playlistform = PlaylistVideoForm(instance=playlist)
# Dump template
return admin_render(request, "admin_screenshow/playlist_edit.html", {
'selected_event_id': int(sel_event_id),
'video_id': int(video_id),
'playlistform': playlistform,
})
@staff_access_required
def playlist_delete(request, sel_event_id, video_id):
# Check for permissions
if not request.user.has_perm('screenshow.delete_playlistvideo'):
raise Http403
# Attempt to delete
try:
v = PlaylistVideo.objects.get(pk=video_id)
v.delete()
logger.info('Video "{}" deleted from playlist.'.format(v.name),
extra={'user': request.user, 'event_id': sel_event_id})
except PlaylistVideo.DoesNotExist:
pass
# Dump template
return HttpResponseRedirect(reverse('manage-screenshow:playlist', args=(sel_event_id,)))
@staff_access_required
def ircmessages(request, sel_event_id):
# Get messages
messages = IRCMessage.objects.filter(event_id=sel_event_id)
# Dump template
return admin_render(request, "admin_screenshow/ircmessages.html", {
'selected_event_id': int(sel_event_id),
'messages': messages,
})
@staff_access_required
def ircmessage_edit(request, sel_event_id, message_id):
# Check for permissions
if not request.user.has_perm('screenshow.change_ircmessage'):
raise Http403
# Get initial data
message = get_object_or_404(IRCMessage, pk=message_id)
# Check for form data
if request.method == 'POST':
messageform = IRCMessageForm(request.POST, instance=message)
if messageform.is_valid():
messageform.save()
logger.info('IRC Message {} edited'.format(message.id),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:ircmessages', args=(sel_event_id,)))
else:
messageform = IRCMessageForm(instance=message)
# Dump template
return admin_render(request, "admin_screenshow/ircmessage_edit.html", {
'selected_event_id': int(sel_event_id),
'message_id': int(message_id),
'messageform': messageform,
})
@staff_access_required
def ircmessage_delete(request, sel_event_id, message_id):
# Check for permissions
if not request.user.has_perm('screenshow.delete_ircmessage'):
raise Http403
# Attempt to delete
try:
IRCMessage.objects.get(pk=message_id).delete()
logger.info('IRC Message {} deleted.'.format(message_id),
extra={'user': request.user, 'event_id': sel_event_id})
except IRCMessage.DoesNotExist:
pass
# Dump template
return HttpResponseRedirect(reverse('manage-screenshow:ircmessages', args=(sel_event_id,)))
@staff_access_required
def messages(request, sel_event_id):
# Check for form data
if request.method == 'POST':
# Check for permissions
if not request.user.has_perm('screenshow.add_message'):
raise Http403
# Handle data
messageform = MessageForm(request.POST)
if messageform.is_valid():
data = messageform.save(commit=False)
data.event_id = sel_event_id
data.save()
logger.info('Message added.',
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:messages', args=(sel_event_id,)))
else:
messageform = MessageForm()
# Get messages
messages = Message.objects.filter(event_id=sel_event_id)
# Dump template
return admin_render(request, "admin_screenshow/messages.html", {
'selected_event_id': int(sel_event_id),
'messageform': messageform,
'messages': messages,
})
@staff_access_required
def message_edit(request, sel_event_id, message_id):
# Check for permissions
if not request.user.has_perm('screenshow.change_message'):
raise Http403
# Get initial data
message = get_object_or_404(Message, pk=message_id)
# Check for form data
if request.method == 'POST':
messageform = MessageForm(request.POST, instance=message)
if messageform.is_valid():
messageform.save()
logger.info('Message edited.',
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:messages', args=(sel_event_id,)))
else:
messageform = MessageForm(instance=message)
# Dump template
return admin_render(request, "admin_screenshow/message_edit.html", {
'selected_event_id': int(sel_event_id),
'message_id': int(message_id),
'messageform': messageform,
})
@staff_access_required
def message_delete(request, sel_event_id, message_id):
# Check for permissions
if not request.user.has_perm('screenshow.delete_message'):
raise Http403
# Attempt to delete
try:
Message.objects.get(pk=message_id).delete()
logger.info('Message deleted.',
extra={'user': request.user, 'event_id': sel_event_id})
except Message.DoesNotExist:
pass
    # Redirect back to the message list
return HttpResponseRedirect(reverse('manage-screenshow:messages', args=(sel_event_id,)))
@staff_access_required
def sponsors(request, sel_event_id):
# Check for form data
if request.method == 'POST':
# Check for permissions
if not request.user.has_perm('screenshow.add_sponsor'):
raise Http403
# Handle data
sponsorform = SponsorForm(request.POST, request.FILES)
if sponsorform.is_valid():
data = sponsorform.save(commit=False)
data.event_id = sel_event_id
data.save()
logger.info('Sponsor "{}" added.'.format(data.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:sponsors', args=(sel_event_id,)))
else:
sponsorform = SponsorForm()
# Get sponsors
sponsors = Sponsor.objects.filter(event_id=sel_event_id)
# Dump template
return admin_render(request, "admin_screenshow/sponsors.html", {
'selected_event_id': int(sel_event_id),
'sponsorform': sponsorform,
'sponsors': sponsors,
})
@staff_access_required
def sponsor_edit(request, sel_event_id, sponsor_id):
# Check for permissions
if not request.user.has_perm('screenshow.change_sponsor'):
raise Http403
# Get initial data
sponsor = get_object_or_404(Sponsor, pk=sponsor_id)
# Check for form data
if request.method == 'POST':
sponsorform = SponsorForm(request.POST, request.FILES, instance=sponsor)
if sponsorform.is_valid():
s = sponsorform.save()
logger.info('Sponsor "{}" edited.'.format(s.name),
extra={'user': request.user, 'event_id': sel_event_id})
return HttpResponseRedirect(reverse('manage-screenshow:sponsors', args=(sel_event_id,)))
else:
sponsorform = SponsorForm(instance=sponsor)
# Dump template
return admin_render(request, "admin_screenshow/sponsor_edit.html", {
'selected_event_id': int(sel_event_id),
'sponsor_id': int(sponsor_id),
'sponsorform': sponsorform,
})
@staff_access_required
def sponsor_delete(request, sel_event_id, sponsor_id):
# Check for permissions
if not request.user.has_perm('screenshow.delete_sponsor'):
raise Http403
# Attempt to delete
try:
        sponsor = Sponsor.objects.get(pk=sponsor_id)
        # Remove the logo file from disk (if one exists) before deleting the database row
        if sponsor.logo:
            full_name = os.path.join(settings.MEDIA_ROOT, sponsor.logo.name)
            if os.path.exists(full_name):
                sponsor.logo.delete()
        sponsor.delete()
logger.info('Sponsor "{}" deleted.'.format(sponsor.name),
extra={'user': request.user, 'event_id': sel_event_id})
except Sponsor.DoesNotExist:
pass
    # Redirect back to the sponsor list
return HttpResponseRedirect(reverse('manage-screenshow:sponsors', args=(sel_event_id,)))
```
#### File: Instanssi/admin_store/views.py
```python
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404
from django.template import loader
from django.forms import inlineformset_factory
from django.db.models import Count
from Instanssi.common.http import Http403
from Instanssi.common.auth import staff_access_required
from Instanssi.admin_base.misc.custom_render import admin_render
from Instanssi.admin_store.forms import StoreItemForm, TaItemExportForm, StoreItemVariantForm
from Instanssi.store.models import StoreItem, StoreItemVariant, StoreTransaction, TransactionItem
from Instanssi.kompomaatti.models import Event
# Logging related
import logging
logger = logging.getLogger(__name__)
@staff_access_required
def index(request):
return admin_render(request, "admin_store/index.html", {})
@staff_access_required
def export(request):
if request.method == 'POST':
form = TaItemExportForm(request.POST)
if form.is_valid():
return HttpResponseRedirect(reverse('manage-store:transactions_csv', args=(form.cleaned_data['event'],)))
else:
form = TaItemExportForm()
return admin_render(request, "admin_store/export.html", {'form': form})
@staff_access_required
def amounts(request):
item_tree = []
# TODO: This is a really quickly made thing; needs optimizing.
for event in Event.objects.iterator():
counts = TransactionItem.objects\
.filter(item__event=event)\
.exclude(transaction__time_paid=None)\
.values('item')\
.annotate(Count('item'))
if not counts:
continue
item_list = []
for c in counts:
if not c['item']:
continue
# Find item description
item = StoreItem.objects.get(pk=c['item'])
# Find available variants (if any) and count them
variants = TransactionItem.objects\
.filter(item=c['item']) \
.exclude(transaction__time_paid=None)\
.values('variant')\
.annotate(Count('variant'))
variant_list = []
for v in variants:
if not v['variant']:
continue
variant = StoreItemVariant.objects.get(pk=v['variant'])
variant_list.append({
'sold_variant': variant,
'count': v['variant__count']
})
# Add everything to a list for template
item_list.append({
'sold_item': item,
'count': c['item__count'],
'variants': variant_list,
})
# Add the event & item list to outgoing template data
item_tree.append({
'event': event,
'items': item_list
})
# Render response
return admin_render(request, "admin_store/amounts.html", {
'item_tree': item_tree
})
@staff_access_required
def items(request):
StoreItemFormSet = inlineformset_factory(
parent_model=StoreItem, model=StoreItemVariant, form=StoreItemVariantForm, extra=5)
# Handle form data
if request.method == 'POST':
if not request.user.has_perm('store.add_storeitem'):
raise Http403
item_form = StoreItemForm(request.POST, request.FILES)
variant_formset = StoreItemFormSet(request.POST, prefix="nested", instance=item_form.instance)
if item_form.is_valid() and variant_formset.is_valid():
item = item_form.save()
variant_formset.save()
logger.info('Store Item "{}" added.'.format(item.name), extra={'user': request.user})
return HttpResponseRedirect(reverse('manage-store:items'))
else:
item_form = StoreItemForm()
variant_formset = StoreItemFormSet(prefix="nested", instance=item_form.instance)
# Get items
m_items = StoreItem.objects.all()
# Render response
return admin_render(request, "admin_store/items.html", {
'items': m_items,
'item_form': item_form,
'variant_formset': variant_formset
})
@staff_access_required
def status(request):
if not request.user.has_perm('store.view_storetransaction'):
raise Http403
transactions = StoreTransaction.objects.all()
# Render response
return admin_render(request, "admin_store/status.html", {
'transactions': transactions,
})
@staff_access_required
def tis_csv(request, event_id):
if not request.user.has_perm('store.view_storetransaction'):
raise Http403
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="instanssi_store.csv"'
t = loader.get_template('admin_store/tis_csv.txt')
c = {
'data': TransactionItem.objects.filter(item__event=event_id, transaction__time_paid__isnull=False),
}
response.write(t.render(c))
return response
@staff_access_required
def tis(request):
if not request.user.has_perm('store.view_storetransaction'):
raise Http403
items = TransactionItem.objects.all()
# Render response
return admin_render(request, "admin_store/tis.html", {
'items': items,
})
@staff_access_required
def transaction_status(request, transaction_id):
if not request.user.has_perm('store.view_storetransaction'):
raise Http403
# Get transaction
transaction = get_object_or_404(StoreTransaction, pk=transaction_id)
# Get items
items = transaction.get_transaction_items()
# Render response
return admin_render(request, "admin_store/transactionstatus.html", {
'transaction_id': int(transaction_id),
'transaction': transaction,
'items': items,
})
@staff_access_required
def edit_item(request, item_id):
if not request.user.has_perm('store.change_storeitem'):
raise Http403
StoreItemFormSet = inlineformset_factory(
parent_model=StoreItem, model=StoreItemVariant, form=StoreItemVariantForm, extra=3)
# Get Item
item = get_object_or_404(StoreItem, pk=item_id)
# Handle form data
if request.method == 'POST':
variant_formset = StoreItemFormSet(request.POST, instance=item)
item_form = StoreItemForm(request.POST, request.FILES, instance=item)
if item_form.is_valid() and variant_formset.is_valid():
item_form.save()
variant_formset.save()
logger.info('Store Item "{}" edited.'.format(item.name), extra={'user': request.user})
return HttpResponseRedirect(reverse('manage-store:edit_item', args=(item.id,)))
else:
item_form = StoreItemForm(instance=item)
variant_formset = StoreItemFormSet(instance=item)
# Render response
return admin_render(request, "admin_store/itemedit.html", {
'item_form': item_form,
'variant_formset': variant_formset
})
@staff_access_required
def delete_item(request, item_id):
# Check for permissions
if not request.user.has_perm('store.delete_storeitem'):
raise Http403
# Delete entry
try:
item = StoreItem.objects.get(id=item_id)
if item.num_sold() == 0:
item.delete()
logger.info('Store Item "{}" deleted.'.format(item.name), extra={'user': request.user})
except StoreItem.DoesNotExist:
pass
return HttpResponseRedirect(reverse('manage-store:items'))
```
#### File: Instanssi/admin_upload/forms.py
```python
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder
from .models import UploadedFile
import os
from django.core.exceptions import ValidationError
class UploadForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UploadForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset(
'Lataa',
'description',
'file',
ButtonHolder(
Submit('submit', 'Tallenna')
)
)
)
def field_format_ok(self, fname, allowed):
return os.path.splitext(self.cleaned_data[fname].name)[1][1:].lower() in allowed
def clean_file(self):
# Check format
allowed = ['png', 'jpg', 'gif', 'zip', 'rar', '7z', 'gz', 'tar', 'bz2', 'odt', 'odp', 'doc', 'docx', 'pdf',
'txt', 'ppt', 'pptx', 'xls', 'xlsx']
if not self.field_format_ok("file", allowed):
raise ValidationError('Tiedostotyyppi ei ole sallittu. Sallitut formaatit: {}.'.format(', '.join(allowed)))
# Return
return self.cleaned_data['file']
class Meta:
model = UploadedFile
fields = ('description', 'file')
```
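A quick illustration of the extension check that `field_format_ok` performs; the filename and the allowed list below are arbitrary samples, not values from the project.

```python
import os

allowed = ['png', 'jpg', 'pdf']
ext = os.path.splitext('report.PDF')[1][1:].lower()
print(ext)             # 'pdf'
print(ext in allowed)  # True
```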
#### File: Instanssi/api/serializers.py
```python
import logging
import os
from django.db import transaction
from django.utils import timezone
from django.contrib.auth.models import User
from rest_framework.serializers import SerializerMethodField, Serializer, EmailField,\
CharField, IntegerField, ChoiceField, BooleanField, ValidationError, ModelSerializer, RelatedField,\
ListField, PrimaryKeyRelatedField
from Instanssi.store.methods import PaymentMethod
from Instanssi.store.handlers import validate_item, validate_payment_method, create_store_transaction, \
TransactionException
from Instanssi.kompomaatti.models import Event, Competition, Compo, Entry, CompetitionParticipation, TicketVoteCode, \
VoteCodeRequest, VoteGroup
from Instanssi.ext_programme.models import ProgrammeEvent
from Instanssi.screenshow.models import NPSong, Sponsor, Message, IRCMessage
from Instanssi.store.models import StoreItem, StoreItemVariant, TransactionItem
logger = logging.getLogger(__name__)
class CompoForeignKey(PrimaryKeyRelatedField):
def get_queryset(self):
return Compo.objects.filter(active=True, event__name__startswith='Instanssi')
class CompetitionForeignKey(PrimaryKeyRelatedField):
def get_queryset(self):
return Competition.objects.filter(active=True, event__name__startswith='Instanssi')
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name', 'email')
class EventSerializer(ModelSerializer):
class Meta:
model = Event
fields = ('id', 'name', 'date', 'mainurl')
class CompetitionSerializer(ModelSerializer):
class Meta:
model = Competition
fields = ('id', 'event', 'name', 'description', 'participation_end', 'start', 'end', 'score_type',
'score_sort', 'show_results')
extra_kwargs = {}
class CompetitionParticipationSerializer(ModelSerializer):
rank = SerializerMethodField()
score = SerializerMethodField()
disqualified = SerializerMethodField()
disqualified_reason = SerializerMethodField()
def get_disqualified_reason(self, obj):
if obj.competition.show_results:
return obj.disqualified_reason
return None
def get_disqualified(self, obj):
if obj.competition.show_results:
return obj.disqualified
return None
def get_rank(self, obj):
if obj.competition.show_results:
return obj.get_rank()
return None
def get_score(self, obj):
if obj.competition.show_results:
return obj.get_formatted_score()
return None
class Meta:
model = CompetitionParticipation
fields = ('id', 'competition', 'participant_name', 'score', 'rank', 'disqualified', 'disqualified_reason')
extra_kwargs = {}
class CompoSerializer(ModelSerializer):
class Meta:
model = Compo
fields = ('id', 'event', 'name', 'description', 'adding_end', 'editing_end', 'compo_start', 'voting_start',
'voting_end', 'max_source_size', 'max_entry_size', 'max_image_size', 'source_format_list',
'entry_format_list', 'image_format_list', 'show_voting_results', 'entry_view_type', 'is_votable',
'is_imagefile_allowed', 'is_imagefile_required')
extra_kwargs = {}
class CompoEntrySerializer(ModelSerializer):
entryfile_url = SerializerMethodField()
sourcefile_url = SerializerMethodField()
imagefile_original_url = SerializerMethodField()
imagefile_thumbnail_url = SerializerMethodField()
imagefile_medium_url = SerializerMethodField()
rank = SerializerMethodField()
score = SerializerMethodField()
disqualified = SerializerMethodField()
disqualified_reason = SerializerMethodField()
def get_entryfile_url(self, obj):
        if obj.entryfile and (obj.compo.show_voting_results or obj.compo.has_voting_started()):
return self.context['request'].build_absolute_uri(obj.entryfile.url)
return None
def get_sourcefile_url(self, obj):
        if obj.sourcefile and (obj.compo.show_voting_results or obj.compo.has_voting_started()):
return self.context['request'].build_absolute_uri(obj.sourcefile.url)
return None
def get_imagefile_original_url(self, obj):
if obj.imagefile_original:
return self.context['request'].build_absolute_uri(obj.imagefile_original.url)
return None
def get_imagefile_medium_url(self, obj):
if obj.imagefile_medium:
return self.context['request'].build_absolute_uri(obj.imagefile_medium.url)
return None
def get_imagefile_thumbnail_url(self, obj):
if obj.imagefile_thumbnail:
return self.context['request'].build_absolute_uri(obj.imagefile_thumbnail.url)
return None
def get_disqualified_reason(self, obj):
if obj.compo.has_voting_started():
return obj.disqualified_reason
return None
def get_disqualified(self, obj):
if obj.compo.has_voting_started():
return obj.disqualified
return None
def get_rank(self, obj):
if obj.compo.show_voting_results:
return obj.get_rank()
return None
def get_score(self, obj):
if obj.compo.show_voting_results:
return obj.get_score()
return None
class Meta:
model = Entry
fields = ('id', 'compo', 'name', 'description', 'creator', 'platform', 'entryfile_url', 'sourcefile_url',
'imagefile_original_url', 'imagefile_thumbnail_url', 'imagefile_medium_url', 'youtube_url',
'disqualified', 'disqualified_reason', 'score', 'rank')
extra_kwargs = {}
class UserCompetitionParticipationSerializer(ModelSerializer):
competition = CompetitionForeignKey()
def validate_competition(self, competition):
if not competition.active:
raise ValidationError("Kilpailu ei ole aktiivinen")
return competition
def validate(self, data):
competition = data.get('competition')
if not competition:
competition = self.instance.competition
# Check competition edits and additions
if not competition.is_participating_open():
raise ValidationError("Kilpailun osallistumisaika on päättynyt")
data = super(UserCompetitionParticipationSerializer, self).validate(data)
has_changed = self.instance and self.instance.competition.id != competition.id
if not self.instance or has_changed:
obj = CompetitionParticipation.objects.filter(
competition=competition, user=self.context['request'].user).first()
if obj:
raise ValidationError("Olet jo osallistunut tähän kilpailuun")
return data
class Meta:
model = CompetitionParticipation
fields = ('id', 'competition', 'participant_name')
extra_kwargs = {
'id': {'read_only': True},
}
class UserCompoEntrySerializer(ModelSerializer):
compo = CompoForeignKey()
entryfile_url = SerializerMethodField()
sourcefile_url = SerializerMethodField()
imagefile_original_url = SerializerMethodField()
imagefile_thumbnail_url = SerializerMethodField()
imagefile_medium_url = SerializerMethodField()
def get_entryfile_url(self, obj):
        if obj.entryfile and (obj.compo.show_voting_results or obj.compo.has_voting_started()):
return self.context['request'].build_absolute_uri(obj.entryfile.url)
return None
def get_sourcefile_url(self, obj):
        if obj.sourcefile and (obj.compo.show_voting_results or obj.compo.has_voting_started()):
return self.context['request'].build_absolute_uri(obj.sourcefile.url)
return None
def get_imagefile_original_url(self, obj):
if obj.imagefile_original:
return self.context['request'].build_absolute_uri(obj.imagefile_original.url)
return None
def get_imagefile_medium_url(self, obj):
if obj.imagefile_medium:
return self.context['request'].build_absolute_uri(obj.imagefile_medium.url)
return None
def get_imagefile_thumbnail_url(self, obj):
if obj.imagefile_thumbnail:
return self.context['request'].build_absolute_uri(obj.imagefile_thumbnail.url)
return None
def validate_compo(self, compo):
if not compo.active:
raise ValidationError("Kompoa ei ole olemassa")
return compo
def _validate_file(self, file, accept_formats, accept_formats_readable, max_size, max_readable_size):
errors = []
# Make sure the file size is within limits
if file.size > max_size:
errors.append("Maksimi sallittu tiedostokoko on {}".format(max_readable_size))
# Make sure the file extension seems correct
ext = os.path.splitext(file.name)[1][1:]
if ext.lower() not in accept_formats:
errors.append("Sallitut tiedostotyypit ovat {}".format(accept_formats_readable))
return errors
def validate(self, data):
data = super(UserCompoEntrySerializer, self).validate(data)
compo = data.get('compo')
if not compo:
compo = self.instance.compo
# Check adding & editing time
if not self.instance and not compo.is_adding_open():
raise ValidationError("Kompon lisäysaika on päättynyt")
if self.instance and not compo.is_editing_open():
raise ValidationError("Kompon muokkausaika on päättynyt")
# Aggro if image field is missing but required
if not data.get('imagefile_original') and compo.is_imagefile_required:
raise ValidationError({'imagefile_original': ["Kuvatiedosto tarvitaan tälle kompolle"]})
# Also aggro if image field is supplied but not allowed
if data.get('imagefile_original') and not compo.is_imagefile_allowed:
raise ValidationError({'imagefile_original': ["Kuvatiedostoa ei tarvita tälle kompolle"]})
# Required validation function arguments for each field
errors = {}
check_files_on = {
'entryfile': (
compo.entry_format_list, compo.readable_entry_formats,
compo.max_entry_size, compo.readable_max_entry_size
),
'sourcefile': (
compo.source_format_list, compo.readable_source_formats,
compo.max_source_size, compo.readable_max_source_size
),
'imagefile_original': (
compo.image_format_list, compo.readable_image_formats,
compo.max_image_size, compo.readable_max_image_size
)
}
# Validate each file, and aggregate all errors to a nice dict of lists. This way we can return all possible
# errors at once instead of user having to try again and again.
for key, args in check_files_on.items():
file = data.get(key)
if not file:
continue
field_errors = self._validate_file(file, *args)
if field_errors:
errors[key] = field_errors
if errors:
raise ValidationError(errors)
return data
@staticmethod
def _maybe_copy_entry_to_image(instance):
""" If necessary, copy entryfile to imagefile for thumbnail data """
if instance.compo.is_imagefile_copied:
name = str('th_' + os.path.basename(instance.entryfile.name))
instance.imagefile_original.save(name, instance.entryfile)
def create(self, validated_data):
instance = super(UserCompoEntrySerializer, self).create(validated_data)
self._maybe_copy_entry_to_image(instance)
return instance
def update(self, instance, validated_data):
instance = super(UserCompoEntrySerializer, self).update(instance, validated_data)
self._maybe_copy_entry_to_image(instance)
return instance
class Meta:
model = Entry
fields = ('id', 'compo', 'name', 'description', 'creator', 'platform', 'entryfile', 'imagefile_original', 'sourcefile',
'entryfile_url', 'sourcefile_url', 'imagefile_original_url', 'imagefile_thumbnail_url',
'imagefile_medium_url', 'disqualified', 'disqualified_reason',)
extra_kwargs = {
'id': {'read_only': True},
'entryfile_url': {'read_only': True},
'sourcefile_url': {'read_only': True},
'imagefile_original_url': {'read_only': True},
'imagefile_thumbnail_url': {'read_only': True},
'imagefile_medium_url': {'read_only': True},
'disqualified': {'read_only': True},
'disqualified_reason': {'read_only': True},
'entryfile': {'write_only': True, 'required': True},
'sourcefile': {'write_only': True},
'imagefile_original': {'write_only': True}
}
class TicketVoteCodeSerializer(ModelSerializer):
ticket_key = CharField(min_length=8, trim_whitespace=True, source='key')
def validate(self, data):
data = super(TicketVoteCodeSerializer, self).validate(data)
obj = TicketVoteCode.objects.filter(event=data['event'],
associated_to=self.context['request'].user).first()
if obj:
raise ValidationError("Äänestyskoodi on jo hankittu")
# Check if key is already used, return error if it is
key = data['key']
try:
TicketVoteCode.objects.get(event=data['event'], ticket__key__startswith=key)
raise ValidationError({'ticket_key': ['Lippuavain on jo käytössä!']})
except TicketVoteCode.DoesNotExist:
pass
# Check if key exists at all
try:
TransactionItem.objects.get(
item__event=data['event'], # Must match event
item__is_ticket=True, # Must be ticket
key__startswith=key, # Must start with inserted code
transaction__time_paid__isnull=False) # Must be paid
except TransactionItem.DoesNotExist:
raise ValidationError({'ticket_key': ['Pyydettyä lippuavainta ei ole olemassa!']})
return data
@transaction.atomic
def create(self, validated_data):
ticket_key = validated_data.pop('key')
instance = super(TicketVoteCodeSerializer, self).create(validated_data)
instance.ticket = TransactionItem.objects.get(
item__event=validated_data['event'],
item__is_ticket=True,
key__startswith=ticket_key)
instance.time = timezone.now()
instance.save()
return instance
class Meta:
model = TicketVoteCode
fields = ('id', 'event', 'time', 'ticket_key')
extra_kwargs = {
'event': {'required': True},
'time': {'read_only': True}
}
class VoteCodeRequestSerializer(ModelSerializer):
def validate(self, data):
event = data.get('event')
if not event:
event = self.instance.event
data = super(VoteCodeRequestSerializer, self).validate(data)
# If content has changed or is new, make sure to test for uniqueness
has_changed = self.instance and self.instance.event.id != event.id
if not self.instance or has_changed:
obj = VoteCodeRequest.objects.filter(
event=event, user=self.context['request'].user).first()
if obj:
raise ValidationError("Äänestyskoodipyyntö on jo olemassa")
return data
class Meta:
model = VoteCodeRequest
fields = ('id', 'event', 'text', 'status')
extra_kwargs = {
'event': {'required': True},
'text': {'required': True},
'status': {'read_only': True},
}
class VoteGroupSerializer(ModelSerializer):
entries = ListField(
min_length=1,
child=PrimaryKeyRelatedField(
queryset=Entry.objects.filter(
compo__active=True,
disqualified=False)))
def validate_entries(self, entries):
# Fail if not unique entries
ids = [entry.id for entry in entries]
if len(ids) > len(set(ids)):
raise ValidationError("Voit äänestää entryä vain kerran")
return entries
def validate(self, data):
data = super(VoteGroupSerializer, self).validate(data)
compo = data['compo']
entries = data['entries']
user = self.context['request'].user
# Make sure compo voting is open
if not compo.is_voting_open():
raise ValidationError("Kompon äänestysaika ei ole voimassa")
# Make sure user has rights to vote
try:
TicketVoteCode.objects.get(associated_to=user, event=compo.event)
except TicketVoteCode.DoesNotExist:
try:
VoteCodeRequest.objects.get(user=user, event=compo.event, status=1)
except VoteCodeRequest.DoesNotExist:
raise ValidationError("Äänestysoikeus puuttuu")
# Make sure entries belong to the requested compo
for entry in entries:
if entry.compo.id != compo.id:
raise ValidationError({'entries': ["Entry '{}' ei kuulu kompoon '{}'".format(entry, compo)]})
return data
@transaction.atomic
def create(self, validated_data):
entries = validated_data.pop('entries')
compo = validated_data['compo']
user = validated_data['user']
# Delete old entries (if any) and add new ones
group = VoteGroup.objects.filter(compo=compo, user=user).first()
if group:
group.delete_votes()
else:
group = super(VoteGroupSerializer, self).create(validated_data)
# Add new voted entries
group.create_votes(entries)
# That's that. Return the group.
return group
class Meta:
model = VoteGroup
fields = ('compo', 'entries',)
extra_kwargs = {
'compo': {'required': True},
'entries': {'required': True},
}
class SongSerializer(ModelSerializer):
class Meta:
model = NPSong
fields = ('id', 'event', 'title', 'artist', 'time', 'state')
extra_kwargs = {
'state': {'read_only': True},
'time': {'read_only': True},
'id': {'read_only': True},
}
def create(self, validated_data):
# Set old playing songs to stopped
NPSong.objects.filter(event=validated_data['event'], state=0).update(state=1)
# Add new song, set state to playing
song = NPSong(**validated_data)
song.state = 0
song.time = timezone.now()
song.save()
return song
class ProgrammeEventSerializer(ModelSerializer):
class Meta:
model = ProgrammeEvent
fields = ('id', 'event', 'start', 'end', 'description', 'title', 'presenters', 'presenters_titles',
'place')
extra_kwargs = {}
class SponsorSerializer(ModelSerializer):
logo_url = SerializerMethodField()
logo_scaled_url = SerializerMethodField()
def get_logo_url(self, obj):
return self.context['request'].build_absolute_uri(obj.logo.url)
def get_logo_scaled_url(self, obj):
return self.context['request'].build_absolute_uri(obj.logo_scaled.url)
class Meta:
model = Sponsor
fields = ('id', 'event', 'name', 'logo_url', 'logo_scaled_url')
extra_kwargs = {}
class MessageSerializer(ModelSerializer):
class Meta:
model = Message
fields = ('id', 'event', 'show_start', 'show_end', 'text')
extra_kwargs = {}
class IRCMessageSerializer(ModelSerializer):
class Meta:
model = IRCMessage
fields = ('id', 'event', 'date', 'nick', 'message')
extra_kwargs = {}
class StoreItemVariantSerializer(ModelSerializer):
class Meta:
model = StoreItemVariant
fields = ('id', 'name')
class StoreItemSerializer(ModelSerializer):
imagefile_original_url = SerializerMethodField()
imagefile_thumbnail_url = SerializerMethodField()
discount_factor = SerializerMethodField()
variants = StoreItemVariantSerializer(many=True)
def get_imagefile_original_url(self, obj):
if not obj.imagefile_original:
return None
return self.context['request'].build_absolute_uri(obj.imagefile_original.url)
def get_imagefile_thumbnail_url(self, obj):
if not obj.imagefile_thumbnail:
return None
return self.context['request'].build_absolute_uri(obj.imagefile_thumbnail.url)
def get_discount_factor(self, obj):
return obj.get_discount_factor()
class Meta:
model = StoreItem
fields = ('id', 'event', 'name', 'description', 'price', 'max', 'available', 'imagefile_original_url',
'imagefile_thumbnail_url', 'max_per_order', 'sort_index', 'discount_amount', 'discount_percentage',
'is_discount_available', 'discount_factor', 'num_available', 'variants')
extra_kwargs = {}
class StoreTransactionItemSerializer(Serializer):
item_id = IntegerField()
variant_id = IntegerField(allow_null=True)
amount = IntegerField(min_value=1)
def validate(self, data):
data = super(StoreTransactionItemSerializer, self).validate(data)
try:
validate_item(data)
except TransactionException as e:
raise ValidationError(str(e))
return data
class StoreTransactionSerializer(Serializer):
first_name = CharField(max_length=64)
last_name = CharField(max_length=64)
company = CharField(allow_blank=True, max_length=128)
email = EmailField(max_length=255)
telephone = CharField(allow_blank=True, max_length=64)
mobile = CharField(allow_blank=True, max_length=64)
street = CharField(max_length=128)
postal_code = CharField(max_length=16)
city = CharField(max_length=64)
country = CharField(max_length=2)
information = CharField(allow_blank=True, max_length=1024)
payment_method = ChoiceField(choices=[e.value for e in PaymentMethod])
read_terms = BooleanField()
discount_key = CharField(allow_blank=True, required=False, max_length=32)
items = StoreTransactionItemSerializer(many=True, required=True)
save = BooleanField(default=False)
def validate_read_terms(self, value):
if not value:
raise ValidationError("Käyttöehdot tulee hyväksyä ennen kuin tilausta voidaan jatkaa")
return value
def validate_items(self, value):
if not value:
raise ValidationError("Ostoskorissa on oltava vähintään yksi tuote")
serializer = StoreTransactionItemSerializer(data=value, many=True)
serializer.is_valid(raise_exception=True)
return value
def validate(self, data):
data = super(StoreTransactionSerializer, self).validate(data)
try:
validate_payment_method(data['items'], PaymentMethod(data['payment_method']))
except TransactionException as e:
raise ValidationError(str(e))
return data
def create(self, validated_data):
return create_store_transaction(validated_data)
```
#### File: Instanssi/arkisto/models.py
```python
from django.db import models
from Instanssi.kompomaatti.models import Event
class OtherVideoCategory(models.Model):
event = models.ForeignKey(Event, verbose_name='Tapahtuma', on_delete=models.PROTECT)
name = models.CharField('Nimi', max_length=64, help_text='Kategorian nimi')
def __str__(self):
return self.name
class Meta:
verbose_name = "videokategoria"
verbose_name_plural = "videokategoriat"
class OtherVideo(models.Model):
category = models.ForeignKey(OtherVideoCategory, verbose_name='Kategoria', on_delete=models.CASCADE)
name = models.CharField('Nimi', max_length=64, help_text='Videon nimi.')
description = models.TextField('Kuvaus', help_text='Videon kuvaus.')
youtube_url = models.URLField('Youtube URL', help_text="Linkki teoksen Youtube-versioon.", blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "muu video"
verbose_name_plural = "muut videot"
```
#### File: Instanssi/common/auth.py
```python
from django.http import HttpResponseRedirect
from Instanssi.common.http import Http403
from django.urls import reverse
def user_access_required(view_func):
def _checklogin(request, *args, **kwargs):
if request.user.is_authenticated and request.user.is_active:
return view_func(request, *args, **kwargs)
return HttpResponseRedirect(reverse('users:login')+'?next='+request.get_full_path())
return _checklogin
def staff_access_required(view_func):
def _checklogin(request, *args, **kwargs):
if request.user.is_authenticated and request.user.is_active:
if request.user.is_staff:
return view_func(request, *args, **kwargs)
raise Http403
return HttpResponseRedirect(reverse('users:login')+'?next='+request.get_full_path())
return _checklogin
def su_access_required(view_func):
def _checklogin(request, *args, **kwargs):
if request.user.is_authenticated and request.user.is_active:
if request.user.is_staff and request.user.is_superuser:
return view_func(request, *args, **kwargs)
raise Http403
return HttpResponseRedirect(reverse('users:login')+'?next='+request.get_full_path())
return _checklogin
def infodesk_access_required(view_func):
def _checklogin(request, *args, **kwargs):
if request.user.is_authenticated and request.user.is_active:
if request.user.has_perm('store.change_storetransaction'):
return view_func(request, *args, **kwargs)
raise Http403
return HttpResponseRedirect(reverse('users:login')+'?next='+request.get_full_path())
return _checklogin
```
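A hypothetical view showing how the decorators above are meant to be applied; the view name and template path are placeholders, not part of the project.

```python
from django.shortcuts import render
from Instanssi.common.auth import staff_access_required

@staff_access_required
def example_dashboard(request):
    # Only reached by logged-in, active staff users; anonymous users are
    # redirected to the login page and non-staff users get Http403.
    return render(request, 'example/dashboard.html', {})
```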
#### File: Instanssi/dblog/handlers.py
```python
from logging import Handler
class DBLogHandler(Handler, object):
def __init__(self):
super(DBLogHandler, self).__init__()
def emit(self, record):
from .models import DBLogEntry as _LogEntry
entry = _LogEntry()
entry.level = record.levelname
entry.message = self.format(record)
entry.module = record.name
        # 'event'/'event_id' and 'user' are optional extras on the log record
        try:
            entry.event = record.event
        except AttributeError:
            try:
                entry.event_id = record.event_id
            except AttributeError:
                pass
        try:
            entry.user = record.user
        except AttributeError:
            pass
entry.save()
```
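The handler reads optional `user` and `event_id` attributes off the log record, which is how the `extra={...}` arguments used by the admin views end up in the database. A minimal wiring sketch, assuming a configured Django context; the logger name and event id are placeholders.

```python
import logging
from django.contrib.auth.models import User
from Instanssi.dblog.handlers import DBLogHandler

logger = logging.getLogger('manage-screenshow')
logger.setLevel(logging.INFO)
logger.addHandler(DBLogHandler())

# In the admin views the user comes from request.user; here we just grab any user.
user = User.objects.first()
logger.info('Message added.', extra={'user': user, 'event_id': 5})
```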
#### File: Instanssi/dblog/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from Instanssi.kompomaatti.models import Event
class DBLogEntry(models.Model):
user = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)
event = models.ForeignKey(Event, blank=True, null=True, on_delete=models.PROTECT)
date = models.DateTimeField(auto_now_add=True)
module = models.CharField(max_length=64, blank=True)
level = models.CharField(max_length=10)
message = models.TextField()
def __str__(self):
if len(self.message) > 64:
return '{} ...'.format(self.message[:64])
else:
return self.message
class Meta:
verbose_name = "lokimerkintä"
verbose_name_plural = "lokimerkinnät"
```
#### File: kompomaatti/misc/sizeformat.py
```python
def sizeformat(size):
kb = 1024
mb = kb * 1024
gb = mb * 1024
if size > gb:
return '{} Gt'.format(round(size/gb, 2))
if size > mb:
return '{} Mt'.format(round(size/mb, 2))
if size > kb:
return '{} Kt'.format(round(size/kb, 2))
return '{} t'.format(size)
```
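A quick usage sketch of `sizeformat`, with byte counts chosen to hit each branch (output shown for Python 3, where `/` is true division); the import path is assumed from the repo layout.

```python
from Instanssi.kompomaatti.misc.sizeformat import sizeformat

print(sizeformat(512))              # '512 t'
print(sizeformat(10 * 1024))        # '10.0 Kt'
print(sizeformat(5 * 1024 * 1024))  # '5.0 Mt'
print(sizeformat(3 * 1024 ** 3))    # '3.0 Gt'
```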
#### File: kompomaatti/misc/time_formatting.py
```python
from . import awesometime
def compo_times_formatter(compo):
compo.compo_time = awesometime.format_single(compo.compo_start)
compo.adding_time = awesometime.format_single(compo.adding_end)
compo.editing_time = awesometime.format_single(compo.editing_end)
compo.voting_time = awesometime.format_between(compo.voting_start, compo.voting_end)
return compo
def competition_times_formatter(competition):
competition.start_time = awesometime.format_single(competition.start)
competition.participation_end_time = awesometime.format_single(competition.participation_end)
return competition
```
#### File: kompomaatti/templatetags/kompomaatti_base_tags.py
```python
from django import template
from Instanssi.kompomaatti.models import Compo, Competition, Event
register = template.Library()
@register.inclusion_tag('kompomaatti/tags/compo_nav_items.html')
def render_base_compos_nav(event_id):
return {
'event_id': event_id,
'compos': Compo.objects.filter(active=True, event_id=event_id)
}
@register.inclusion_tag('kompomaatti/tags/competition_nav_items.html')
def render_base_competitions_nav(event_id):
return {
'event_id': event_id,
'competitions': Competition.objects.filter(active=True, event_id=event_id)
}
@register.inclusion_tag('kompomaatti/tags/count.html')
def render_base_compos_count(event_id):
return {'count': Compo.objects.filter(active=True, event_id=event_id).count()}
@register.inclusion_tag('kompomaatti/tags/count.html')
def render_base_competitions_count(event_id):
return {'count': Competition.objects.filter(active=True, event_id=event_id).count()}
@register.simple_tag
def event_name(event_id):
try:
event = Event.objects.get(pk=event_id)
return event.name
except Event.DoesNotExist:
pass
return ''
```
#### File: Instanssi/kompomaatti/views.py
```python
from Instanssi.common.http import Http403
from Instanssi.common.auth import user_access_required
from Instanssi.common.rest import rest_api, RestResponse
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.urls import reverse
from django.utils import timezone
from Instanssi.kompomaatti.forms import VoteCodeRequestForm, ParticipationForm,\
EntryForm, TicketVoteCodeAssocForm
from Instanssi.kompomaatti.models import Event, VoteCodeRequest, TicketVoteCode, Compo, Entry,\
Vote, CompetitionParticipation, Competition, Profile, VoteGroup
from Instanssi.kompomaatti.misc.time_formatting import compo_times_formatter, competition_times_formatter
from Instanssi.kompomaatti.misc import awesometime, entrysort
from Instanssi.kompomaatti.misc.events import get_upcoming
from Instanssi.store.models import TransactionItem
def eventselect(request):
try:
latest_event = Event.objects.latest('id')
except Event.DoesNotExist:
return render(request, 'kompomaatti/event_select.html', {})
return HttpResponseRedirect(reverse('km:index', args=(latest_event.pk,)))
def index(request, event_id):
event = get_object_or_404(Event, pk=event_id)
# Add urls and formatted timestamps to event list
events = []
for event in get_upcoming(event):
event['formatted_time'] = awesometime.format_single(event['date'])
if event['type'] == 1:
event['url'] = reverse('km:compo', args=(event_id, event['id'],))
elif event['type'] == 2:
event['url'] = reverse('km:competition', args=(event_id, event['id'],))
else:
event['url'] = None
# Add to list
events.append(event)
# Check if user has an associated vote code
votecode_associated = False
if request.user.is_authenticated:
# See if ticket is used as votecode
try:
TicketVoteCode.objects.get(event=event_id, associated_to=request.user)
votecode_associated = True
except TicketVoteCode.DoesNotExist:
pass
# See if votecode request is accepted
try:
VoteCodeRequest.objects.get(event=event_id, user=request.user, status=1)
votecode_associated = True
except VoteCodeRequest.DoesNotExist:
pass
else:
votecode_associated = True
# Get compos the user has not yet voted on
not_voted_on = []
if request.user.is_active and request.user.is_authenticated and votecode_associated:
for compo in Compo.objects.filter(event=event_id, active=True):
if compo.is_voting_open():
if Vote.objects.filter(user=request.user, compo=compo).count() == 0:
not_voted_on.append(compo)
# Has profile already been checked and saved
profile_checked = False
if request.user.is_authenticated and Profile.objects.filter(user=request.user).exists():
profile_checked = True
# All done, dump template
return render(request, 'kompomaatti/index.html', {
'sel_event_id': int(event_id),
'events': events,
'not_voted_on': not_voted_on,
'votecode_associated': votecode_associated,
'profile_checked': profile_checked
})
def compos(request, event_id):
# Get compos, format times
compo_list = []
for compo in Compo.objects.filter(active=True, event_id=int(event_id)):
compo_list.append(compo_times_formatter(compo))
# Dump the template
return render(request, 'kompomaatti/compos.html', {
'sel_event_id': int(event_id),
'compos': compo_list,
})
def compo_details(request, event_id, compo_id):
# Get compo
compo = compo_times_formatter(get_object_or_404(Compo, pk=compo_id, active=True, event=event_id))
# Check if user may vote (voting open, user has code)
can_vote = False
if request.user.is_active and request.user.is_authenticated:
# See if ticket is used as votecode
try:
TicketVoteCode.objects.get(associated_to=request.user, event=event_id)
can_vote = True
except TicketVoteCode.DoesNotExist:
pass
if not can_vote:
try:
VoteCodeRequest.objects.get(user=request.user, event=event_id, status=1)
can_vote = True
except VoteCodeRequest.DoesNotExist:
pass
# Handle entry adding
if request.method == 'POST' and compo.is_adding_open():
# Make sure user is authenticated
if not request.user.is_active or not request.user.is_authenticated:
raise Http403
# Handle data
entryform = EntryForm(request.POST, request.FILES, compo=compo)
if entryform.is_valid():
entry = entryform.save(commit=False)
entry.user = request.user
entry.compo = compo
entry.save()
return HttpResponseRedirect(reverse('km:compo', args=(event_id, compo_id,)))
else:
entryform = EntryForm(compo=compo)
# Get entries, and only show them if voting has started
# (only show results if it has been allowed in model)
all_entries = []
    if compo.has_voting_started():
if compo.show_voting_results:
all_entries = entrysort.sort_by_score(Entry.objects.filter(compo=compo))
else:
all_entries = Entry.objects.filter(compo=compo).order_by('name')
# Stuff for users that have logged in
my_entries = []
has_voted = False
if request.user.is_active and request.user.is_authenticated:
# Get all entries added by the user
my_entries = Entry.objects.filter(compo=compo, user=request.user)
# Check if user has already voted
if Vote.objects.filter(user=request.user, compo=compo).count() > 0:
has_voted = True
# Dump template
return render(request, 'kompomaatti/compo_details.html', {
'sel_event_id': int(event_id),
'compo': compo,
'entryform': entryform,
'can_vote': can_vote,
'all_entries': all_entries,
'my_entries': my_entries,
'has_voted': has_voted,
})
@user_access_required
def compo_vote(request, event_id, compo_id):
# Make sure the user has an active votecode or ticket votecode
can_vote = False
try:
TicketVoteCode.objects.get(associated_to=request.user, event=event_id)
can_vote = True
except TicketVoteCode.DoesNotExist:
pass
if not can_vote:
try:
VoteCodeRequest.objects.get(user=request.user, event=event_id, status=1)
can_vote = True
except VoteCodeRequest.DoesNotExist:
pass
if not can_vote:
raise Http403
# Get compo
compo = get_object_or_404(Compo, pk=int(compo_id))
# Make sure voting is open
if not compo.is_voting_open():
raise Http403
# Get votes cast by user
votes = Vote.objects.filter(user=request.user, compo=compo).order_by('rank')
# Check if user has already voted
has_voted = False
if votes.count() > 0:
has_voted = True
# Check if we have data!
if request.method == 'POST':
# Get as list, convert to ints
results = []
_results = request.POST.getlist('results[]')
for result in _results:
results.append(int(result))
# Make sure we have right amount of entries (more than 0)
if len(results) < 1:
return HttpResponse("On äänestettävä vähintään yhtä entryä.")
# Make sure there are no id's twice
_checked = []
for id in results:
if id in _checked:
return HttpResponse("Syötevirhe!")
else:
_checked.append(id)
# See that all id's are entries belonging to this compo
entries = []
try:
for entry_id in results:
entry = Entry.objects.get(compo=compo, disqualified=False, id=entry_id)
entries.append(entry)
except Entry.DoesNotExist:
return HttpResponse("Syötevirhe")
# Delete old entries (if any) and add new ones
group = VoteGroup.objects.filter(compo=compo, user=request.user).first()
if group:
group.delete_votes()
else:
group = VoteGroup.objects.create(
user=request.user,
compo=compo
)
# Add new voted entries
group.create_votes(entries)
# Return success message
return HttpResponse("0")
# Get entries. If user hasn't voted yet, make sure the entries are in random order to minimize bias
# If user has already voted, sort entries in previously voted order.
nvoted_entries = []
voted_entries = []
if has_voted:
# Get voted entries. Add to "voted" list
for vote in votes:
if not vote.entry.disqualified:
voted_entries.append(vote.entry)
# Get all entries
_nvoted_entries = Entry.objects.filter(compo=compo, disqualified=False).order_by('?')
for entry in _nvoted_entries:
if entry not in voted_entries:
nvoted_entries.append(entry)
else:
nvoted_entries = Entry.objects.filter(compo=compo, disqualified=False).order_by('?')
# Dump template
return render(request, 'kompomaatti/compo_vote.html', {
'sel_event_id': int(event_id),
'compo': compo,
'voted_entries': voted_entries,
'nvoted_entries': nvoted_entries,
'has_voted': has_voted,
})
@user_access_required
def compoentry_edit(request, event_id, compo_id, entry_id):
# Get compo
compo = get_object_or_404(Compo, pk=int(compo_id))
# Check if user is allowed to edit
if timezone.now() >= compo.editing_end:
raise Http403
# Get entry (make sure the user owns it, too)
entry = get_object_or_404(Entry, pk=int(entry_id), compo=compo, user=request.user)
# Handle entry adding
if request.method == 'POST':
entryform = EntryForm(request.POST, request.FILES, instance=entry, compo=compo)
if entryform.is_valid():
entryform.save()
return HttpResponseRedirect(reverse('km:compo', args=(event_id, compo_id,)))
else:
entryform = EntryForm(instance=entry, compo=compo)
# Dump template
return render(request, 'kompomaatti/entry_edit.html', {
'sel_event_id': int(event_id),
'compo': compo,
'entry': entry,
'entryform': entryform,
})
@user_access_required
def compoentry_delete(request, event_id, compo_id, entry_id):
# Get compo
compo = get_object_or_404(Compo, pk=int(compo_id))
# Check if user is allowed to edit
if timezone.now() >= compo.adding_end:
raise Http403
# Get entry (make sure the user owns it, too)
entry = get_object_or_404(Entry, pk=int(entry_id), compo=compo, user=request.user)
# Delete entry
entry.delete()
# Redirect
return HttpResponseRedirect(reverse('km:compo', args=(event_id, compo_id,)))
def competitions(request, event_id):
# Get competitions
competitions = []
for competition in Competition.objects.filter(active=True, event_id=int(event_id)):
competitions.append(competition_times_formatter(competition))
# Dump the template
return render(request, 'kompomaatti/competitions.html', {
'sel_event_id': int(event_id),
'competitions': competitions,
})
def competition_details(request, event_id, competition_id):
# Get competition
competition = competition_times_formatter(get_object_or_404(Competition, pk=int(competition_id), active=True, event_id=int(event_id)))
# Check if user can participate (deadline not caught yet)
can_participate = False
if timezone.now() < competition.participation_end:
can_participate = True
# Handle signup form
if request.method == 'POST' and can_participate:
# Make sure user is authenticated
if not request.user.is_active or not request.user.is_authenticated:
raise Http403
# Handle post data
participationform = ParticipationForm(request.POST)
if participationform.is_valid():
p = participationform.save(commit=False)
p.competition = competition
p.user = request.user
p.save()
return HttpResponseRedirect(reverse('km:competition', args=(event_id, competition_id,)))
else:
participationform = ParticipationForm()
# Get all participants
participants = CompetitionParticipation.objects.filter(competition=competition)
# Check if user has participated
signed_up = False
participation = None
if request.user.is_active and request.user.is_authenticated:
try:
participation = CompetitionParticipation.objects.get(competition=competition, user=request.user)
signed_up = True
except CompetitionParticipation.DoesNotExist:
pass
# All done, dump template
return render(request, 'kompomaatti/competition_details.html', {
'sel_event_id': int(event_id),
'competition': competition,
'participation': participation,
'signed_up': signed_up,
'can_participate': can_participate,
'participationform': participationform,
'participants': participants,
})
@user_access_required
def competition_signout(request, event_id, competition_id):
# Get competition
competition = get_object_or_404(Competition, pk=int(competition_id))
# Check if user is still allowed to sign up
if timezone.now() >= competition.participation_end:
raise Http403
# Delete participation
try:
CompetitionParticipation.objects.get(competition=competition, user=request.user).delete()
except CompetitionParticipation.DoesNotExist:
pass
# Redirect
return HttpResponseRedirect(reverse('km:competition', args=(event_id, competition_id,)))
def entry_details(request, event_id, compo_id, entry_id):
# Get compo
compo = get_object_or_404(Compo, pk=int(compo_id))
# Make sure voting has started before allowing this page to be shown
if timezone.now() < compo.voting_start:
raise Http404
# Get entry
entry = get_object_or_404(Entry, pk=int(entry_id), compo=compo)
# Render
return render(request, 'kompomaatti/entry_details.html', {
'sel_event_id': int(event_id),
'entry': entry,
'compo': compo,
})
@user_access_required
@rest_api
def validate_votecode_api(request, event_id, vote_code):
event = get_object_or_404(Event, pk=event_id)
# Make sure the key length is at least 8 chars before doing anything
if len(vote_code) < 8:
return RestResponse(code=403, error_text='Lippuavain liian lyhyt!')
# Check if key is already used, return error if it is
try:
TicketVoteCode.objects.get(event=event, ticket__key__startswith=vote_code)
return RestResponse(code=403, error_text='Lippuavain on jo käytössä!')
except TicketVoteCode.DoesNotExist:
pass
# Check if key exists
try:
TransactionItem.objects.get(item__event=event, item__is_ticket=True, key__startswith=vote_code)
except TransactionItem.DoesNotExist:
return RestResponse(code=403, error_text='Lippuavainta ei ole olemassa!')
# Everything done. Return default response with code 200 and no error text.
return RestResponse({})
@user_access_required
def votecode(request, event_id):
# Get event
event = get_object_or_404(Event, pk=int(event_id))
# Check if user has the right to vote via separate vote code
reserved_code = None
can_vote = False
votecode_type = None
# Check if user has the right to vote via ticket
try:
ticket_votecode = TicketVoteCode.objects.get(event=event, associated_to=request.user)
reserved_code = ticket_votecode.ticket.key
can_vote = True
votecode_type = "ticket"
except TicketVoteCode.DoesNotExist:
pass
# Check if request for vote code has been made
request_made = False
request_failed = False
try:
vcr = VoteCodeRequest.objects.get(event=event, user=request.user)
if vcr.status == 0:
request_made = True
elif vcr.status == 1:
can_vote = True
votecode_type = 'request'
else:
request_failed = True
except VoteCodeRequest.DoesNotExist:
pass
# Ticket votecode association form
if request.method == 'POST' and 'submit-ticketvcassoc' in request.POST:
ticket_votecode_form = TicketVoteCodeAssocForm(request.POST, event=event, user=request.user)
if ticket_votecode_form.is_valid():
ticket_votecode_form.save()
return HttpResponseRedirect(reverse('km:votecode', args=(event_id,)))
else:
ticket_votecode_form = TicketVoteCodeAssocForm(event=event, user=request.user)
# Votecode Request form
if request.method == 'POST' and 'submit-vcreq' in request.POST:
votecoderequestform = VoteCodeRequestForm(request.POST)
if votecoderequestform.is_valid():
vcr = votecoderequestform.save(commit=False)
vcr.user = request.user
vcr.event = event
vcr.save()
return HttpResponseRedirect(reverse('km:votecode', args=(event_id,)))
else:
votecoderequestform = VoteCodeRequestForm()
# Render
return render(request, 'kompomaatti/votecode.html', {
'sel_event_id': int(event_id),
'votecoderequestform': votecoderequestform,
'ticket_votecode_form': ticket_votecode_form,
'reserved_code': reserved_code,
'can_vote': can_vote,
'votecode_type': votecode_type,
'request_made': request_made,
'request_failed': request_failed
})
```
#### File: Instanssi/store/handlers.py
```python
import uuid
import logging
from django.db import transaction
from django.utils import timezone
from Instanssi.store.methods import paytrail, bitpay, no_method, PaymentMethod
from Instanssi.store.models import StoreTransaction, StoreItem, TransactionItem, StoreItemVariant
logger = logging.getLogger(__name__)
class TransactionException(Exception):
pass
def validate_item(item: dict):
# First, make sure the item exists at all
try:
store_item = StoreItem.items_available().get(id=item['item_id'])
except StoreItem.DoesNotExist:
raise TransactionException("Tuotetta ei ole saatavilla")
# Make sure the variant exists and belongs to the requested item
if item['variant_id']:
try:
store_item.variants.get(id=item['variant_id'])
except StoreItemVariant.DoesNotExist:
raise TransactionException("Tuotetyyppiä ei ole saatavilla")
# Make sure there are enough items in the stock to satisfy this request
if store_item.num_available() < item['amount']:
raise TransactionException("Tuotetta {} ei ole saatavilla riittävästi!".format(store_item.name))
def get_item_and_variant(item: dict) -> (StoreItem, StoreItemVariant):
"""
Return store item and variant (if any).
"""
store_item = StoreItem.items_available().get(pk=item['item_id'])
store_variant = store_item.variants.get(pk=item['variant_id']) if item['variant_id'] else None
return store_item, store_variant
def validate_payment_method(items: list, method: PaymentMethod):
"""
Make sure payment method is okay for the selected order. NO_METHOD is only acceptable when total sum of the order
is 0 eur! Other methods are always acceptable.
"""
if method == PaymentMethod.NO_METHOD:
for item in items:
store_item, store_variant = get_item_and_variant(item)
purchase_price = store_item.get_discounted_unit_price(item['amount'])
if purchase_price > 0:
raise TransactionException("Valittu maksutapa ei ole sallittu tälle tilaukselle!")
def create_store_transaction(data: dict) -> StoreTransaction:
# Handle creation of the order in a transaction to avoid creating crap to db in errors
try:
with transaction.atomic():
ta = StoreTransaction()
ta.firstname = data['first_name']
ta.lastname = data['last_name']
ta.company = data['company']
ta.email = data['email']
ta.telephone = data['telephone']
ta.mobile = data['mobile']
ta.street = data['street']
ta.postalcode = data['postal_code']
ta.city = data['city']
ta.country = data['country']
ta.information = data['information']
ta.time_created = timezone.now()
            ta.key = uuid.uuid4().hex
ta.save()
# Check items
for item in data['items']:
store_item, store_variant = get_item_and_variant(item)
# Find the price with discounts (if any)
purchase_price = store_item.get_discounted_unit_price(item['amount'])
# Form the transaction item(s)
for m in range(item['amount']):
ta_item = TransactionItem()
ta_item.transaction = ta
ta_item.item = store_item
ta_item.variant = store_variant
                    ta_item.key = uuid.uuid4().hex
ta_item.purchase_price = purchase_price
ta_item.original_price = store_item.price
ta_item.save()
return ta
except Exception as e:
logger.error("Unable to save store transaction: %s", str(e))
raise
def begin_payment_process(method: PaymentMethod, ta: StoreTransaction):
return {
PaymentMethod.NO_METHOD: no_method.start_process,
PaymentMethod.BITPAY: bitpay.start_process,
PaymentMethod.PAYTRAIL: paytrail.start_process
}[method](ta)
```
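A sketch of the dict shape these handlers expect, mirroring the fields declared in `StoreTransactionSerializer`; every value below is a placeholder, and it assumes a purchasable store item with id 1 exists.

```python
from Instanssi.store.handlers import validate_item, create_store_transaction

order = {
    'first_name': 'Testi', 'last_name': 'Ostaja', 'company': '',
    'email': 'testi@example.com', 'telephone': '', 'mobile': '',
    'street': 'Esimerkkikatu 1', 'postal_code': '40100', 'city': 'Jyväskylä',
    'country': 'FI', 'information': '',
    'items': [{'item_id': 1, 'variant_id': None, 'amount': 2}],
}
for item in order['items']:
    validate_item(item)  # raises TransactionException if the item is unavailable
ta = create_store_transaction(order)
```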
#### File: store/methods/no_method.py
```python
from django.urls import reverse
from django.shortcuts import render
from Instanssi.store.utils import ta_common
def start_process(ta):
"""
No payment method was required, so just mark everything done right away.
"""
# Since no payment is required, just mark everything done right away
ta.payment_method_name = 'No payment'
ta.save()
ta.refresh_from_db()
ta_common.handle_payment(ta)
# All done, redirect user
return reverse('store:pm:no-method-success')
def handle_success(request):
return render(request, 'store/success.html')
```
#### File: store/utils/paytrail.py
```python
import hashlib
import requests
class PaytrailException(Exception):
pass
def validate_failure(order_no, timestamp, authcode, secret):
m = hashlib.md5()
m.update('{}|{}|{}'.format(order_no, timestamp, secret).encode('UTF-8'))
return authcode == m.hexdigest().upper()
def validate_success(order_no, timestamp, paid, method, authcode, secret):
m = hashlib.md5()
m.update('{}|{}|{}|{}|{}'.format(order_no, timestamp, paid, method, secret).encode('UTF-8'))
return authcode == m.hexdigest().upper()
def request(rid, secret, data):
req = requests.post(
'https://payment.paytrail.com/api-payment/create',
auth=(rid, secret),
json=data,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Verkkomaksut-Api-Version': '1',
})
# Send request, receive response
message = req.json()
# Paytrail responded with error
if req.status_code == 401:
raise PaytrailException(message['errorMessage'], message['errorCode'])
# No response from paytrail (other error)
if req.status_code != 201:
raise PaytrailException('HTTP request failure.', req.status_code)
# Return parsed JSON
return message
```
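A minimal sketch of how the success-return validation could be exercised; the order number, timestamp, status, method and secret are made-up placeholders, and the import path is assumed from the repo layout.

```python
import hashlib
from Instanssi.store.utils.paytrail import validate_success

order_no, timestamp, paid, method = '12345', '1514764800', 'PAID', '1'
secret = 'merchant-secret'  # placeholder, never a real credential
authcode = hashlib.md5(
    '{}|{}|{}|{}|{}'.format(order_no, timestamp, paid, method, secret).encode('UTF-8')
).hexdigest().upper()
print(validate_success(order_no, timestamp, paid, method, authcode, secret))  # True
```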
#### File: users/misc/auth_decorator.py
```python
from django.http import HttpResponseRedirect
from django.urls import reverse
def user_access_required(view_func):
def _checklogin(request, *args, **kwargs):
if request.user.is_authenticated and request.user.is_active:
return view_func(request, *args, **kwargs)
return HttpResponseRedirect(reverse('users:login'))
return _checklogin
``` |
{
"source": "jpotter/angel",
"score": 2
} |
#### File: services/devops/devops_service.py
```python
import os
import re
import sys
import time
from devops.generic_service import GenericService
from angel.util.pidfile import is_pid_in_pidfile_running
from angel.util.checksum import get_md5_of_path_contents
from devops.simple_cache import simple_cache_get, simple_cache_set
from angel.stats.disk_stats import disk_stats_get_usage, disk_stats_get_usage_for_path
import angel.settings
class DevopsService(GenericService):
'''
Manages OS and deploy-level logic; provides some tools for devops use.
'''
HIDDEN_TOOLS = ('update-system-config',)
# This is a devops service that configs OS-level system stuff one-time on install and reports OS-level stats into our status update.
# We do this so that OS-level stats can flow back to our service monitoring system in-line with other metrics.
# It also allows us to report errors on things like free disk space to prevent upgrades from causing system failure.
def trigger_start(self):
# This is a gross place to squeeze this in, but it'll work for now:
if os.path.exists('/sbin/blockdev') and os.path.exists('/dev/md0'):
(out, err, exitcode) = self.getCommandOutput('/sbin/blockdev --report /dev/md0')
if exitcode == 0 and out and out.find('rw 4096') > 0:
print >>sys.stderr, "Setting readahead of /dev/md0 to 128 sectors"
self.runCommand('/sbin/blockdev --setra 128 /dev/md0')
# Override parent class, to avoid spurious "no lockfile" warning:
return 0
def service_start(self):
return 0 # Someday: self.start_supervisor_with_function('devops-manager', self.runDevopsManager)
def stop(self):
return 0 # Override default logic that tries to stop based on _supervisor_pidfile
def decommission_precheck(self):
return True
def decommission(self):
print >>sys.stderr, "Warning: devops.decommission() isn't waiting for logfiles to drain yet."
allowed_drain_time = 600
while self._getPostfixQueueSize() > 0:
time.sleep(10)
allowed_drain_time -= 10
if allowed_drain_time < 0:
print >>sys.stderr, "Postfix queue has messages"
return 1
return self.trigger_stop()
# We override trigger_status instead of service_status to completely bypass the supervisor status checks:
def trigger_status(self):
return self._getStatus()
def shell_tool_linux_nettop(self):
'''Run iftraf
'''
iftraf = self.which('iftraf')
if iftraf is None:
print >>sys.stderr, "Error: can't find iftraf (are you on linux?)"
return 1
print >>sys.stderr, "You will need to add filters:\n" +\
" 0.0.0.0/0:0 --> 172.16.58.3/16:0 E\n" +\
" 10.0.0.0/8:0 --> 10.0.0.0/8:0 E\n" +\
" 0.0.0.0/0:0 --> 0.0.0.0/0:0 I\n"
time.sleep(2)
return self.execCommand(iftraf)
def shell_tool_update_system_config(self):
''' Called by devops system logic after a first-time install or a version switch. '''
# There are a number of services which we don't have service objects for, because they're not part of the devops package (i.e. ubuntu-specific).
# That is, basic Linux tweaks that we do across the board and that don't have any variable settings.
# This logic could be handled by chef or puppet, but we already have a good distribution mechanism, so easy enough to piggy-back on top of it.
# We used to store this logic in debian/postinst, but that doesn't work with our multi-version setup, where we can switch versions without re-running the package scripts.
# (Specifically, auto-download without auto-upgrade means that any conf changes to system-level conf files won't be picked up.) So, instead, we
# tuck the setup logic here and have the devops switch_version() logic call this.
# Only run if we're root:
if os.getuid() != 0:
print >>sys.stderr, "Error: update-system-config needs to be run as root."
return 1
# Only run if we're on linux (cheap and fast way to tell; admittedly not portable):
if not sys.platform.startswith('linux'):
print >>sys.stderr, "Error: update-system-config only runs on Linux."
return 1
# Only run if we're an actual install (i.e. not a git-checkout run):
if not self._angel.is_versioned_install():
print >>sys.stderr, "Error: not a versioned install."
return 1
return self._configAll() # returns 0 on no errors; non-zero otherwise
#-------- Config logic ---------------------------------------------------------------
def _runCommand(self, cmd, silent=False):
if not silent: print >>sys.stderr, "System service running: %s" % cmd
return self.runCommand(cmd)
def _configAll(self):
# Config Linux OS-level components; returning the number of errors.
# VERY important notes:
# 1) All config operations must be idempotent and be safe to run whether we're running or stopped.
# 2) All changes are outside package management, so uninstalling the package will NOT undo any
# modifications this makes.
errors = 0
errors += self._configBase()
errors += self._configPostfix()
errors += self._configNagios()
errors += self._configCollectd()
errors += self._configMonit()
return errors
def _configBase(self):
# Install Linux-based /etc files that tweak the OS as appropriate for this version.
# We copy conf files into various subdirs, instead of just running installConf() on the whole thing,
# so that we can check individual subsystems and restart just those we need to. It also helps prevent us from
# doing really stupid things (like clobbering random files under /etc).
# Kernel parameters:
(dummy, change_count) = self.createConfDirFromTemplates(src_path='sysctl.d', dest_path='/etc/sysctl.d/')
if change_count > 0:
# Make sure any kernel parameter changes are applied
if 0 != self._runCommand('service procps start'):
return 1
# User limits (note these won't apply to any existing processes / shells):
self.createConfDirFromTemplates(src_path='security/limits.d', dest_path='/etc/security/limits.d/')
# Init.d script:
self.createConfDirFromTemplates(src_path='init.d', dest_path='/etc/init.d/')
if not os.path.exists('/etc/rc2.d/S20tappy'):
if 0 != self._runCommand('update-rc.d -f tappy remove && update-rc.d tappy start 20 2 3 4 5 . stop 20 0 1 6 .'):
return 1
# Bash autocomplete:
self.createConfDirFromTemplates(src_path='bash_completion.d', dest_path='/etc/bash_completion.d/')
# Cron:
(dummy, change_count) = self.createConfDirFromTemplates(src_path='cron.d', dest_path='/etc/cron.d/')
if change_count > 0:
if 0 != self._runCommand('service cron restart'):
return 1
return 0
def _configPostfix(self):
(dummy, change_count) = self.createConfDirFromTemplates(src_path='mail-aliases/aliases', dest_path='/etc/aliases')
if change_count > 0:
if 0 != self._runCommand('newaliases'):
return 1
# Check if the hostname has changed -- this can happen if auto-conf sets it after we run through this.
# Failure to re-check this will cause postfix to queue up mail and not deliver it.
hostname_has_changed = False
out, err, exitcode = self.getCommandOutput("/bin/hostname -f")
if 0 == exitcode and os.path.isfile('/etc/mailname'):
try:
current_mailname = open('/etc/mailname').read().rstrip()
if out.rstrip() != current_mailname:
hostname_has_changed = True
except Exception as e:
print >>sys.stderr, "Error: unable to check current postfix mailname (%s)." % e
(dummy, change_count) = self.createConfDirFromTemplates(src_path='postfix', dest_path='/etc/postfix/')
if change_count > 0 or hostname_has_changed:
self._runCommand('hostname -f > /etc/mailname')
self._runCommand('cd /etc/postfix && postmap /etc/postfix/relay_domains')
self._runCommand('cd /etc/postfix && postmap /etc/postfix/transport')
self._runCommand('cd /etc/postfix && postmap /etc/postfix/relay_passwords')
self._runCommand('service postfix start && service postfix reload') # Start in case it's stopped; reload in case it's running
return 0
def _configNagios(self):
(dummy, change_count) = self.createConfDirFromTemplates(src_path='nagios', dest_path='/etc/nagios/')
if change_count > 0:
self._runCommand("perl -pi -e 's:^allowed_hosts:#allowed_hosts:gs' /etc/nagios/nrpe.cfg")
self._runCommand('service nagios-nrpe-server restart')
return 0
def _configCollectd(self):
(dummy, change_count) = self.createConfDirFromTemplates(src_path='collectd', dest_path='/etc/collectd/')
# Always restart collectd -- it dereferences the /usr/bin/chill symlink path, so upgrades require it to be kicked.
# Plus, doing this means we won't hold an old version of the codebase open, avoiding the need for an extra prior version.
self._runCommand('service collectd restart > /dev/null', silent=True)
return 0
def _configMonit(self):
(dummy, change_count) = self.createConfDirFromTemplates(src_path='monit.d', dest_path='/etc/monit.d/')
if change_count > 0:
self._runCommand("perl -pi -e 's:\#\s+include /etc/monit\.d:include /etc/monit\.d:gs' /etc/monit/monitrc")
self._runCommand("perl -pi -e 's:startup=0:startup=1:' /etc/default/monit")
self._runCommand('sysv-rc-conf --level 2345 monit on') # Make sure monit starts on reboot:
if os.path.exists('/var/run/monit.pid'):
self._runCommand('service monit restart')
else:
self._runCommand('service monit start')
return 0
#-------- Devops Manager logic -------------------------------------------------------
def runDevopsManager(self):
''' Process that manages devops-level checks while system is running. '''
print >>sys.stderr, "runDevopsManager: test"
while True:
time.sleep(2)
#-------- Status logic ---------------------------------------------------------------
def _getStatus(self):
# Note that this function is called even when services are stopped.
if not os.path.isfile('/proc/loadavg'):
return self.getStatStruct(message='Not supported on this platform', state=angel.constants.STATE_RUNNING_OK)
# Return a status struct with all our system-level checks.
stat_struct = self.getStatStruct()
self._checkVersion(stat_struct)
self._checkLoad(stat_struct)
self._checkDisk(stat_struct)
self._checkNetwork(stat_struct)
self._checkPostfix(stat_struct)
self._checkCron(stat_struct)
return stat_struct
def _checkVersion(self, stat_struct):
# To-Do: pull latest version from shared storage (e.g. zookeeper) and compare if we're behind newest-known?
try:
self.addDataPointToStatStruct(stat_struct, 'build', self._angel.get_project_code_version())
self.updateStatStruct(stat_struct, message='build %s' % self._angel.get_project_code_version(), state=angel.constants.STATE_RUNNING_OK)
except Exception:
self.updateStatStruct(stat_struct, message='unknown build number', state=angel.constants.STATE_WARN)
def _checkLoad(self, stat_struct):
active_services = filter(lambda x: x, self._angel.get_enabled_or_running_service_objects())
# Find the largest MONITORING_LOAD1_THRESHOLD in all active services that are not None on this node
LOAD_1_WARN_THRESHOLD = max( map(lambda x: x.MONITORING_LOAD1_THRESHOLD, active_services))
LOAD_5_WARN_THRESHOLD = max( map(lambda x: x.MONITORING_LOAD5_THRESHOLD, active_services))
LOAD_15_WARN_THRESHOLD = max( map(lambda x: x.MONITORING_LOAD15_THRESHOLD, active_services))
SHORTTERM_SPIKE_GRACE = max( map(lambda x: x.MONITORING_SHORTTERM_SPIKE_GRACE, active_services))
SHORTTERM_SPIKE_TIME = max( map(lambda x: x.MONITORING_SHORTTERM_SPIKE_TIME, active_services))
# In general, we only want to flag error alerts when the system is hard-down, but a super high load suggests something is seriously borked, so we'll error on that as well:
LOAD_1_ERROR_THRESHOLD = 40
LOAD_5_ERROR_THRESHOLD = 40
LOAD_15_ERROR_THRESHOLD = 40
try:
(load_1, load_5, load_15) = os.getloadavg()
except:
self.updateStatStruct(stat_struct, message="os.getloadavg failed", state=angel.constants.STATE_WARN)
return
# Determine what state we should be in -- we enter "warn" state when the load is higher than threshold+shortterm_spike_grace,
# or if the load is higher than threshold for more than spike_time. This allows us to ignore things like a load5 value > 0.9
# for only a few minutes, but still catch conditions where the load on a box goes high for longer than expected.
# (We could probably skip this if "load60" existed...)
state = angel.constants.STATE_RUNNING_OK
latest_warn_start_time_cache_keyname = 'devops-checkload-latest-warn-start-time'
latest_warn_start_time = simple_cache_get(latest_warn_start_time_cache_keyname)
if load_1 > LOAD_1_WARN_THRESHOLD or load_5 > LOAD_5_WARN_THRESHOLD or load_15 > LOAD_15_WARN_THRESHOLD:
# Then load is in warning state...
shortterm_spike_grace_allowed = SHORTTERM_SPIKE_GRACE
if not latest_warn_start_time:
# ...and we just entered warning state.
simple_cache_set(latest_warn_start_time_cache_keyname, int(time.time()))
else:
if time.time() - latest_warn_start_time > SHORTTERM_SPIKE_TIME:
# We've been in warning state for longer than our grace period:
shortterm_spike_grace_allowed = 0
# Re-check if the warning state, with the spike grace, is enough to trigger an alert:
if load_1 > (LOAD_1_WARN_THRESHOLD+shortterm_spike_grace_allowed) or load_5 > (LOAD_5_WARN_THRESHOLD+shortterm_spike_grace_allowed) or load_15 > (LOAD_15_WARN_THRESHOLD+shortterm_spike_grace_allowed):
state = angel.constants.STATE_WARN
else:
if latest_warn_start_time:
# Then we just transitioned from warn state to ok state, clear the latest warning start time marker:
simple_cache_set(latest_warn_start_time_cache_keyname, None)
if load_1 > LOAD_1_ERROR_THRESHOLD or load_5 > LOAD_5_ERROR_THRESHOLD or load_15 > LOAD_15_ERROR_THRESHOLD:
state = angel.constants.STATE_ERROR
message = ''
if load_1 > LOAD_1_WARN_THRESHOLD: message += "Load 1: %s " % (load_1)
if load_5 > LOAD_5_WARN_THRESHOLD: message += "Load 5: %s " % (load_5)
if load_15 > LOAD_15_WARN_THRESHOLD: message += "Load 15: %s " % (load_15)
self.addDataPointToStatStruct(stat_struct, 'load1', load_1)
self.updateStatStruct(stat_struct, message=message, state=state)
def _checkPathIsOkay(self, path):
# This checks if the kernel has forced the disk into read-only mode due to ext3 errors;
# it happens that the exception for a read-only filesystem takes precedence over "Permission denied" and "Operation not permitted" errors.
try:
path = os.path.expanduser(path)
os.utime(path, (os.stat(path).st_atime, os.stat(path).st_mtime))
except Exception as e:
if 'Permission denied' in str(e) or 'Operation not permitted' in str(e):
return True
else:
print >>sys.stderr, "Error when checking path %s: %s" % (path, e)
return False
return True
def _checkDisk(self, stat_struct):
# Warn thresholds for disk usage:
DISK_PERCENTAGE_WARN_THRESHOLD = 80
DISK_FREE_SPACE_LEFT_WARN_THRESHOLD = 1000 # MB
# Error thresholds for disk usage -- keep in mind that errors should only be reported when the system is actively failing. We'll give a little wiggle room here though...
DISK_PERCENTAGE_ERROR_THRESHOLD = 98
DISK_FREE_SPACE_LEFT_ERROR_THRESHOLD = 250 # MB
# Verify that filesystems backing our project and DATA_DIR are okay:
if not self._checkPathIsOkay(self._angel.get_project_base_dir()) or \
(os.path.exists(self._config['DATA_DIR']) and not self._checkPathIsOkay(self._config['DATA_DIR'])):
self.updateStatStruct(stat_struct, message='Filesystem issue; check dmesg',
state=angel.constants.STATE_ERROR)
# If running software raid, check disks & raid health:
if os.path.exists('/proc/mdstat'):
mdstat_fh = open('/proc/mdstat')
mdstat_fh.readline()
mdstat_data = mdstat_fh.read()
mdstat_fh.close()
if mdstat_data != 'unused devices: <none>\n':
self.mergeStatStructs(stat_struct, self.checkStatusViaNagiosPlugin('check_linux_raid', []))
usage_info = disk_stats_get_usage()
for partition in usage_info:
# Check absolute space:
state = angel.constants.STATE_RUNNING_OK
message = ''
if usage_info[partition]['free_mb'] < DISK_FREE_SPACE_LEFT_WARN_THRESHOLD:
self.updateStatStruct(stat_struct, message="Disk %s: %sMB left" % (partition, usage_info[partition]['free_mb']))
self.updateStatStruct(stat_struct, state=angel.constants.STATE_WARN)
if usage_info[partition]['free_mb'] < DISK_FREE_SPACE_LEFT_ERROR_THRESHOLD:
self.updateStatStruct(stat_struct, state=angel.constants.STATE_ERROR)
if usage_info[partition]['free_inodes'] < 90000:
self.updateStatStruct(stat_struct, message="Disk %s: %s inodes left" % (partition, usage_info[partition]['free_inodes']))
self.updateStatStruct(stat_struct, state=angel.constants.STATE_WARN)
if usage_info[partition]['free_inodes'] < 10000:
self.updateStatStruct(stat_struct, state=angel.constants.STATE_ERROR)
self.updateStatStruct(stat_struct, message=message, state=state)
# Check percentage space:
state = angel.constants.STATE_RUNNING_OK
message = ''
if usage_info[partition]['used_percent'] > DISK_PERCENTAGE_WARN_THRESHOLD:
message = "Disk %s: %s%% full" % (partition, usage_info[partition]['used_percent'])
state = angel.constants.STATE_WARN
if usage_info[partition]['used_percent'] > DISK_PERCENTAGE_ERROR_THRESHOLD:
message = "Disk %s: %s%% full" % (partition, usage_info[partition]['used_percent'])
state = angel.constants.STATE_ERROR
self.updateStatStruct(stat_struct, message=message, state=state)
# Make sure that data_dir is big enough on ec2 nodes -- not ideal to check DATA_DIR this way, but better than nothing for now:
if self._config['DATA_DIR'].startswith('/mnt'):
active_services = self._angel.get_enabled_or_running_service_objects()
try:
min_ok_data_dir_size = max( map(lambda x: x.MIN_OK_DATA_DIR_DISK_SIZE_IN_MB, active_services)) # Find the largest value in all active services on this node
max_ok_data_dir_usage = min( map(lambda x: x.MAX_OK_DATA_DIR_DISK_USAGE, active_services)) # 0 to 1 (0 to 100%)
disk_stats = disk_stats_get_usage_for_path(self._config['DATA_DIR'])
if disk_stats['size_mb'] < min_ok_data_dir_size:
self.updateStatStruct(stat_struct, message="DATA_DIR too small", state=angel.constants.STATE_WARN)
if disk_stats['used_percent'] > (100*max_ok_data_dir_usage):
self.updateStatStruct(stat_struct, message="DATA_DIR at %s%%" % disk_stats['used_percent'], state=angel.constants.STATE_WARN)
except AttributeError:
self.updateStatStruct(stat_struct, message="Can't figure out required DATA_DIR size (invalid service in enabled/running list)", state=angel.constants.STATE_WARN)
def _checkNetwork(self, stat_struct):
# To-Do
# /proc/net/dev
# Ignore first 3 lines; 4th line should be eth0
# (interface, bytes_rec, dummy, dummy, dummy, dummy, dummy, dummy, dummy, bytes_tx, dummy) = split on spaces
good_hosts = ()
bad_hosts = ()
hosts = self._angel.get_all_known_hosts()
for host in hosts:
# Ping with packet size 2000 to make sure jumbo packets are working
(stdout, stderr, exitcode) = self.getCommandOutput(self.which("ping"),args=("-s", 2000, "-c", 1, "-t", 1, host))
if 0 != exitcode:
bad_hosts += (host,)
else:
good_hosts += (host,)
if len(bad_hosts):
self.updateStatStruct(stat_struct, message="%s of %s peers down: %s" %
(len(bad_hosts), len(hosts), ', '.join(bad_hosts)))
else:
self.updateStatStruct(stat_struct, message="%s peers ok" %
len(good_hosts))
return
def _checkPostfix(self, stat_struct):
# We expect postfix to be listening on localhost:25. This is unrelated to EMAIL_HOST:EMAIL_PORT.
smtp_status = self.checkStatusViaNagiosPlugin('check_smtp', ['-H', '127.0.0.1', '-t', '2', '-p', 25])
self.deleteStatFromStatStruct(smtp_status, 'time')
if not self.isStatStructStateOk(smtp_status):
self.updateStatStruct(smtp_status, message="Postfix: %s" % smtp_status['message'], replace_instead_of_append=True)
self.mergeStatStructs(stat_struct, smtp_status)
self._checkPostfixQueueSize(stat_struct)
def _checkPostfixQueueSize(self, stat_struct):
queue_size = simple_cache_get('devops-service-postfix-queue-size', get_function=self._getPostfixQueueSize, get_function_ttl_in_seconds=60)
if queue_size is None:
return self.updateStatStruct(stat_struct, message="Postfix: error checking queue size", state=angel.constants.STATE_UNKNOWN)
self.addDataPointToStatStruct(stat_struct, 'postfix_queue', queue_size)
if queue_size > 0:
self.updateStatStruct(stat_struct, message="Postfix: %s queued messages" % queue_size)
if queue_size > 5:
self.updateStatStruct(stat_struct, state=angel.constants.STATE_WARN)
if queue_size > 1000:
self.updateStatStruct(stat_struct, state=angel.constants.STATE_ERROR)
def _getPostfixQueueSize(self):
queue_size, err, exitcode = self.getCommandOutput("/usr/sbin/postqueue -p | grep '^[A-Z0-9][A-Z0-9][A-Z0-9][A-Z0-9][A-Z0-9]' | wc -l")
# to-do: this will return queue size of 0 and exit code of 0 when postqueue fails -- ie postqueue: fatal: open /etc/postfix/main.cf: No such file or directory
# (grep note: when queue is empty, postqueue 'Mail queue is empty', so grep A-Z0-9 chars 5 times to avoid matching 'Mail '.)
if exitcode != 0:
print >>sys.stderr, "Error: _getPostfixQueueSize failed: %s / %s" % (exitcode, err)
return None
try:
queue_size = int(queue_size)
return queue_size
except:
print >>sys.stderr, "Error: _getPostfixQueueSize cast failed: %s" % queue_size
return None
def _checkCron(self, stat_struct):
cron_pid = '/var/run/crond.pid'
if not os.path.isfile(cron_pid):
self.updateStatStruct(stat_struct, message="Cron pidfile missing", state=angel.constants.STATE_WARN)
return
if not is_pid_in_pidfile_running(cron_pid):
self.updateStatStruct(stat_struct, message="Cron stopped", state=angel.constants.STATE_WARN)
def shell_tool_copy_to_s3(self, path, remote_name=None, verbose=False, silent=False,
s3_bucket=None, s3_key=None, s3_secret=None, s3_region=None):
''' Copy files to a tmp public S3 bucket and return a URL for the contents.
- Note: only prints a URL to STDOUT, so we can backtick this.
* path: file or directory to copy
* remote_name: name of file to copy to in S3 (defaults to a checksum, IP-named, safe filename)
* s3_bucket: bucket to copy to (defaults to DEVOPS_S3_TMP_BUCKET_NAME)
* s3_key: AWS access key to use (defaults to DEVOPS_S3_TMP_BUCKET_ACCESS_KEY_ID)
* s3_secret: AWS secret key to use (defaults to DEVOPS_S3_TMP_BUCKET_SECRET_ACCESS_KEY)
* s3_region: AWS endpoint (defaults to DEVOPS_S3_TMP_BUCKET_REGION)
* verbose: display transfer status
* silent: print as little as possible
'''
# See https://github.com/rlmcpherson/s3gof3r -- would give us faster uploads for this...
# Set defaults if no overrides are given:
if s3_bucket is None:
s3_bucket = self._config['DEVOPS_S3_TMP_BUCKET_NAME']
if s3_key is None:
s3_key = self._config['DEVOPS_S3_TMP_BUCKET_ACCESS_KEY_ID']
if s3_secret is None:
s3_secret = self._config['DEVOPS_S3_TMP_BUCKET_SECRET_ACCESS_KEY']
if s3_region is None:
s3_region = self._config['DEVOPS_S3_TMP_BUCKET_REGION']
# Set up file and path vars:
file = None
if path.startswith('~'):
path = os.path.expanduser(path)
# Figure out which tmp dir to use:
tmp_dir = self._config['TMP_DIR']
try:
tmp_test = os.path.join(tmp_dir, ".devops-tmp-test-%s" % time.time())
open(tmp_test, "w")
os.remove(tmp_test)
except:
tmp_dir = "/tmp"
# Generate a temporary s3cmd config using our settings:
tmp_s3_cfg = '[default]\naccess_key = %s\nsecret_key = %s\nhost_base = %s\n' % (s3_key, s3_secret, s3_region)
tmp_s3_cfg_file = os.path.join(tmp_dir, '.copy-to-s3-cfg-%s' % os.getpid())
try:
open(tmp_s3_cfg_file, 'w').write(tmp_s3_cfg)
except Exception as e:
print >>sys.stderr, "Error: unable to create temp s3cmd config (%s)." % e
return 1
# Check the file we're sending -- or if it's stdin, generate a file:
delete_tmp_file_after_transfer = False # So we know to remove tmp file in stdin case
tmp_file = None
file_is_stdin = False
if '-' == path:
# s3cmd doesn't have a "read from stdin" feature (well, not released yet), so we'll hack this ourselves:
tmp_file = os.path.join(tmp_dir, 'copy-to-s3-stdin-%s-%s' % (int(time.time()), os.getpid()))
delete_tmp_file_after_transfer = True
file_is_stdin = True
try:
tmp_fh = os.open(tmp_file, os.O_WRONLY|os.O_CREAT, 0600)
size = 0
size_warning_printed = False
size_warning_threshold = 1024*1024*20
for input in sys.stdin:
os.write(tmp_fh, input)
size += len(input)
if size > size_warning_threshold and not size_warning_printed:
if not silent:
print >>sys.stderr, "Warning: passing STDIN to copy-to-s3 generates a tmp file equal to the entire size of STDIN; make sure you don't fill up disk."
size_warning_printed = True
os.close(tmp_fh)
if size_warning_printed or verbose:
if not silent:
print >>sys.stderr, "STDIN tmp file created (%s bytes); uploading it to S3 now." % size
if 0 == size:
print >>sys.stderr, "Error: nothing on stdin."
return 1
except Exception as e:
print >>sys.stderr, "Error: unable to generate tmp file %s: %s" % (tmp_file, e)
try:
os.remove(tmp_file)
except:
pass
return 1
file = tmp_file
if os.path.isdir(path):
if path.endswith('/'):
path = path[:-1]
dir_name = os.path.basename(path)
tmp_file = os.path.join(tmp_dir, "%s.tgz" % dir_name)
if os.path.exists(tmp_file):
tmp_file = os.path.join(tmp_dir, "%s-%s.tgz" % (dir_name, int(time.time())))
tar_exit_code = 0
try:
tar_exec = self.which('tar')
tar_args = ('-czf', tmp_file, path)
tar_exit_code = self.execCommand(tar_exec, args=tar_args, run_as_child_and_block=True)
if 0 != tar_exit_code:
print >>sys.stderr, "Error: tar failed (%s) when running %s %s" % (tar_exit_code, tar_exec, ' '.join(tar_args))
return 1
except (Exception, KeyboardInterrupt) as e:
tar_exit_code = -1
finally:
if 0 != tar_exit_code and os.path.isfile(tmp_file):
print >>sys.stderr, "Error: unable to generate temporary tar file (%s)." % (tmp_file)
os.remove(tmp_file)
delete_tmp_file_after_transfer = True
file = tmp_file
if os.path.isfile(path):
file = path
if file is None:
print >>sys.stderr, "Error: path '%s' isn't a file." % path
return 1
upload_size = os.stat(file).st_size
if upload_size <= 0:
print >>sys.stderr, "Error: file %s is empty." % file
return 1
# Generate a semi-useful, safe name:
if remote_name is None:
hostname_part = '-%s' % self._angel.get_project_code_branch()
# Using the branch name can be misleading if we run a different branch in a cluster during testing,
# but it's probably more useful than not to include it.
if '127.0.0.1' != self._angel.get_private_ip_addr():
hostname_part += '-%s' % self._angel.get_private_ip_addr()
checksum_part = ''
checksum = get_md5_of_path_contents(file)
if checksum:
checksum_part = '-%s' % checksum[0:8]
origname_part = ''
if not file_is_stdin:
origname_part = '.'.join(os.path.basename(file).lower().split('.')[:-1])[0:32]
origname_part = '-' + re.sub('[^0-9a-z]+', '-', origname_part)
suffix = ''
if not file_is_stdin:
suffix = file.split('.')[-1].lower()
if suffix == file:
suffix = '.data'
else:
suffix = '.' + suffix
else:
suffix = '.data'
remote_name = 'devops%s%s%s%s' % (hostname_part, checksum_part, origname_part, suffix)
with_progress = False
s3_cmd_flags = ('--no-progress',)
if not silent:
if verbose or upload_size > 1024*1024*2:
with_progress = True
s3_cmd_flags = ('--progress',)
try:
# -rr for reduced redundancy (cheaper)
args = ('--config=%s' % tmp_s3_cfg_file, 'put', file, 's3://%s/%s' % (s3_bucket, remote_name), "-rr") + \
s3_cmd_flags
if not silent and verbose:
print >>sys.stderr, "Running: s3cmd %s" % ' '.join(args)
if with_progress:
if 0 != self.execCommand(self.which('s3cmd'), args=args, run_as_child_and_block=True):
print >>sys.stderr, "Error: s3cmd failed."
return 1
else:
(stdout, stderr, exitcode) = self.getCommandOutput(self.which('s3cmd'), args=args)
if 0 != exitcode:
print >>sys.stderr, stderr
return 1
# To-do: maybe list the object and make sure the size matches?
print "https://%s.%s/%s" % (s3_bucket, s3_region, remote_name)
return 0
finally:
os.remove(tmp_s3_cfg_file)
if delete_tmp_file_after_transfer:
os.remove(tmp_file)
```
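A compact, self-contained sketch of the spike-grace check that `_checkLoad` implements above, with a plain dict standing in for `simple_cache_get`/`simple_cache_set`; the thresholds and names are illustrative only.
```python
import os
import time

_state = {}


def check_load5(warn_threshold=1.0, spike_grace=1.0, spike_time=300):
    """Return 'ok' or 'warn' for the 5-minute load average."""
    load5 = os.getloadavg()[1]
    now = time.time()
    if load5 <= warn_threshold:
        _state.pop('warn_start', None)  # Back under the threshold; clear the marker.
        return 'ok'
    # Remember when the load first crossed the threshold:
    started = _state.setdefault('warn_start', now)
    # The extra grace only applies while we are still inside the spike window:
    grace = spike_grace if (now - started) <= spike_time else 0
    return 'warn' if load5 > warn_threshold + grace else 'ok'
```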
#### File: lib/devops/generic_service.py
```python
import base64
import datetime
import fnmatch
import inspect
import os
import pwd
import re
import shutil
import signal
import socket
import stat
import string
import sys
import time
import urllib2
import angel
import angel.settings
import angel.util.checksum
from devops.stats import *
from devops.file_and_dir_helpers import create_dirs_if_needed, set_file_owner
from angel.util.pidfile import is_pid_running, is_pid_in_pidfile_running, get_pid_from_pidfile, release_pidfile, read_data_from_lockfile, write_pidfile
from devops.process_helpers import launch, launch_via_function, get_command_output, exec_process, run_function_in_background
from devops.unix_helpers import get_pid_relationships, kill_and_wait, hard_kill_all, get_all_children_of_process
class GenericService(object):
# Number of seconds service is allowed to take to start:
# (This controls how long status shows 'starting' as well as minimum length of time a process must be running before 'repair' will do anything.)
ALLOWED_STARTUP_TIME_SECS = 300
# Number of seconds we wait when stop is called before returning an error:
ALLOWED_STOP_TIME_SECS = 600
# Unix signals and timeouts uses for stopping the service:
STOP_SOFT_KILL_SIGNAL = signal.SIGHUP
STOP_SOFT_KILL_TIMEOUT = 30
STOP_HARD_KILL_SIGNAL = signal.SIGTERM
STOP_HARD_KILL_TIMEOUT = 30
# How much disk space do we expect this service to need? If the disk partition that we use is less than this, then status will issue warnings.
MIN_OK_DATA_DIR_DISK_SIZE_IN_MB = 0 # Override as needed in subclasses
# How much free disk space do we require on the DATA_DIR partition, as a percentage? If we go over this, then status will issue warnings.
MAX_OK_DATA_DIR_DISK_USAGE = 0.8
# Thresholds for load monitoring:
MONITORING_LOAD1_THRESHOLD = 4.0
MONITORING_LOAD5_THRESHOLD = 1.0
MONITORING_LOAD15_THRESHOLD = 0.7
MONITORING_SHORTTERM_SPIKE_GRACE = 1.0 # Add this much to the threshold for SPIKE_TIME seconds
MONITORING_SHORTTERM_SPIKE_TIME = 300 # In seconds
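# Example: with MONITORING_LOAD5_THRESHOLD = 1.0 and a spike grace of 1.0, a load5 of up to 2.0 is tolerated
# for MONITORING_SHORTTERM_SPIKE_TIME (300s); after that window the grace is dropped and anything above 1.0 warns.
# (See _checkLoad in the devops service above, which takes the max of these thresholds across active services.)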
SUPERVISOR_NOT_RUNNING_MESSAGE = "supervisor not running"
# List of tools to exclude -- some services may have tools defined by files under their ./server/bin/ directory that must not be executed
DISABLED_TOOLS = ()
# List of tools to hide from tool command, but still allow for calling -- essentially for "unpublished" tools that shouldn't be visible
HIDDEN_TOOLS = ()
# Config dict that holds all key-value settings:
_angel = None
_config = None # This is legacy and should go away, but for now, gives us backwards compatibility
def __init__(self, angel_obj):
self._config = angel_obj.get_settings()
for f in self._config:
if isinstance(self._config[f], basestring) and self._config[f].startswith('~'):
self._config.set(f, os.path.expanduser(self._config[f]))
self._angel = angel_obj
self._supervisor_pidfile = self._angel.get_supervisor_lockpath(self.__class__.__name__)
self._supervisor_statusfile = self._angel.get_supervisor_lockpath(self.__class__.__name__).replace('.lock','') + '.status'
start_command_hint = " [Try: %s]" % self._angel.get_command_for_running(args=('tool', self.getServiceName(), 'start'))
self.SUPERVISOR_NOT_RUNNING_MESSAGE = "supervisor not running%s" % start_command_hint
def service_start(self):
''' Start the service. Make SURE to create lockfile at self._supervisor_pidfile -- that's what defines the service as currently running.'''
raise Exception('service_start() must be implemented in service %s' % self.getServiceName())
def service_status(self):
''' status is expected to return a status struct -- see self.getStatStruct(). '''
return self.checkStatusViaPid()
def service_stop(self, server_pid):
# Stop service using the server pid that our supervisor daemon is waiting on.
# server_pid is the process id of the server process itself (e.g. postgresql postmaster; apache2; varnish); NOT the supervisor process.
# Note that this is called from within the same running instance of the code that started the service;
# that is, upgrades to this logic won't be applied to already-running instances.
return self.killUsingPid(server_pid)
def service_reload(self, is_code_changed, is_conf_changed, flush_caches_requested):
''' This is normally called when the code has been upgraded or the conf has changed.
In testing conditions, it may also be called if the data has been changed -- i.e. data in the DB has been reset and any caches should be cleared.
- Reloading a service MUST not cause any downtime (i.e. this is called on prod with live traffic flowing).
- Reload is called with is_conf_changed=True whenever the config has changed and a service may need to update itself (i.e. new IP addresses in varnish)
- When code has changed, it's very likely that our base dir will be under a new path on this run (i.e. package-based installs during upgrades)
'''
return 0 # by default, don't do anything.
def rotateLogs(self):
if not self.isServiceRunning():
print >>sys.stderr, "Log rotation: service %s not running; need to move files aside, otherwise nothing to do." % self.getServiceName()
return 0
supervisor_pid = get_pid_from_pidfile(self._supervisor_pidfile)
if supervisor_pid is None:
print >>sys.stderr, "Warning: skipping logfile rotation (can't find supervisor pid for service %s)." % self.getServiceName()
return -2
try:
os.kill(supervisor_pid, signal.SIGWINCH) # Send supervisor SIGWINCH to tell it to roll over STDOUT / STDERR files.
except Exception as e:
print >>sys.stderr, "Warning: skipping logfile rotation (failed to send SIGWINCH to supervisor pid %s: %s)." % (supervisor_pid, e)
return -3
# To-do: check that all files in dir have been rolled over?
return 0
def switchToRegularMode(self):
''' Called when system is coming out off offline / maintenance mode. Most services should ignore this. '''
return 0
def switchToMaintenanceMode(self):
''' Called when system is going into offline / maintenance mode. Most services should ignore this. '''
return 0
def service_repair(self):
''' Called when the service status shows errors and 'service repair' is triggered.
This can be overriden in your subclass to do something smarter; restarting() is a reasonable default for now.
'''
return self.trigger_restart()
def decommission_precheck(self):
''' Check if the service can be decommissioned. Return True if and only if this service can support a call to decommission. '''
return False
def decommission(self):
''' Tell the service that the node is being decommissioned, and that the service should transfer data away and transition traffic elsewhere.
MUST NOT RETURN until any necessary data has been completed.
For example, if the service needs to finish pushing events to another service,
the queue must be satisfactorily drained before this returns.
- The node will be marked as decommissioned regardless of the return of this function; this function should
only be called once, ever.
- The decommission_precheck() call can be used to prevent a call to decommission().
- A return of 0 means that all data for the service has been processed and that
there is no data for the service on this node that we care about any longer.
- A non-zero return will fail the decommission call.
'''
# By default, we can't be decommissioned -- this way if a service fails to implement this, we don't hose ourselves:
return 1
def trigger_start(self):
''' Called during service start -- don't override this; override service_start() instead. '''
if self._angel.is_decommissioned():
print >>sys.stderr, "Error: can't start service on a decomissioned node."
return -1
if self._config['RUN_AS_USER']:
if 0 != os.getuid() and os.getuid() != pwd.getpwnam(self._config['RUN_AS_USER']).pw_uid:
print >>sys.stderr, "Error: can't start service as current user. Try sudo?"
return -2
self.setSupervisorStatusMessage(None) # Make sure to clear out any stale message (i.e. why a last start failed)
ret_val = self.service_start()
if not os.path.isfile(self._supervisor_pidfile) and ret_val == 0:
print >>sys.stderr, "Error: start failed to create lockfile '%s'." % self._supervisor_pidfile
return -3
return ret_val
def trigger_restart(self):
''' Restart the service. '''
self.trigger_stop() # Trigger stop will invoke service_stop via unix signal, so we'll stop using the current running version; and then start with this version.
return self.service_start()
def trigger_status(self):
''' Manage calls to service_status().
Updates the state from Error to Starting if the service is expected to still be coming up.
'''
daemon_info = self.getDaemonLockfileInfo()
if daemon_info is None or 'pid' not in daemon_info:
reason = self.getSupervisorStatusMessage()
if reason is None:
reason = self.SUPERVISOR_NOT_RUNNING_MESSAGE
return self.getStatStruct(message=reason, state=angel.constants.STATE_STOPPED)
struct = self.service_status()
if self.isStatStructStateOk(struct):
return struct
# If state isn't OK, then we'll check to see if the supervisor has any reasons to append to the status check:
if daemon_info is not None:
reason = self.getSupervisorStatusMessage()
if reason is not None:
self.updateStatStruct(struct, replace_instead_of_append=True, message=reason)
# If we're just starting, always force the state to be STARTING (unless we're already at an OK/WARN level):
if 'uptime' in daemon_info:
if daemon_info['uptime'] < self.ALLOWED_STARTUP_TIME_SECS:
# Then daemon manager has only been up for a short while -- assume starting up:
self.updateStatStruct(struct, replace_instead_of_append=True, state=angel.constants.STATE_STARTING)
# We'll also check if iptables is blocking us, via the linux-netdown tool:
if os.path.isfile(self._get_linux_netup_filename()):
self.updateStatStruct(struct, message='[see linux-netdown tool!]')
return struct
def trigger_stop(self):
''' Called by service stop -- don't override this; override service_stop() instead. '''
if self._angel.is_decommissioned():
# Don't override this -- if a node is in the process of decommissioning, stopping services underneath the decommissioning logic could be Very Bad.
print >>sys.stderr, "Error: can't stop services on a decommissioned node."
return -1
# Stop our supervisor process, which will in turn run any stop logic as defined in service_stop():
daemon_pid = get_pid_from_pidfile(self._supervisor_pidfile)
if daemon_pid is None:
print >>sys.stderr, "Warning: service %s isn't running; skipping stop request (no lockfile?)." % (self.getServiceName())
return 0
ret_val = 0
if not is_pid_running(daemon_pid):
print >>sys.stderr, "Error: went to stop supervisor daemon %s, but pid isn't running." % daemon_pid
try:
os.remove(self._supervisor_pidfile)
print >>sys.stderr, "Error: went to stop supervisor daemon %s, but pid isn't running; stale lock file removed." % daemon_pid
except Exception as e:
print >>sys.stderr, "Error: went to stop supervisor daemon %s, but pid isn't running; unable to remove lockfile (%s)" % (daemon_pid, e)
return -1
try:
os.kill(daemon_pid, signal.SIGTERM)
except Exception as e:
print >>sys.stderr, "Error: failed to send SIGTERM to supervisor daemon %s: %s" % (daemon_pid, e)
return -2
wait_time = self.ALLOWED_STOP_TIME_SECS
if wait_time < (self.STOP_SOFT_KILL_TIMEOUT + self.STOP_HARD_KILL_TIMEOUT):
wait_time = self.STOP_SOFT_KILL_TIMEOUT + self.STOP_HARD_KILL_TIMEOUT + 10
print >>sys.stderr, "ALLOWED_STOP_TIME_SECS %s too short (soft timeout: %s, hard timeout: %s); setting to %s." % (self.ALLOWED_STOP_TIME_SECS, self.STOP_SOFT_KILL_TIMEOUT, self.STOP_HARD_KILL_TIMEOUT, wait_time)
try:
while is_pid_running(daemon_pid):
time.sleep(0.5)
wait_time -= 0.5
if wait_time < 0:
print >>sys.stderr, "Error: %s failed to stop within %s seconds; process %s still running." % (self.getServiceName(), self.ALLOWED_STOP_TIME_SECS, daemon_pid)
ret_val = -3
break
except KeyboardInterrupt:
print >>sys.stderr, "\nInterrupted while waiting for process %s to quit. (It should exit eventually but may leave a stale lockfile; should be ok.)" % daemon_pid
return -4
except Exception as e:
print >>sys.stderr, "Error: aborted while waiting for %s to stop; process %s still running: %s" % (self.getServiceName(), daemon_pid, e)
return -5
return ret_val
def trigger_repair(self):
''' Called by service repair -- don't override this; override service_repair() instead. '''
# To-Do: check that we're not in shutdown mode?
# If service isn't running, start it:
if not self.isServiceRunning():
print >>sys.stderr, "Repair: service %s not running; starting it." % self.getServiceName()
return self.trigger_start()
# If service is running, check if last startup time is less than ALLOWED_STARTUP_TIME_SECS seconds ago; if so, don't do anything:
time_since_last_start = self._get_seconds_since_last_child_start()
if time_since_last_start is not None and time_since_last_start < self.ALLOWED_STARTUP_TIME_SECS:
print >>sys.stderr, "Repair: service %s last started %s seconds ago; skipping repair." % (self.getServiceName(), int(time_since_last_start))
return 0
if self.getServiceState() == angel.constants.STATE_ERROR:
# Don't use self.isServiceStateOk -- that'll return false during startup and shutdown states.
print >>sys.stderr, "Repair: service %s has errors; attempting to fix it." % self.getServiceName()
return self.service_repair()
return 0
def trigger_reload(self, is_code_changed, is_conf_changed, flush_caches_requested):
''' Called by service management -- don't override this; override service_reload() instead. '''
if not self._config['DEFAULT_SERVICE_RELOAD_ON_UPGRADE']:
return 0
reload_config_setting_name = self.getServiceName().upper() + '_SERVICE_RELOAD_ON_UPGRADE'
if reload_config_setting_name in self._config:
if not self._config[reload_config_setting_name] or self._config[reload_config_setting_name].lower() == 'false':
# Check for 'false' string -- there may not be a default setting for the variable to cast the type, so we might be getting a string instead
print >>sys.stderr, "Warning: skipping %s reload; %s is false." % (self.getServiceName(), reload_config_setting_name)
return 0
return self.service_reload(is_code_changed, is_conf_changed, flush_caches_requested)
def get_process_uptime(self):
''' Returns how many seconds the process has been running. Note that the service itself may have been up longer;
if the process crashes we'll restart it.
'''
return self._get_seconds_since_last_child_start()
def get_supervisor_pid(self):
''' Returns the process ID of the running supervisor for this service, or None.
'''
daemon_info = self.getDaemonLockfileInfo()
if daemon_info is not None and 'pid' in daemon_info:
if is_pid_running(daemon_info['pid']):
return daemon_info['pid']
return None
def get_server_process_pid(self):
''' Returns the process ID of the process running under supervisor for this service, or None.
'''
daemon_info = self.getDaemonLockfileInfo()
if daemon_info is not None and angel.constants.LOCKFILE_DATA_CHILD_PID in daemon_info:
if is_pid_running(daemon_info[angel.constants.LOCKFILE_DATA_CHILD_PID]):
return daemon_info[angel.constants.LOCKFILE_DATA_CHILD_PID]
return None
def _get_seconds_since_last_child_start(self):
''' Returns the number of seconds since the supervised process was last started, or None.
Note: not the same thing as when supervisor was started. '''
time_since_last_start = None
daemon_info = self.getDaemonLockfileInfo()
if daemon_info is not None and 'pid' in daemon_info:
if is_pid_running(daemon_info['pid']):
if angel.constants.LOCKFILE_DATA_CHILD_START_TIME in daemon_info:
time_since_last_start = int(time.time() - int(daemon_info[angel.constants.LOCKFILE_DATA_CHILD_START_TIME]))
return time_since_last_start
def isServiceRunning(self):
return is_pid_in_pidfile_running(self._supervisor_pidfile)
def isNamedServiceRunningLocally(self, name_of_service):
''' Is a service with the given name running locally, as tracked by a supervisor pid file?
This is gross -- really, we should be using a directory service to keep track of this,
but for now this buys us a quick fix to shutdown dependency ordering on a local dev node. '''
return is_pid_in_pidfile_running(self._angel.get_supervisor_lockpath(name_of_service))
def isUserIdOkForPort(self, port):
# If we're non-root, make sure we're not trying to bind to a port below 1024:
if 0 != os.getuid() and int(port) < 1024:
print >>sys.stderr, "Can't start service on port %s without being root." % port
return False
return True
def getServiceCodeDir(self):
return os.path.dirname(os.path.realpath(sys.modules[self.__module__].__file__))
def get_service_data_dir(self):
return os.path.join(os.path.expanduser(self._config['DATA_DIR']), self.getServiceName())
def get_service_built_dir(self):
return os.path.join(os.path.expanduser(self._angel.get_project_base_dir()),
'built', 'services', os.path.split(self.getServiceCodeDir())[1])
def getServiceName(self):
''' Return a short human-readable name of service. '''
name = self.__class__.__name__.replace('Service','')
return '-'.join(re.findall('[A-Z][^A-Z]*', name)).lower() # ExampleName -> example-name
def getServiceNameConfStyle(self):
''' Return a short human-readable name of service. '''
name = self.__class__.__name__.replace('Service','')
return '_'.join(re.findall('[A-Z][^A-Z]*', name)).upper() # ExampleName -> EXAMPLE_NAME
def setSupervisorStatusMessage(self, message):
''' Set a status message for our supervisor (or None to unset it). This status message gets displayed as part of status;
it can be set by any process (typically a child worker process of the supervisor). '''
try:
if message is None or len(message) == 0:
if os.path.isfile(self._supervisor_statusfile):
os.remove(self._supervisor_statusfile)
else:
open(self._supervisor_statusfile, 'w').write(message)
except:
print >>sys.stderr, "Warning: couldn't update status message: %s = %s" % (self._supervisor_statusfile, message)
def getSupervisorStatusMessage(self):
if not os.path.isfile(self._supervisor_statusfile):
return None
try:
return open(self._supervisor_statusfile, 'r').read()
except:
return None
def getLockViaPidfile(self, pidfile, print_errors=True):
return write_pidfile(pidfile, os.getpid(), print_errors=print_errors)
def releasePidfile(self, pidfile):
return release_pidfile(pidfile)
def _getLockPidfile(self, lockname):
lockname = re.sub(r'([^\w\-\_])+', '?', lockname)
return os.path.join(os.path.expanduser(self._config['LOCK_DIR']), 'servicelock-%s-%s' % (self.getServiceName(), lockname))
def getLock(self, lockname, print_errors=True):
''' Attempt to get a lock using the given name. (Mostly meant for tool commands that want to ensure non-concurrent runs.) '''
return self.getLockViaPidfile(self._getLockPidfile(lockname), print_errors=print_errors)
def releaseLock(self, lockname):
return self.releasePidfile(self._getLockPidfile(lockname))
def isLockAvailable(self, lockname):
if not os.path.isfile(self._getLockPidfile(lockname)):
return True
# It's possible that we have a stale lock file -- so we'll try to actually get the lock, which will release it.
# This only works if the callee has write permission to the lockdir, though.
if 0 != self.getLock(lockname, print_errors=False):
return False
self.releaseLock(lockname)
return True
def getPidOfLockOwner(self, lockname):
''' return the PID of the process owning the named lock, or None if it's not locked. '''
return get_pid_from_pidfile(self._getLockPidfile(lockname))
def getDaemonLockfileInfo(self):
return self.getLockfileInfo(self._supervisor_pidfile)
def getLockfileInfo(self, pidfile):
''' Return all info in the given pidfile. Adds 'uptime' and 'pid' to the returned dict where applicable. 'pid' will only exist if the process is up and running. '''
lockfile_data = read_data_from_lockfile(pidfile)
if lockfile_data is None:
return None
pid = get_pid_from_pidfile(pidfile)
if pid is None or not is_pid_running(pid):
return lockfile_data
lockfile_data['pid'] = pid
if angel.constants.LOCKFILE_DATA_DAEMON_START_TIME in lockfile_data:
try:
lockfile_data['uptime'] = time.time() - int(lockfile_data[angel.constants.LOCKFILE_DATA_DAEMON_START_TIME])
except:
print >>sys.stderr, "Warning: invalid start time '%s' in lockfile %s; ignoring." % \
(lockfile_data[angel.constants.LOCKFILE_DATA_DAEMON_START_TIME], pidfile)
if angel.constants.LOCKFILE_DATA_CHILD_PID in lockfile_data:
try:
child_pid = int(lockfile_data[angel.constants.LOCKFILE_DATA_CHILD_PID])
lockfile_data[angel.constants.LOCKFILE_DATA_CHILD_PID] = child_pid
except:
print >>sys.stderr, "Warning: invalid child_pid '%s' in lockfile %s." % \
(lockfile_data[angel.constants.LOCKFILE_DATA_CHILD_PID], pidfile)
return lockfile_data
def getStatStruct(self, service_name=None, message=None, state=None, data=None):
stat_struct = stats_create_struct(message=message, state=state, data=data)
if service_name is None:
service_name = self.getServiceName()
stat_struct['service_name'] = service_name.replace('Service',"")
return stat_struct
def isStatStructStateOk(self, struct, accept_warnings_as_ok=True):
if 'state' not in struct: return False
if struct['state'] == angel.constants.STATE_RUNNING_OK: return True
if struct['state'] == angel.constants.STATE_WARN and accept_warnings_as_ok: return True
return False
def parseNagiosOutputToStatStruct(self, stat_struct, nagios_string):
''' Nagios plugins generate a message on STDOUT that is <message>|<data>. Split the data portion into the appropriate key/value pairs. '''
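# Example plugin output (hypothetical values): "TCP OK - 0.012 second response time on port 25|time=0.012s;;;0.0;10.0"
# splits into the message "TCP OK - 0.012 second response time on port 25" and the perfdata "time=0.012s;;;0.0;10.0".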
if nagios_string is None or len(nagios_string) == 0: # This can happen when a nagios plugin times out or fails.
return stat_struct
nagios_message = nagios_string.split("\n")[0].partition('|')[0].strip()
nagios_data = None
nagios_data = nagios_string.split("\n")[0].partition('|')[2]
self.updateStatStruct(stat_struct, message=nagios_message)
stats_import_nagios_data(stat_struct, nagios_data)
return stat_struct # return is for convenience of chaining calls together
def deleteStatFromStatStruct(self, stat_struct, data_name):
stats_delete_data_record(stat_struct, data_name)
return stat_struct # return is for convenience of chaining calls together
def mergeStatStructs(self, stat_struct_to_merge_into, stat_struct_to_import):
stats_merge_structs(stat_struct_to_merge_into, stat_struct_to_import)
return stat_struct_to_merge_into # return is for convenience of chaining calls together
def updateStatStruct(self, struct, message=None, state=None, replace_instead_of_append=False):
stats_update_struct(struct, message=message, state=state, replace_instead_of_append=replace_instead_of_append)
return struct # return is for convenience of chaining calls together
def addDataPointToStatStruct(self, struct, name, value, unit=None, stat_group=None):
stats_add_data_record(struct, name, value, unit=unit, stat_group=stat_group)
return struct # return is for convenience of chaining calls together
def getServiceState(self):
s = self.service_status()
return s['state']
def isServiceStateOk(self, accept_warnings_as_ok=True):
return self.isStatStructStateOk(self.service_status(), accept_warnings_as_ok=accept_warnings_as_ok)
def checkStatusViaNagiosPlugin(self, name, args):
''' name is a string containing the name of the Nagios Plugin inside the Nagios plugin dir; args is an '[ ]' list of parameters to pass '''
plugin_binary = self.getNagiosPluginPath(name)
if plugin_binary is None:
return self.getStatStruct(message="Missing nagios plugin %s" % name, state=angel.constants.STATE_UNKNOWN)
output, err, exitcode = self.getCommandOutput(plugin_binary, args=args, log_nonzero_exits=False)
return self.parseNagiosOutputToStatStruct(self.getStatStruct(state=exitcode), output)
def checkStatusViaPid(self):
daemon_info = self.getDaemonLockfileInfo()
if daemon_info is None or 'pid' not in daemon_info:
return self.getStatStruct(message=self.SUPERVISOR_NOT_RUNNING_MESSAGE, state=angel.constants.STATE_STOPPED)
return self.getStatStruct(message="Running (pid %s)" % daemon_info['pid'], state=angel.constants.STATE_RUNNING_OK)
def checkStatusViaTCP(self, host, port, warn_time=2, error_time=4, check_timeout=6):
if host is None:
host = '127.0.0.1'
args = ['-H', host, '-p', str(port)]
if warn_time is not None:
args += ['-w', str(warn_time)]
if error_time is not None:
args += ['-c', str(error_time)]
if check_timeout is not None:
args += ['-t', str(check_timeout)]
status = self.checkStatusViaNagiosPlugin('check_tcp', args)
if 'message' in status and status['message'] is not None:
status['message'] = status['message'].replace('Connection refused', 'Connection to %s:%s refused' % (host,port))
return status
def checkStatusViaPidAndTcp(self, ip, port):
daemon_info = None
if ip is None:
# Then we're checking a local instance, also check the pidfile:
daemon_info = self.getDaemonLockfileInfo()
if daemon_info is None or 'pid' not in daemon_info:
return self.getStatStruct(message=self.SUPERVISOR_NOT_RUNNING_MESSAGE, state=angel.constants.STATE_STOPPED)
# By default, an ip of None will check 127.0.0.1, and this usually works, because most services bind to 0.0.0.0.
stat_struct = self.checkStatusViaTCP(ip, port)
if daemon_info is None: return stat_struct
return stat_struct
def getNagiosPluginPath(self, name):
''' Return the full path for the given nagios plugin. If the given name starts with a /, assume we have a full-path already and return that. '''
path = name
if not path.startswith('/'):
possible_common_dirs = ('/usr/lib/nagios/plugins/', '/usr/lib64/nagios/plugins', '/usr/local/sbin', '/usr/sbin')
path = self.which(name, additional_paths=possible_common_dirs)
if path is None:
print >>sys.stderr, "Error: can't find nagios plugin '%s' under default and nagios paths (%s)." % \
(name, ':'.join(possible_common_dirs))
return None
if not os.path.exists(path):
print >>sys.stderr, "Warning: nagios plugin %s missing (can't find %s)." % (name, path)
return None
return path
def waitForServicesToStart(self, dependents, timeout_in_seconds=None):
''' Given a list of service names ('apache2','varnish'), wait for the services to return an OK status. '''
for service_name in dependents:
service_obj = self._angel.get_service_object_by_name(service_name)
if service_obj is None:
print >>sys.stderr, "Error: unknown service '%s' in %s service dependencies." % (service_name, self.getServiceName())
return 1
if 0 != self.waitForOkayStatus(service_obj.service_status, timeout_in_seconds=timeout_in_seconds):
return 1
return 0
def waitForOkayStatus(self, status_ok_func, timeout_in_seconds=None, args=()):
''' status_ok_func needs to be a function that returns a dict, which should contain key 'state' with one of the defined Nagios state values.
Returns 0 once the service comes up; non-zero otherwise (i.e. timeout). '''
retry_interval_in_seconds = 1
if timeout_in_seconds is None:
timeout_in_seconds = 60*60 # After an hour, something is probably wedged -- exit out
accept_warnings_as_ok = True
update_status_messages = True
wait_time = 0
last_message_printed_time = 0
cur_state = self.getStatStruct(state=angel.constants.STATE_UNKNOWN)
ret_val = 1
cancel_count_until_error = 3
while not self.isStatStructStateOk(cur_state, accept_warnings_as_ok=accept_warnings_as_ok) and wait_time <= timeout_in_seconds:
cur_state = status_ok_func(*args)
if self.isStatStructStateOk(cur_state, accept_warnings_as_ok=accept_warnings_as_ok):
ret_val = 0
break
wait_time += retry_interval_in_seconds
if wait_time - last_message_printed_time > 5:
last_message_printed_time = wait_time
print >>sys.stderr, '%s[%s]: waiting for %s: %s' % (self.getServiceName(), os.getpid(), cur_state['service_name'], cur_state['message'])
if wait_time < timeout_in_seconds:
if update_status_messages:
self.setSupervisorStatusMessage('Waiting for %s (%s seconds elapsed)' % (cur_state['service_name'], int(wait_time)))
try:
time.sleep(retry_interval_in_seconds)
except:
cancel_count_until_error -= 1
if cancel_count_until_error <= 0:
return 1
print >>sys.stderr, "Warning: time.sleep threw exception while waiting for service to start"
if update_status_messages:
self.setSupervisorStatusMessage(None)
return ret_val
def killUsingPidfile(self, pidfile, soft_kill_signal=None, gracetime_for_soft_kill=None, hard_kill_signal=None, gracetime_for_hard_kill=None):
''' Kill the given process, starting with a nice signal (usually SIGHUP), then a warning (usually SIGTERM), then eventually SIGKILL.
Return 0 on success, non-zero otherwise. (A 0 return means the process is not running; non-zero means it is running or unknown state.) '''
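# Default escalation (see the class constants and killUsingPid below): STOP_SOFT_KILL_SIGNAL (SIGHUP, 30s wait),
# then STOP_HARD_KILL_SIGNAL (SIGTERM, 30s), then SIGUSR1 (5s), and finally SIGKILL.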
if not os.path.exists(pidfile):
return 0
pid = get_pid_from_pidfile(pidfile)
if pid is None:
release_pidfile(pidfile)
return 0
if not is_pid_running(pid):
print >> sys.stderr, "Killer[%s]: went to kill %s[%s], but process not running." % (os.getpid(), self.getServiceName(), pid)
release_pidfile(pidfile)
return 0
return self.killUsingPid(pid, soft_kill_signal=soft_kill_signal, gracetime_for_soft_kill=gracetime_for_soft_kill, hard_kill_signal=hard_kill_signal, gracetime_for_hard_kill=gracetime_for_hard_kill)
def killUsingPid(self, pid, soft_kill_signal=None, gracetime_for_soft_kill=None, hard_kill_signal=None, gracetime_for_hard_kill=None):
''' See killUsingPidfile. '''
if soft_kill_signal is None:
soft_kill_signal = self.STOP_SOFT_KILL_SIGNAL
if hard_kill_signal is None:
hard_kill_signal = self.STOP_HARD_KILL_SIGNAL
if gracetime_for_soft_kill is None: gracetime_for_soft_kill = self.STOP_SOFT_KILL_TIMEOUT
if gracetime_for_hard_kill is None: gracetime_for_hard_kill = self.STOP_HARD_KILL_TIMEOUT
min_time_between_kills = 2
if gracetime_for_soft_kill < min_time_between_kills: gracetime_for_soft_kill = min_time_between_kills
if gracetime_for_hard_kill < min_time_between_kills: gracetime_for_hard_kill = min_time_between_kills
max_time_between_kills = 60*60
if gracetime_for_soft_kill > max_time_between_kills: gracetime_for_soft_kill = max_time_between_kills
if gracetime_for_hard_kill > max_time_between_kills: gracetime_for_hard_kill = max_time_between_kills
# Take a snapshot of all process relationships before kill:
pid_mapping = get_pid_relationships()
# Step through each level of kill using above helper function:
name = self.getServiceName()
if 0 != kill_and_wait(pid, name, soft_kill_signal, gracetime_for_soft_kill):
if 0 != kill_and_wait(pid, name, hard_kill_signal, gracetime_for_hard_kill):
if 0 != kill_and_wait(pid, name, signal.SIGUSR1, 5):
print >>sys.stderr, "Killer[%s]: %s[%s] failed to exit; resorting to SIGKILL." % (os.getpid(), name, pid)
if 0 != kill_and_wait(pid, name, signal.SIGKILL, 10):
print >>sys.stderr, 'Killer[%s]: unable to kill %s[%s]. Do you need to run as root?' % (os.getpid(), name, pid)
return -1
if pid_mapping is None: # OS X
return 0
# Helper function to print warnings about any still-running processes that were owned by the process we killed:
def _check_child_processes_not_running(pid_to_check):
if is_pid_running(pid_to_check):
print >>sys.stderr, "Killer[%s]: warning: child process %s still running!" % (os.getpid(), pid_to_check)
if pid_to_check not in pid_mapping:
#print >>sys.stderr, "Killer[%s]: warning: process %s missing from pid_mapping!" % (os.getpid(), pid_to_check) # This happens if the process exited after we made our mapping
return
for child_pid in pid_mapping[pid_to_check]:
_check_child_processes_not_running(child_pid)
_check_child_processes_not_running(pid)
return 0
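# Escalation sketch (hypothetical pidfile path and timings; not from the original callers):
# a service wanting a faster shutdown than the class defaults could call, e.g.:
#   self.killUsingPidfile('/var/run/myproj/my-service.pid',
#                         soft_kill_signal=signal.SIGHUP, gracetime_for_soft_kill=5,
#                         hard_kill_signal=signal.SIGTERM, gracetime_for_hard_kill=5)
# Escalation then runs soft signal -> hard signal -> SIGUSR1 -> SIGKILL, and afterwards
# warns about any child processes that survived (where process relationships are available).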
def start_supervisor_with_binary(self, binary, args={}, env=None, reset_env=False, name=None, log_basepath=None,
init_func=None, stop_func=None,
run_as_config_user=False, run_as_user=None, run_as_group=None,
pidfile=None, chdir_path=None,
process_oom_adjustment=0, nice_value=0, run_as_daemon=True,
include_conf_in_env=False):
# To-do: eliminate run_as_user/group, use run_as_config_user, default it to True.
if binary is None:
print >>sys.stderr, "Missing binary in service %s (args: %s)" % (self.getServiceName(), args)
return -1
if run_as_config_user:
run_as_user = self._config['RUN_AS_USER']
run_as_group = self._config['RUN_AS_GROUP']
if env is None:
env = {}
if include_conf_in_env:
env = self._export_settings_into_env(env)
if run_as_daemon:
# Make sure the pidfile path is set up correctly:
if pidfile is None:
pidfile = self._supervisor_pidfile
if pidfile.startswith('~'):
pidfile = os.path.abspath(pidfile)
if not pidfile.startswith('/'):
print >>sys.stderr, "Warning: relative pidfile path %s; adding LOCK_DIR in." % pidfile
pidfile = os.path.join(os.path.expanduser(self._config['LOCK_DIR']), pidfile)
# If no stop function, tell launch to call stop() so that we can send the supervisor daemon a SIGHUP and have it send the correct signals for soft and hard kill:
if stop_func is None:
stop_func = lambda server_pid: self.service_stop(server_pid)
# Check log output:
if log_basepath is None:
log_name = None
if name is not None and len(name):
log_name = re.sub(r'\W+', ' ', name).split(' ')[0]
if log_name is None:
log_name = self.getServiceName()
log_basepath = '%s/%s' % (self.getServiceName(), log_name)
return launch(self._config, binary, args, env=env, reset_env=reset_env, name=name, log_basepath=log_basepath, chdir_path=chdir_path,
run_as_daemon=run_as_daemon, pid_filename_for_daemon=pidfile, init_func=init_func, stop_func=stop_func, run_as_user=run_as_user, run_as_group=run_as_group,
nice_value=nice_value, process_oom_adjustment=process_oom_adjustment)
def start_supervisor_with_function(self, function_name, run_func):
log_name = function_name.replace('_','-')
log_basepath = '%s/%s' % (self.getServiceName(), log_name)
return launch_via_function(self._config, function_name, log_basepath, run_func,
run_as_daemon=True, pid_filename_for_daemon=self._supervisor_pidfile, stop_func = lambda server_pid: self.service_stop(server_pid),
run_as_user=self._config['RUN_AS_USER'], run_as_group=self._config['RUN_AS_GROUP'])
def isPortAvilable(self, port):
host='' # defaults to all interfaces
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Allow bind to work on ports in the wait state (i.e. if something was listening)
try:
s.bind((host, int(port)))
s.close()
except Exception:
return False
return True
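# Usage sketch (the setting name MY_SERVICE_PORT is illustrative, not from this source):
# a start hook could refuse to launch when its configured port is already bound, e.g.:
#   if not self.isPortAvilable(self._config['MY_SERVICE_PORT']):  # method name spelled as defined above
#       print >>sys.stderr, "Error: port already in use."
#       return 1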
def get_service_run_dir(self, create_if_missing=False):
path = os.path.join(os.path.expanduser(self._config['RUN_DIR']), self.getServiceName())
if not os.path.exists(path) and create_if_missing:
self.createDirsIfNeeded(path, owner_user=self._config['RUN_AS_USER'], owner_group=self._config['RUN_AS_GROUP'])
return path
def get_service_log_dir(self, create_if_missing=False):
path = os.path.join(os.path.expanduser(self._config['LOG_DIR']), self.getServiceName())
if not os.path.exists(path) and create_if_missing:
self.createDirsIfNeeded(path, owner_user=self._config['RUN_AS_USER'], owner_group=self._config['RUN_AS_GROUP'])
return path
def createConfDirFromTemplates(self, src_path=None, dest_path=None, owner_user=None, owner_group=None, mode=0755, vars=None, reset_dir=False):
''' When src_path is None, copy template files from ./services/<service>/conf/ to <RUN_DIR>/<service>/, replacing settings tokens with current values;
and also delete any unknown files in dest_path.
If src_path is given, unknown files in dest_path will be left unmodified; making it safe to expand into settings dirs for other services.
src_path can be a full path or a relative path under ./services/<current_service>/conf/
If reset_dir is True, any existing conf dir will be deleted first. This can lead to unexpected results if a service's tools are actively running
with the conf dir -- i.e. don't reset the conf dir on every conf_dir creation if multiple tools are firing off at once using it.
Returns a tuple of (full path to the newly-created conf file or directory, number of lines changed from existing files in dir), or (None, negative error code) on error.
'''
delete_unknown_files = False
if src_path is None:
delete_unknown_files = True
src_path = os.path.join(self.getServiceCodeDir(), 'conf')
if not os.path.isdir(src_path):
print >>sys.stderr, "Error: can't find conf template dir '%s' for service %s." % (src_path, self.getServiceName())
return None, -1
else:
if not src_path.startswith('/'):
src_path = os.path.join(self.getServiceCodeDir(), 'conf', src_path)
if dest_path is None:
dest_path = os.path.join(self.get_service_run_dir(), "conf")
if reset_dir and os.path.isdir(dest_path):
try:
shutil.rmtree(dest_path)
except:
print >>sys.stderr, "Error: can't reset conf template dir '%s' for service %s." % (src_path, self.getServiceName())
return None, -2
ret_val = self.install_template_to_dir(src_path, dest_path,
owner_user=owner_user, owner_group=owner_group, mode=mode,
vars=vars, delete_unknown_files=delete_unknown_files)
if ret_val < 0: return None, -3
return dest_path, ret_val
def createDirsIfNeeded(self, absolute_path, name="service", owner_user=None, owner_group=None, mode=0755):
return create_dirs_if_needed(absolute_path, name=name, owner_user=owner_user, owner_group=owner_group, mode=mode)
def which(self, names, additional_paths=(), must_be_executable=True):
''' Return the path to the named executable, or None if not found.
names can be a string or list of strings to be searched for within those paths that are directories.
additional_paths can be a list of additional directories or file paths to search inside.
If no matching executable is found within the paths given, we'll also search in the following directories in this order:
<base_dir>/services/<service>/{bin, server/bin, server/sbin}
<base_dir>/built/services/<service>/{bin, server/bin, server/sbin}
<base_dir>/built/bin
<base_dir>/bin
/opt/local/bin
/usr/local/bin
/usr/bin
/usr/sbin
/bin
'''
# Any paths passed in to us take precedence over default paths, so start list with them:
paths = additional_paths
# Add service-specific paths:
built_base_dir = self.get_service_built_dir()
for base in (self.getServiceCodeDir(), built_base_dir):
for dir in ('bin', os.path.join('server', 'bin'), os.path.join('server', 'sbin')):
paths += (os.path.join(base, dir),)
# Add project-level bin paths:
for bin_path in self._config['BIN_PATHS'].split(':'):
paths += (os.path.join(self._angel.get_project_base_dir(), bin_path),)
# Add top-level and system dirs to path (system dirs come last so that project paths take precedence):
paths += (os.path.join(os.path.expanduser(self._angel.get_project_base_dir()),'built','bin'),
os.path.join(os.path.expanduser(self._angel.get_project_base_dir()),'bin'),
'/opt/local/bin',
'/usr/local/bin',
'/usr/bin',
'/usr/sbin',
'/bin')
# We could include os.environ['PATH'], but that's risky: if something is started from a shell that happens
# to have that set to something that wouldn't exist otherwise, services might appear to work when they
# wouldn't on a proper startup.
for some_path in paths:
if not some_path.startswith('/'):
print >>sys.stderr, "Warning: non-absolute path '%s' given to which()" % some_path
continue
if os.path.isfile(some_path) and os.access(some_path, os.X_OK):
return some_path
if os.path.isdir(some_path):
if names is None:
print >>sys.stderr, "Warning: 'which' function given dir path '%s' but no names to search for." % some_path
continue
if isinstance(names, str):
names = list((names,))
for name in names:
this_path = os.path.join(some_path,name)
# Do not use realpath -- some sbin dirs use symlinks with different bins
# pointing to different dirs, and unaliasing that can break things (e.g. nagios)
if os.path.isfile(this_path):
if must_be_executable:
if os.access(this_path, os.X_OK):
return this_path
else:
return this_path
return None
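# Usage sketch (binary names and extra path are illustrative): which() accepts a single
# name or a tuple of names and returns the first executable found across the service,
# project, and system paths listed in the docstring, e.g.:
#   pg_dump_path = self.which(('pg_dump', 'pg_dumpall'), additional_paths=('/usr/pgsql/bin',))
#   if pg_dump_path is None:
#       print >>sys.stderr, "Error: can't find pg_dump."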
def whichDir(self, paths):
'''Given a list of directories, return the first one that exists, or None.'''
for path in paths:
if not path.startswith('/'):
print >>sys.stderr, "Warning: non-absolute path given to whichDir()"
if os.path.isdir(path):
return path
return None
def execCommand(self, command, args=None, env=None, reset_env=False, nice_value=None, chdir_path=None,
run_as_config_user=False, stdin_string=None, include_conf_in_env=False,
run_as_child_and_block=False, stdout_fileno=None):
''' Exec (replace current running process) with the given command and optional args and env.
When run_as_child_and_block is true, fork, exec in child, block, and return the child exit code. '''
run_as_user = None
run_as_group = None
if run_as_config_user:
run_as_user = self._config['RUN_AS_USER']
run_as_group = self._config['RUN_AS_GROUP']
if include_conf_in_env:
env = self._export_settings_into_env(env)
return exec_process(command, args=args, env=env, reset_env=reset_env, nice_value=nice_value,
chdir_path=chdir_path, run_as_user=run_as_user, run_as_group=run_as_group,
stdin_string=stdin_string, run_as_child_and_block=run_as_child_and_block,
stdout_fileno=stdout_fileno)
def _getBackgroundFunctionLockfilePath(self, name):
return os.path.join(os.path.expanduser(self._config['LOCK_DIR']),
"%s-bg-function-%s" % (self.getServiceName(), name))
def runFunctionInBackground(self, exec_func, name, run_as_user=None, run_as_group=None):
lockfile_path = self._getBackgroundFunctionLockfilePath(name)
log_basepath = os.path.join(os.path.expanduser(self._config['LOG_DIR']), self.getServiceName(), name)
return run_function_in_background(self._config, name, lockfile_path, exec_func, log_basepath=log_basepath,
run_as_user=run_as_user, run_as_group=run_as_group)
def killFunctionInBackground(self, name):
pidfile = self._getBackgroundFunctionLockfilePath(name)
pid = get_pid_from_pidfile(pidfile)
if pid is None:
return 0
return self.killUsingPid(pid)
def isBackgroundFunctionRunning(self, name):
pidfile = self._getBackgroundFunctionLockfilePath(name)
pid = get_pid_from_pidfile(pidfile)
if pid is None:
return False
return True
def runCommand(self, command, args=None, env=None, reset_env=False, run_as_config_user=False, chdir_path=None,
log_nonzero_exits=True, timeout_in_seconds=None, include_conf_in_env=False, stdin_string=None):
''' Run the given command (swallowing stdout/stderr) with optional args and env, and return the exit code. '''
out, err, exitcode = self.getCommandOutput(command,
args=args,
env=env,
run_as_config_user=run_as_config_user,
reset_env=reset_env,
chdir_path=chdir_path,
stdin_string=stdin_string,
log_nonzero_exits=log_nonzero_exits,
timeout_in_seconds=timeout_in_seconds,
include_conf_in_env=include_conf_in_env)
return exitcode
def get_angel_command_output(self, args, setting_overrides=None, timeout_in_seconds=None, stdin_string=None):
command = self._angel.get_project_exec()
(args, env) = self._angel.get_args_and_env_for_running_self(args=args,
setting_overrides=setting_overrides)
return self.getCommandOutput(command, args=args, env=env,
timeout_in_seconds=timeout_in_seconds,
stdin_string=stdin_string,
log_nonzero_exits=False)
def getCommandOutput(self, command, args=None, env=None, chdir_path=None,
reset_env=False, include_conf_in_env=False, tee_output=False,
run_as_config_user=False, log_nonzero_exits=True, timeout_in_seconds=None, stdin_string=None):
''' Run given command with optional args and env settings and return stdout, stderr, and exit code.
If run_as_config_user is True, this will be run with the user and group as defined in config.run_as_user / config.run_as_group, otherwise the current effective user is used. '''
run_as_user = run_as_group = None
if run_as_config_user:
run_as_user = self._config['RUN_AS_USER']
run_as_group = self._config['RUN_AS_GROUP']
if include_conf_in_env:
env = self._export_settings_into_env(env)
if env is None:
return None, None, None
return get_command_output(command, args=args, env=env, chdir=chdir_path, reset_env=reset_env, tee_output=tee_output,
run_as_user=run_as_user, run_as_group=run_as_group,
print_error_info=log_nonzero_exits, timeout_in_seconds=timeout_in_seconds,
stdin_string=stdin_string)
def _export_settings_into_env(self, env):
if env is None:
env = {}
env_prefix = "%s_SETTING_" % self._angel.get_project_name().upper()
for k in self._config:
if k.startswith('.'): continue
if type(self._config[k]) not in (float, int, bool, type(None), str):
continue # This might lead to some vars being dropped if we add support for other types...
env['%s%s' % (env_prefix, k)] = self._config[k]
settings_env_name = "%s_SETTINGS" % self._angel.get_project_name().upper()
if settings_env_name not in env:
env[settings_env_name] = self.export_settings_to_tmp_file()
if env[settings_env_name] is None:
return None
return env
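# Environment sketch (assuming a project named "myproj"; the setting name is illustrative):
# a setting RUN_DIR='/var/run/myproj' would be exported as
#   MYPROJ_SETTING_RUN_DIR=/var/run/myproj
# and MYPROJ_SETTINGS would point at the tmp settings file produced by
# export_settings_to_tmp_file() further down in this class.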
def fetchDataFromUrl(self, url, timeout=5, silent=False, headers=None,
http_auth_username=None, http_auth_password=None):
try:
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(url)
if headers:
for h in headers:
request.add_header(h, headers[h])
if http_auth_username and http_auth_password:
request.add_header('Authorization', "Basic %s" % base64.b64encode('%s:%s' % (http_auth_username, http_auth_password)))  # b64encode avoids the trailing newline that encodestring appends
connection = opener.open(request, None, timeout)
return connection.read()
except urllib2.URLError as e:
if not silent:
print >>sys.stderr, "Warning: URLError while fetching '%s' (%s)." % (url, e)
return None
except Exception as e:
print >>sys.stderr, "Warning: unexpected error while fetching '%s' (%s)." % (url, e)
return None
def fetchStatsFromUrl(self, url, timeout=5, silent=False):
''' Given a URL that outputs a text document of "Key: value\n" values, return a dict; or None on failure. '''
data_str = self.fetchDataFromUrl(url, timeout=timeout, silent=silent)
if data_str is None:
return None
try:
return dict([ [y.strip() for y in x.split(': ')] for x in data_str.strip().split('\n')]) # "Key: value\nKey-2: value2\n" -> dict{key} -> value
except Exception as e:
if not silent:
print >>sys.stderr, "Warning: failed to parse data from %s (%s)." % (url, e)
return None
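# Parsing sketch (made-up response body): a stats endpoint returning
#   "Connections: 12\nUptime: 3600\n"
# yields {'Connections': '12', 'Uptime': '3600'}; note that values remain strings and
# callers are expected to cast them as needed.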
def shell_tool_start(self, wait=False):
''' Start the service, regardless of conf HOST settings.
* wait: wait for service to start '''
ret_val = self.trigger_start()
if not wait or ret_val != 0: return ret_val
return self.waitForOkayStatus(self.service_status, timeout_in_seconds=120)
def shell_tool_stop(self, hard=False, mean=False):
'''
Stop the service (note: 'service repair' may start it again)
* hard: send SIGTERM to all service processes; not recommended in prod
* mean: send SIGKILL to all service processes; not recommended ever!
'''
daemon_pid = get_pid_from_pidfile(self._supervisor_pidfile)
if daemon_pid is None:
print >>sys.stderr, "Service %s already stopped." % self.getServiceName()
return 0
if hard or mean:
print >>sys.stderr, "Reminder: --hard and --mean should only be used when a service has become unresponsive to normal stop requests."
hard_kill_all(self._supervisor_pidfile, send_sigterm=hard, send_sigkill=mean,
yes_i_understand_this_is_potentially_dangerous=True)
return self.trigger_stop()
def shell_tool_status(self, key=None, interval=None, with_timestamp=False, count=-1, wait=None, with_stats=True, with_summary=True):
'''
Show detailed status information with extended data
* count: when running with an interval, stop after N samples (defaults to forever)
* interval: show output every N seconds
* key: output only the value for the named key; non-zero exit if min/max ranges defined and exceeded
* wait: wait for N seconds (defaults to 120) for an okay status; non-zero exit otherwise
* with_stats: include stats on each interval
* with_summary: include summary stats at end
* with_timestamp: include timestamps (epoch,key format when key-based output)
'''
if interval is not None:
try:
interval = float(interval)
except:
print >>sys.stderr, "Invalid interval '%s'." % interval
return 1
if interval < 0:
print >>sys.stderr, "Warning: invalid negative interval %s; using 0 instead." % interval
interval = 0
if interval > 3600:
print >>sys.stderr, "Using interval of 3,600 seconds instead of %s." % interval
interval = 3600
wait_for_ok = False
wait_for_ok_time_left = 120
if wait is not None:
if type(wait) is bool:
wait_for_ok = wait
else:
try:
wait_for_ok_time_left = float(wait)
wait_for_ok = True
except:
print >>sys.stderr, "Invalid wait time '%s'." % wait
return 1
if wait_for_ok and interval is not None:
print >>sys.stderr, "Error: --interval and --wait-for-ok are mutually-exclusive."
return 1
whole_second_interval = False
if interval:
# This is a bit of a cheat -- if we're running on an integer interval, sleep for a tiny bit so that
# stats are triggered at the top of the second -- this will roughly synchronize stats on different systems.
if int(interval) == float(interval):
whole_second_interval = True
def _sleep_until_top_of_the_second(max_sleep=0.999):
''' sleep until the time rolls over to the next second; skip sleep if that can't happen within max_sleep seconds. '''
now = time.time()
fractional_second = 1 - abs(now - int(now))
if fractional_second > max_sleep:
return
try:
time.sleep(fractional_second)
except Exception as e:
print >>sys.stderr, "Error in drift correction (%s)." % e
return 1
if whole_second_interval:
_sleep_until_top_of_the_second()
do_loop = True
loops_left = None
if count > 0:
loops_left = count
ret_val = 0
loop_count = 0
statistics_values = {}
statistics_warn_count = {}
statistics_error_count = {}
statistics_sample_count = {}
try:
while do_loop:
loop_count += 1
if loops_left is not None:
loops_left -= 1
if loops_left <= 0:
do_loop = False
start_time = time.time()
stat_struct = self.trigger_status()
if self.isServiceRunning():
last_child_start = self._get_seconds_since_last_child_start()
if last_child_start is None: last_child_start = -1
last_service_start = -1
daemon_info = self.getDaemonLockfileInfo()
if daemon_info is not None and 'uptime' in daemon_info and daemon_info['uptime'] is not None:
try:
last_service_start = int(daemon_info['uptime'])
except:
pass
self.addDataPointToStatStruct(stat_struct, 'service uptime', last_service_start, unit=angel.constants.STAT_TYPE_SECONDS)
self.addDataPointToStatStruct(stat_struct, 'process uptime', last_child_start, unit=angel.constants.STAT_TYPE_SECONDS)
server_pid = self.get_server_process_pid()
if server_pid:
self.addDataPointToStatStruct(stat_struct, 'server pid', server_pid)
if wait_for_ok:
if not self.isStatStructStateOk(stat_struct):
try:
time.sleep(1)
except:
print >>sys.stderr, "Interrupted while waiting for service."
return 1
wait_for_ok_time_left -= (time.time() - start_time)
if wait_for_ok_time_left > 0:
continue
if key is not None:
# When key is given, output format is *just* the data value of the requested key, or empty string if it's missing.
# Do not change this format, so that `tool <servicename> --key <somekey>` can be expanded safely by users.
# Return value is 0 if key exists and is within min / max values
if 'data' in stat_struct and key in stat_struct['data']:
d = stat_struct['data'][key]
value = d['value']
if with_timestamp:
print "%s,%s" % (int(start_time), value)
else:
print value
if 'min' in d and value < d['min']:
ret_val = 1
if 'max' in d and value > d['max']:
ret_val = 1
else:
print >>sys.stderr, "Error: can't find status info key '%s'." % key
ret_val = 1
else:
# When no key is specified, print a human-readable summary of all status info for the service:
if with_timestamp:
print datetime.datetime.fromtimestamp(start_time).isoformat()
if 'service_name' not in stat_struct: stat_struct['service_name'] = '(unknown service!)'
if 'message' not in stat_struct: stat_struct['message'] = '(no status message!)'
if 'state' not in stat_struct:
stat_struct['state'] = None
state_string = '(unknown state! %s)' % stat_struct['state']
if stat_struct['state'] in angel.constants.STATE_CODE_TO_TEXT: state_string = angel.constants.STATE_CODE_TO_TEXT[stat_struct['state']]
loop_count_string = ''
if interval:
if interval >= 1:
loop_count_string = "[%s] " % int(time.time())
else:
loop_count_string = "[%0.2f] " % time.time()
print "%s%s %s: %s" % (loop_count_string, stat_struct['service_name'], state_string, stat_struct['message'])
if 'data' in stat_struct:
data = stat_struct['data']
for this_key in sorted(data):
value = data[this_key]['value']
line = "%20s: %s" % (this_key, value)
if 'unit' in data[this_key]:
line += ' %s' % data[this_key]['unit']
if data[this_key]['unit'] == angel.constants.STAT_TYPE_SECONDS and value > 120:
minutes = int(value/60) % 60
hours = int(value/3600) % 24
days = int(value/86400)
time_str = ' ('
if days > 0:
time_str += '%s day%s, ' % (days, ('s' if days != 1 else ''))
if hours > 0:
time_str += '%s hour%s, ' % (hours, ('s' if hours != 1 else ''))
time_str += '%s minute%s)' % (minutes, ('s' if minutes != 1 else ''))
line += time_str
is_in_error_range = False
error_is_larger_than_warn = True
if 'error' in data[this_key] and 'warn' in data[this_key]:
if data[this_key]['warn'] > data[this_key]['error']:
error_is_larger_than_warn = False
if 'error' in data[this_key]:
if this_key not in statistics_error_count:
statistics_error_count[this_key] = 0
if (error_is_larger_than_warn and value > data[this_key]['error']) or (not error_is_larger_than_warn and value < data[this_key]['error']):
is_in_error_range = True
statistics_error_count[this_key] += 1
line += ' *** crossed error threshold %s *** ' % data[this_key]['error']
if 'warn' in data[this_key] and not is_in_error_range:
if this_key not in statistics_warn_count:
statistics_warn_count[this_key] = 0
if (error_is_larger_than_warn and value > data[this_key]['warn']) or (not error_is_larger_than_warn and value < data[this_key]['warn']):
statistics_warn_count[this_key] += 1
line += ' *** crossed warn threshold %s *** ' % data[this_key]['warn']
if with_stats:
print line
if isinstance(value, int) or isinstance(value, float):
if this_key not in statistics_values:
statistics_sample_count[this_key] = 1
statistics_values[this_key] = (value,)
else:
statistics_sample_count[this_key] += 1
statistics_values[this_key] += (value,)
if interval is not None and len(stat_struct['data']) and with_stats:
print ""
if not self.isStatStructStateOk(stat_struct):
ret_val = 1
if interval is None:
break
try:
# Sleep until the next interval.
# This is a little messy, but it works: if we're running on an integer interval,
# sleep a little less than needed, then sleep to roll forward to the next second.
# This keeps our interval on the "top" of the second, which is sorta nice.
run_time = time.time() - start_time
delta = interval - run_time
if whole_second_interval:
delta -= 0.05
if delta > 0:
time.sleep(delta)
if whole_second_interval:
_sleep_until_top_of_the_second(max_sleep=0.05)
except Exception as e:
print >>sys.stderr, e
break
except KeyboardInterrupt:
pass
# If we're running in a loop with full output, display averages:
if interval is not None and key is None and len(statistics_values) and with_summary:
print "\n--- %s statistics ---%s" % (self.getServiceName(), '-' * (80-len(self.getServiceName())))
print " average min val max val warnings errors sample count"
for key in sorted(statistics_sample_count):
avg_value = (sum(statistics_values[key])/statistics_sample_count[key])
max_value = max(statistics_values[key])
min_value = min(statistics_values[key])
format_type = 'd'
if isinstance(statistics_values[key][0], float):
format_type = 'f'
else:
avg_value = int(round(avg_value))
max_value = int(round(max_value))
min_value = int(round(min_value))
format_string = "{0:>17s}: {1:<12%s} {2:<12%s} {3:<12%s} {4:<12s} {5:<12s} {6:s}" % (format_type, format_type, format_type)
warn_info = '-'
if key in statistics_warn_count:
warn_info = statistics_warn_count[key]
err_info = '-'
if key in statistics_error_count:
err_info = statistics_error_count[key]
try:
print format_string.format(key, avg_value, min_value, max_value, str(warn_info), str(err_info), str(statistics_sample_count[key]))
except Exception as e:
print >>sys.stderr, "(Error: can't print info for %s: %s)" % (key, e)
return ret_val
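# Command-line sketch (project and service names are hypothetical): this tool is normally
# reached through the project's tool dispatcher, e.g. something like:
#   myproj tool my-service status --key "server pid"
#   myproj tool my-service status --interval 5 --count 10
# The --key form prints only the raw value (prefixed with an epoch timestamp under
# --with-timestamp), which makes it safe to consume from shell scripts.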
def shell_tool_debug_gdb(self, pid=None):
"""
Attach gdb to the running server process (for linux).
* pid: override pid to use
"""
if pid is None:
if not self.isServiceRunning():
print >>sys.stderr, "Error: service not running."
return 2
pid = self.get_server_process_pid()
if pid is None:
print >>sys.stderr, "Error: no server pid."
return 2
cmd = self.which('lldb') # OS X
if cmd is None:
cmd = self.which('gdb')
if cmd is None:
print >>sys.stderr, "Error: can't find gdb or lldb."
return 2
args = ("-p", pid)
if 0 != os.getuid():
args = (cmd,) + args
cmd = self.which("sudo")
print >>sys.stderr, "Running debugger:"
print >>sys.stderr, " %s %s" % (cmd, ' '.join(map(str, args)))
print >>sys.stderr, "Tips:"
print >>sys.stderr, " backtrace"
print >>sys.stderr, " info threads"
print >>sys.stderr, " thread apply <threadnumber> backtrace"
print >>sys.stderr, " thread apply all backtrace"
print >>sys.stderr, ""
return self.execCommand(cmd, args=args)
def shell_tool_telnet(self, host=None, port=None, use_ssl=False, code=None):
''' Connect via telnet to running service (will use local node where possible)
* host: Connect to the given host, instead of auto-discovering it from settings
* port: Connect to the given port, instead of auto-discovering it from settings
* use_ssl: Connect with OpenSSL instead of telnet
* code: connect and write given string; print response and then close connection
'''
port_var = "%s_PORT" % self.getServiceName().upper().replace('-','_')
host_var = "%s_HOST" % self.getServiceName().upper().replace('-','_')
hosts_var = "%s_HOSTS" % self.getServiceName().upper().replace('-','_')
if port is not None:
try:
port = int(port)
if port < 1 or port > 65535: raise ValueError("port out of range")
except:
print >>sys.stderr, "Error: invalid port %s." % port
return 1
if port is None:
if port_var not in self._config:
print >>sys.stderr, "Error: can't find setting %s for telnet connection." % port_var
return 1
port = self._config[port_var]
if host is None:
if host_var in self._config:
host = self._config[host_var]
elif hosts_var in self._config:
service_hosts = self._config[hosts_var].split(',')
if self._angel.get_private_ip_addr() in service_hosts:
host = self._angel.get_private_ip_addr()
elif '127.0.0.1' in service_hosts:
host = '127.0.0.1'
else:
host = sorted(service_hosts)[0]
print >>sys.stderr, "Warning: picking first host (%s) out of %s setting." % (host, hosts_var)
else:
print >>sys.stderr, "Warning: couldn't find host; assuming 127.0.0.1"
host = '127.0.0.1'
if use_ssl:
if code:
print >>sys.stderr, "Error: --code and --use-ssl not implemented."
return 1
print >>sys.stderr, "openssl s_client -crlf -connect %s:%s" % (host, port) # AWS ELB needs -crlf
return self.execCommand(self.which('openssl'), args=('s_client', '-crlf', '-connect', '%s:%s' % (host, port)))
else:
if not code:
print >>sys.stderr, "telnet %s %s" % (host, port)
return self.execCommand(self.which('telnet'), args=(host, port))
# Use a basic socket to connect and write the --code string:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.sendall(code)
while True:
data = s.recv(1024)
if 0 == len(data):
break
sys.stdout.write(data)
sys.stdout.flush()
s.close()
return 0
except Exception as e:
print >>sys.stderr, "Error connecting to %s:%s (%s)" % (e, host, port)
return 1
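# Usage sketch (hypothetical service and payload): with a MY_SERVICE_PORT setting defined,
# host and port are resolved from settings and the tool either execs telnet/openssl or,
# with --code, writes the string over a raw socket and prints the reply, e.g.:
#   myproj tool my-service telnet --code "stats\r\n"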
def shell_tool_rotate_logs(self):
''' Rotate log files '''
return self.rotateLogs()
def shell_tool_restart(self, wait=False, only_if_running=False):
''' Restart the service
* wait: wait for service to start
* only_if_running: only restart the service if it is already running
'''
if only_if_running:
if not self.isServiceRunning():
return 0
ret_val = self.trigger_restart()
if not wait or ret_val != 0:
return ret_val
return self.waitForOkayStatus(self.service_status, timeout_in_seconds=120)
def shell_tool_reload(self, reload_code=False, flush_caches=False):
''' Reload the service for config changes without service interruption.
* reload_code: Attempt to also reload code (for code changes)
* flush_caches: Requests the service to flush any cached data when applicable
'''
is_conf_changed = True
return self.service_reload(reload_code, is_conf_changed, flush_caches)
def shell_tool_repair(self):
''' Attempt to correct service issues (e.g. failed supervisor or non-ok status state) '''
return self.trigger_repair()
def print_command_help(self, command):
self.shell_tool_help(command=command, print_to_stderr=True)
def shell_tool_help(self, command=None, print_to_stderr=False):
''' Show help text for tool commands.
* command: tool to display help for
'''
# Display a notice to use the main help command, instead of tool help
print >>sys.stderr, "*** deprecated; you should use: %s help tool %s %s" % (self._angel.get_project_name(), self.getServiceName(), command or "")
time.sleep(3)
options = self.get_tool_names()
out = sys.stdout
if print_to_stderr:
out = sys.stderr
if command is not None:
if command in options:
print >>out, options[command].rstrip()
return 0
print >>sys.stderr, 'Unknown command "%s".\n' % command
return 1
print >>out, "tool %s ..." % self.getServiceName()
for option in sorted(options):
include_option = True
for exclusion_pattern in self.HIDDEN_TOOLS:
if fnmatch.fnmatch(option, exclusion_pattern):
include_option = False
break
if include_option:
if len(options[option]):
print >>out, options[option][:-1]
else:
print >>out, " %s (unknown info)" % option
return 0
def shell_tool_get_autocomplete_options(self):
''' Return a string containing all the valid tools commands, for use by bash autocomplete. This is sorta magic...'''
options = self.get_tool_names()
options_to_include = ()
for option in sorted(options):
include_option = True
for exclusion_pattern in self.HIDDEN_TOOLS:
if fnmatch.fnmatch(option, exclusion_pattern):
include_option = False
break
if include_option:
options_to_include += (option,)
print ' '.join(options_to_include)
return 0
def get_usage_for_tools(self):
ret_val = {"commands": {}}
for tool in self.get_tool_names():
ret_val["commands"][tool] = self.get_usage_for_tool(tool)
return ret_val
def get_usage_for_tool(self, tool_name):
ret_val = {}
for exclusion_pattern in self.HIDDEN_TOOLS:
if fnmatch.fnmatch(tool_name, exclusion_pattern):
ret_val["hidden"] = True
try:
tool_func = getattr(self, "shell_tool_%s" % tool_name.replace('-','_'))
except AttributeError:
ret_val["description"] = "(No description for tool %s; is it a bin-based one?" % tool_name
return ret_val
comment = tool_func.__doc__
if comment is None:
ret_val["description"] = "(No description for tool %s; check function docstring." % tool_name
return ret_val
# We expect the docstring to be formatted like so:
# '''
# Some description to show here
# Any additional info here does not get used
# * option: description of option
# any additional info here also does not get used
# * option2: another description
# '''
first_line_of_comment = comment.lstrip().split('\n')[0].lstrip().rstrip()
if first_line_of_comment.endswith('.'): first_line_of_comment = first_line_of_comment[:-1] # Trim periods from sentences
if first_line_of_comment.startswith('#'):
first_line_of_comment = first_line_of_comment[1:].lstrip()
ret_val["description"] = first_line_of_comment
arg_lines = [ a.lstrip()[1:].lstrip().rstrip() for a in comment.split('\n') if a.lstrip().startswith('*') ]
try:
arg_info_from_doc = dict(arg.split(': ', 1) for arg in arg_lines)
except ValueError:
print >>sys.stderr, "Warning: unable to parse docstrings for tool %s." % tool_name
argspec = inspect.getargspec(tool_func)
if argspec.defaults is not None:
ret_val['options'] = {}
kwdefaults = zip( argspec.args[-len(argspec.defaults):], argspec.defaults ) # (required1,required2,optionalA,optionalB),(A,B) -> ((optionalA,A), (optionalB,B))
for kwdefault_name, kwdefault_value in kwdefaults:
description = "(Unknown description, check function docstring?)"
try:
description = arg_info_from_doc[kwdefault_name]
except:
pass
ret_val['options']['--%s' % kwdefault_name.replace("_", "-")] = {
"description": description
}
# if tool_kwarg.startswith('with_') and 'without_%s' % tool_kwarg[5:] in dict(kwdefaults):
# normalized_tool_kwargs['without_%s' % tool_kwarg[5:]] = not tool_kwargs[tool_kwarg]
# elif tool_kwarg.startswith('without_') and 'with_%s' % tool_kwarg[8:] in dict(kwdefaults):
# normalized_tool_kwargs['with_%s' % tool_kwarg[8:]] = not tool_kwargs[tool_kwarg]
# else:
return ret_val
def get_tool_names(self):
''' Return a list of all tools available for this service.
This includes python functions defined in the class, as well as executable scripts in the appropriate bin directories.
See ./services/README.txt for more info on naming requirements.
'''
right_column_indent = 32
options = {}
my_service_name = self.getServiceName()
# Include <service-name>-<command-name> executables/scripts from our bin directory:
our_bin_dir = os.path.join(self.getServiceCodeDir(), 'bin')
if os.path.isdir(our_bin_dir):
required_beginning = "%s-" % my_service_name
for entry in os.listdir(our_bin_dir):
if os.access(os.path.join(our_bin_dir, entry), os.X_OK) and entry[:len(required_beginning)] == required_beginning:
option_name = entry[len(required_beginning):]
options[option_name] = ' %s%s(usage unknown)\n' % (option_name, ' ' * (right_column_indent - 2 - len(option_name)))
# Include executables from the server/bin directory:
built_server_bin_dir = os.path.join(self.get_service_built_dir(), 'server', 'bin')
server_bin_dir = os.path.join(self.getServiceCodeDir(), 'server', 'bin')
for base in (server_bin_dir, built_server_bin_dir):
if os.path.isdir(base):
for entry in os.listdir(base):
if os.access(os.path.join(base, entry), os.X_OK):
options[entry] = ' %s%s(usage unknown)\n' % (entry, ' ' * (right_column_indent - 2 - len(entry)))
# Find all functions that define shell_tool_<command>s:
required_beginning = 'shell_tool_'
for entry in dir(self):
if entry[:len(required_beginning)] != required_beginning: continue
if entry == 'shell_tool_get_autocomplete_options': continue
if entry == 'shell_tool_help': continue
option_name = entry[len(required_beginning):].replace('_','-')
arg_info_from_doc = {}
tool_func = getattr(self, entry)
tool_options_info = ' %s%s(usage unknown)\n' % (option_name, ' ' * (right_column_indent - 2 - len(option_name)))
comment = tool_func.__doc__
if comment is not None:
# We expect the comment to be formatted like so:
# '''
# Some description to show here
# Any additional info here does not get used
# * option: description of option
# any additional info here also does not get used
# * option2: another description
# '''
first_line_of_comment = comment.lstrip().split('\n')[0].lstrip().rstrip()
if first_line_of_comment.endswith('.'): first_line_of_comment = first_line_of_comment[:-1] # Trim periods from sentences
arg_lines = [ a.lstrip()[1:].lstrip().rstrip() for a in comment.split('\n') if a.lstrip().startswith('*') ]
try:
arg_info_from_doc = dict(arg.split(': ', 1) for arg in arg_lines)
except ValueError:
print >>sys.stderr, "Warning: unable to parse docstrings in %s." % option_name
if first_line_of_comment.startswith('#'):
first_line_of_comment = first_line_of_comment[1:].lstrip()
tool_options_info = ' %s%s%s\n' % (option_name, ' ' * (right_column_indent - 2 - len(option_name)), first_line_of_comment)
argspec = inspect.getargspec(tool_func)
optional_args = {}
if argspec.defaults is not None:
kwargs = sorted(zip( argspec.args[-len(argspec.defaults):], argspec.defaults )) # (required1,required2,optionalA,optionalB),(A,B) -> ((optionalA,A), (optionalB,B))
for kwdefault_name, kwdefault_value in kwargs:
optional_args[kwdefault_name] = kwdefault_value
if argspec.args is not None:
for arg_name in argspec.args:
if arg_name == 'self': continue
if arg_name in optional_args: continue
info = '(no doc string)'
if arg_name in arg_info_from_doc:
info = arg_info_from_doc[arg_name]
tool_options_info += ' <%s>%s -- %s\n' % (arg_name.replace('_', '-'), ' ' * (right_column_indent - 7 - len(arg_name)), info)
for kwdefault_name in sorted(optional_args):
info = '(no doc string)'
if kwdefault_name in arg_info_from_doc:
info = arg_info_from_doc[kwdefault_name]
if optional_args[kwdefault_name] is False:
tool_options_info += ' [--%s]%s -- %s\n' % (kwdefault_name.replace('_', '-'), ' ' * (right_column_indent - 9 - len(kwdefault_name)), info)
elif optional_args[kwdefault_name] is None:
tool_options_info += ' [--%s <value>]%s -- %s\n' % (kwdefault_name.replace('_', '-'), ' ' * (right_column_indent - 17 - len(kwdefault_name)), info)
else:
indent = ' ' * (right_column_indent - 12 - len(kwdefault_name) - len(str(optional_args[kwdefault_name])))
tool_options_info += ' [--%s <%s>]%s -- %s\n' % (kwdefault_name.replace('_', '-'), optional_args[kwdefault_name], indent, info)
options[option_name] = tool_options_info
options_to_delete = ()
for disable_tool_pattern in self.DISABLED_TOOLS:
for option in options:
if fnmatch.fnmatch(option, disable_tool_pattern):
options_to_delete += (option,)
for i in options_to_delete:
del options[i]
return options
def reset_service_data_dir(self, confirm_ok=False, data_dir=None, post_reset_func=None):
''' If confirmed, and settings allow for it, stop (if running) the service, move data dir aside, and restart (if had been running) the service.
If post_reset_func is defined, it will be called with a path to the old dir after the reset and before a potential service start (if service had been running). '''
if not confirm_ok:
print >>sys.stderr, "Error: missing --confirm-ok flag"
return -1
if not self._config['SYSTEM_RESET_DATA_ALLOWED']:
print >>sys.stderr, "Error: refusing to run; SYSTEM_RESET_DATA_ALLOWED is set to false."
return -2
is_running = self.isServiceRunning()
if data_dir is None:
data_dir = self.get_service_data_dir()
if not os.path.isdir(data_dir):
print >>sys.stderr, "Warning: no data to reset; ignoring."
return 0
if is_running:
print "Stopping %s..." % self.getServiceName(),
ret_val = self.trigger_stop()
if ret_val != 0:
print >>sys.stderr, "Error: non-zero return %s while stopping %s." % (ret_val, self.getServiceName())
return -4
print "ok."
old_data_dir = "%s-old-%s" % (data_dir, int(time.time()))
try:
os.rename(data_dir, old_data_dir)
except Exception as e:
print >>sys.stderr, "Error: unable to move %s -> %s: %s" % (data_dir, old_data_dir, e)
return -5
if post_reset_func:
post_reset_func(old_data_dir)
if is_running:
print "Starting %s..." % self.getServiceName()
ret_val = self.trigger_start()
if ret_val != 0:
print >>sys.stderr, "Error: non-zero while %s starting %s." % (ret_val, self.getServiceName())
return -6
return 0
def execBeanshell(self, classpath=None):
''' Exec an interactive java shell (useful for debugging); or return non-zero error code. '''
jar_path = os.path.join(os.path.expanduser(self._angel.get_project_base_dir()), 'share', 'tools', 'beanshell', 'bsh-2.0b4.jar')
if not os.path.isfile(jar_path):
return 1
if classpath is None:
classpath = jar_path
else:
classpath = jar_path + ':' + classpath
return self.execCommand(self.which('java'), args=('bsh.Interpreter',), env={'CLASSPATH': classpath})
def shell_tool_debug_show_disk_io(self, pid=None, interval=1):
''' Show disk I/O, per-process, for the running service.
* pid: run against given pid instead of service
* interval: seconds between stats
'''
if not sys.platform.startswith("linux"):
print >>sys.stderr, "Error: not implemented for this platform."
return 1
if pid is None:
pid = self.get_server_process_pid()
if pid is None:
print >>sys.stderr, "Error: can't find process id."
return 1
all_pids = get_all_children_of_process(pid)
if all_pids is not None and len(all_pids) > 1:
pid = ','.join(map(str,all_pids))
args = ('-p', pid)
args += ('-u', '-d')
if interval > 0:
args += (interval,)
return self.execCommand(self.which('pidstat'), args=args)
def shell_tool_debug_linux_netdown(self, duration=None):
''' Simulate net outage by dropping packets to settings.SERVICE_xxx_PORT[S] (linux only, root only)
* duration: if given, limit the "network outage" for this many seconds
'''
if os.path.isfile(self._get_linux_netup_filename()):
print >>sys.stderr, "Error: netdown already in effect for this service?" # Could be a stale file from a reboot?
return 1
port_vars = [x for x in self._config if x.endswith('_PORT') and x.startswith('%s_' % self.getServiceNameConfStyle())]
ports_vars = [x for x in self._config if x.endswith('_PORTS') and x.startswith('%s_' % self.getServiceNameConfStyle())]
ports = [self._config[x] for x in port_vars]
for x in ports_vars: ports.extend(self._config[x].split(','))
try:
map(int, ports)
except:
print >>sys.stderr, "Error: non-numeric port in ports list (%s)." % ports
return 2
if os.getuid() != 0:
print >>sys.stderr, "Error: root access required." # Maybe we should fall back on sudo iptables?
return 3
iptables = self.which('iptables', additional_paths=('/sbin',))
if iptables is None:
print >>sys.stderr, "Error: can't find iptables."
return 4
if duration is not None:
try:
duration = int(duration)
except:
print >>sys.stderr, "Error: invalid duration '%s'." % duration
return 5
if duration < 1 or duration > 60*60*24:
print >>sys.stderr, "Error: invalid duration '%s' (too long or too short)." % duration
return 6
import datetime
duration = ' -m time --datestop %s' % (datetime.datetime.now() + datetime.timedelta(0, duration)).strftime("%Y-%m-%dT%H:%M:%S")
else:
duration = ''
if 0 == len(ports):
print >>sys.stderr, "Warning: no ports detected for the service."
else:
print "Blocking inbound traffic to: %s" % ', '.join(map(str,ports))
add_rules = ['%s -A INPUT -p tcp --destination-port %s -j DROP%s' % (iptables, port, duration) for port in ports]
add_rules += ['%s -A INPUT -p udp --destination-port %s -j DROP%s' % (iptables, port, duration) for port in ports]
remove_rules = ['%s -D INPUT -p tcp --destination-port %s -j DROP%s' % (iptables, port, duration) for port in ports]
remove_rules += ['%s -D INPUT -p udp --destination-port %s -j DROP%s' % (iptables, port, duration) for port in ports]
try:
open(self._get_linux_netup_filename(), 'a').write('\n'.join(remove_rules))
except Exception as e:
print >>sys.stderr, "Error: can't write to %s: %s" % (self._get_linux_netup_filename(), e)
return 7
ret_val = 0
for rule in add_rules:
if 0 != self.runCommand(rule):
print >>sys.stderr, "Error: failed to run iptable command to add: %s" % rule
ret_val = 1
return ret_val
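# Rule sketch (port 8080 is illustrative): for each detected port the tool adds DROP rules like
#   iptables -A INPUT -p tcp --destination-port 8080 -j DROP [-m time --datestop <ISO timestamp>]
# and records the matching -D (delete) commands in the netup file so that
# shell_tool_debug_linux_netup can undo them later.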
def shell_tool_debug_linux_netup(self):
''' Remove previously-added iptable rules that are dropping packets (linux only, root only). '''
if os.getuid() != 0:
print >>sys.stderr, "Error: root access required."
return 1
if not os.path.isfile(self._get_linux_netup_filename()):
print >>sys.stderr, "Warning: no rules for this service were found (missing %s)." % self._get_linux_netup_filename()
return 0
# We're not going to bother with locking on the netup file; it should be so rarely used.
# Concurrent netup/netdowns on the same service will cause issues... don't do that.
try:
remove_rules = open(self._get_linux_netup_filename()).read().split('\n')
except Exception as e:
print >>sys.stderr, "Error: can't read %s: %s" % (self._get_linux_netup_filename(), e)
return 2
ret_val = 0
for rule in remove_rules:
if not len(rule): continue
if 0 != self.runCommand(rule):
print >>sys.stderr, "Error: failed to run iptable command from %s: %s" % (self._get_linux_netup_filename(), rule)
ret_val = 1
if ret_val == 0:
os.remove(self._get_linux_netup_filename())
return ret_val
def shell_tool_debug_get_server_pid(self):
"""Return the pid of the process being managed (e.g. the actual process, not the supervisor)."""
pid = self.get_server_process_pid()
if pid:
print pid
return 0
print >>sys.stderr, "Error: no server process running."
return 1
def _get_linux_netup_filename(self):
return '/.angel-netup-iptables-%s' % self.getServiceName().lower()
def export_settings_to_tmp_file(self):
'''Export settings to a tmp file and return the filename,
using a checksum-based name so that we re-use files for exports of identical setting values.
'''
settings_as_string = self._config.export_settings_to_string()
settings_checksum = angel.util.checksum.get_checksum(settings_as_string)[0:8]
settings_filepath = os.path.join(os.path.expanduser(self._config['TMP_DIR']),
'%s-settings-%s.conf' % (self._angel.get_project_name(), settings_checksum))
if not os.path.exists(self._config['TMP_DIR']):
try:
os.makedirs(self._config['TMP_DIR'])
except:
print >>sys.stderr, "Error: tmp dir '%s' doesn't exist?" % self._config['TMP_DIR']
return None
if not os.path.isfile(settings_filepath):
self._config.export_settings_to_file(settings_filepath)
return settings_filepath
def install_template_to_dir(self, src_path, dest_path,
owner_user=None, owner_group=None, mode=0755,
vars=None, delete_unknown_files=False):
"""Given a template path, copy files into dest path and replace tokens in the template files with values
from our settings and vars.
Tokens are expected to be of the form __PROJECT_SETTING_xxx__ and __PROJECT_VAR_xxx__, where "PROJECT" is the
name of the project (from get_project_name()). PROJECT_VAR tokens are extra vars, kept in a separate
namespace from project settings.
Returns the number of files that are changed (0 if no changes from previously filled-in files), or a negative number on error.
When delete_unknown_files is set, removes any files under dest_path that are not present in src_path,
which is useful in cases where a code upgrade changes a conf filename.
"""
token_prefix = self._angel.get_project_name().upper()
if not os.path.isfile(src_path) and not os.path.isdir(src_path): # dirs are files, so this will work with a src_dir, too.
print >>sys.stderr, "Missing configuration template '%s'." % (src_path)
return -1
# To-do: figure out a more elegant solution for TOP_DIR, but for now, this gets us going.
# Ideally, we'd use a normal template engine and pass an object to templates that can be queried / looped over.
if vars is None:
vars = {}
if 'TOP_DIR' not in vars:
vars['TOP_DIR'] = self._angel.get_project_base_dir()
if os.path.isdir(src_path) and src_path[-1] == '/':
src_path = src_path[:-1] # Get rid of trailing slashes, they result in double-slashes which causes problems
if os.path.isdir(dest_path) and dest_path[-1] == '/':
dest_path = dest_path[:-1] # Get rid of trailing slashes, they result in double-slashes which causes problems
if os.path.islink(src_path):
print >>sys.stderr, "Skipping symlink file %s in setting up template dir %s." % (src_path, dest_path)
return 0 # We won't consider this a hard error -- it means there's a symlink in our src_dir, and we just don't support those.
if os.path.isdir(src_path):
# Then we have a directory to install, recurse through it.
files_changed_count = 0
# Check if there are unknown files under the dest path:
if delete_unknown_files and os.path.exists(dest_path):
for f in os.listdir(dest_path):
this_src_path = os.path.join(src_path, f)
this_dest_path = os.path.join(dest_path, f)
if not os.path.exists(this_src_path):
# We could delete the file, but we only warn for now because some services still write files into the conf dir:
print >>sys.stderr, "Warning: template file %s exists in run-time conf dir at %s but does not exist in src template dir (no file at %s)." % (f, this_dest_path, this_src_path)
# os.remove(this_dest_path)
# files_changed_count += 1
# For each file in the src conf dir, recurse through and add it to dest path:
for f in os.listdir(src_path):
this_src_path = os.path.join(src_path, f)
this_dest_path = os.path.join(dest_path, f)
ret_val = self.install_template_to_dir(this_src_path, this_dest_path,
owner_user=owner_user, owner_group=owner_group, mode=mode,
vars=vars, delete_unknown_files=delete_unknown_files)
if ret_val < 0:
return ret_val
files_changed_count += ret_val
return files_changed_count
if mode is None:
mode = stat.S_IMODE(os.stat(src_path).st_mode) # Then fall back on the mode of the original file
if 0 != create_dirs_if_needed(os.path.dirname(dest_path), name='config', owner_user=owner_user, owner_group=owner_group, mode=mode):
print >>sys.stderr, "Can't create config dir for file '%s'." % dest_path
return -1
if src_path.endswith(".pyc") or src_path.endswith(".pyo"):
print >>sys.stderr, "Warning: .pyc/.pyo file in template src dir %s; skipping file." % os.path.dirname(src_path)
return 0
try:
with open(src_path, 'r') as fd:
new_data = fd.read()
# Replace __<PROJECT>_SETTING_<NAME>__ with value from settings:
for key in self._config:
new_data = string.replace(new_data, '__%s_SETTING_%s__' % (token_prefix, key), str(self._config[key]))
# Replace __<PROJECT>_VAR_<NAME>__ with value from vars:
if vars is not None:
for key in vars:
new_data = string.replace(new_data, '__%s_VAR_%s__' % (token_prefix, key), str(vars[key]))
if os.path.isdir(dest_path):
dest_path = os.path.join(dest_path, os.path.basename(src_path))
unexpanded_token_offset = new_data.find('__%s_' % token_prefix)
line_count = len(new_data[0:unexpanded_token_offset].split("\n"))
if unexpanded_token_offset >= 0:
print >>sys.stderr, "Error: undefined token (%s) found on line %s of template %s:" % \
(', '.join(re.findall('__%s_(.*?)__' % token_prefix, new_data)), line_count, src_path)
print >>sys.stderr, new_data.split('\n')[line_count-1] #[unexpanded_token_offset:(unexpanded_token_offset+64)]
if os.path.isfile(dest_path):
# There can be an old, stale file -- remove it, otherwise it's really confusing when debugging the error.
try:
os.remove(dest_path)
except:
pass
return -1
if os.path.islink(dest_path):
print >>sys.stderr, "Warning: removing symlink %s during template installation" % (dest_path)
os.unlink(dest_path)
old_data = ''
if os.path.isfile(dest_path):
with open(dest_path, 'r') as fd:
old_data = fd.read()
if old_data == new_data and os.path.isfile(dest_path): # Need to check if dest_path exists here; otherwise 0-length files won't actually get created
return 0
fd = open(dest_path, 'w')
fd.write(new_data)
fd.close()
if owner_user is not None or owner_group is not None:
if 0 != set_file_owner(dest_path, owner_user=owner_user, owner_group=owner_group):
print >>sys.stderr, "Error: unable to set file owner (%s to %s/%s)" % (dest_path, owner_user, owner_group)
return -1
if os.access(src_path, os.X_OK):
os.chmod(dest_path, (os.stat(dest_path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
return 1
except Exception as e:
print >>sys.stderr, "Error while installing template: unable to read from %s or write to %s (%s)." % (src_path, dest_path, e)
raise e
return -1
print >>sys.stderr, "Error: impossible condition in file_and_dir_helpers.py"
return -1
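# Token-expansion sketch (project name "myproj" and setting name are illustrative): a
# template line such as
#   port = __MYPROJ_SETTING_MY_SERVICE_PORT__
#   base = __MYPROJ_VAR_TOP_DIR__
# is rewritten with the current setting value and the TOP_DIR var injected above; any
# token left unexpanded causes the install to fail with the "undefined token" error.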
```
#### File: lib/devops/monitoring.py
```python
import copy
import datetime
import re
import socket
import string
import sys
import time
import traceback
import angel.util.terminal
import angel.util.network
import angel.stats.mem_stats
import angel.settings
import angel.constants
from devops.stats import *
from devops.unix_helpers import set_proc_title
def run_status_check(angel_obj, do_all_checks=False, do_state_checks=False, do_service_checks=False, check_only_these_services=None, format=None, interval=None, timeout=None):
''' Performs various status checks on the running system.
do_all_checks: flip this on to make sure all checks are run, so that in the future as we add additional check flags, they'll default on.
do_state_checks: check that the running services match what should be configured
do_service_checks: call status() on each running service, gathering health and performance data
check_only_these_services: if defined, and do_service_checks is true, only inspect the named services
* Note that the checks this function runs are expected to complete quickly and run as efficiently as possible;
* this function is run in a continuous loop by collectd and polled by nagios on every node in production.
* Please take care when adding any additional logic that it is as efficient as possible!
format:
"" / None -- default action is to print human-readable status info
"collectd" -- run in continuous mode for collectd with given interval (defaults to 10)
"nagios" -- output nagios-formatted output and return a valid nagios exit code
"errors-only" -- display only error info; return non-zero if errors or unknown state
"silent" -- don't output anything; just return an exit code
'''
if do_all_checks:
do_state_checks = do_service_checks = True
if interval is None:
interval = 10 # Used only in collectd currently
if format == '':
format = None
if timeout is None:
if format is None:
timeout = 10 # Most likely a command-line user
else:
timeout = 14 # Nagios nrpe is set to 15 seconds
if format == 'collectd':
try:
run_collectd_monitor(angel_obj, check_only_these_services, interval) # Will only return once services are stopped
if angel_obj.are_services_running():
print >>sys.stderr, "Error: run_collectd_monitor() unexpectedly returned!"
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
except Exception as e:
print >>sys.stderr, "Error: run_collectd_monitor thew an exception(%s)." % e
sys.exit(1)
# For all other formats, we'll query status and generate output in the requested format.
# This function could use some clean-up / refactoring, but conceptually it's simple:
# 1) set up some common variables; 2) call status_check on all services; 3) generate the output.
# To-do: there's some odd rare network condition that causes a ~30 second delay in the following 3 lines
# even when services are stopped -- presumably hostname lookup stuff when DNS is unresolvable?
# Wasn't able to trace it further than this before networking resumed; so leaving this note here for now.
services_are_running = angel_obj.are_services_running()
running_services = sorted(angel_obj.get_running_service_names())
enabled_services = sorted(angel_obj.get_enabled_services())
running_unexpectedly = list(set(running_services) - set(enabled_services))
if not services_are_running:
running_unexpectedly = running_services
not_running_but_should_be = list(set(enabled_services) - set(running_services))
if 'devops' in not_running_but_should_be:
not_running_but_should_be.remove('devops')
left_column_width = 10
if len(running_services):
# Find the length of the longest service name:
left_column_width = max(left_column_width, 1 + max(map(len, running_services)))
# Default format (usually command line user) prints some info before checking each service status:
if format is None and do_state_checks:
_print_status_preamble(angel_obj, left_column_width)
if len(running_services) and do_service_checks:
print "-" * angel.util.terminal.terminal_width()
# Gather data for each service by calling their status() functions:
time_exceeded = False
stat_structs = {}
if do_service_checks:
start_time = time.time()
unused_ret_val, stat_structs = angel_obj.service_status(services_to_check=check_only_these_services, timeout=timeout)
end_time = time.time()
check_time = end_time - start_time
if check_time > timeout:
time_exceeded = True
if stat_structs is None:
print >>sys.stderr, "Error: service status struct invalid"
return angel.constants.STATE_UNKNOWN
# Run through the data for each status, checking it:
service_info = {}
status_seen_by_type = {}
status_data = {}
state_message = ''
if do_state_checks:
state_message = "%s %s" % (angel_obj.get_project_code_branch(), angel_obj.get_project_code_version())
if format == 'nagios':
if angel_obj.is_multinode_install() or True:
public_ip = angel_obj.get_public_ip_addr()
private_ip = angel_obj.get_private_ip_addr()
if private_ip != public_ip:
state_message += " on " + public_ip
def _merge_status_data(key_prefix, new_status_data):
for k in new_status_data:
new_key = "%s_%s" % (key_prefix, k)
if new_key in status_data:
print >>sys.stderr, "Warning: %s already in status_data?" % new_key
status_data[new_key] = new_status_data[k]
# Run through the results for each service, building up our results set:
for key in sorted(stat_structs):
if stat_structs[key] is None or not isinstance(stat_structs[key], dict):
# Then the given service failed to return anything from status() -- stub in an entry here:
stat_structs[key] = {}
stat_structs[key]['state'] = angel.constants.STATE_UNKNOWN
stat_structs[key]['message'] = 'Status check failed'
if time_exceeded:
stat_structs[key]['message'] = 'Status check failed or timed out'
try:
# Generate a lower-cased name of the service, without the word "service" in it:
this_service_name = '-'.join(re.findall('[A-Z][^A-Z]*', string.replace(key, 'Service', ''))).lower()
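# e.g. a key of "CassandraService" becomes "cassandra", and "HttpProxyService" becomes "http-proxy" (hypothetical class names).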
service_info[this_service_name] = {}
this_state = stat_structs[key]['state']
if this_state is None:
print >>sys.stderr, "Error: service %s failed to return a state code" % this_service_name
this_state = angel.constants.STATE_UNKNOWN
service_info[this_service_name]['state'] = this_state
status_seen_by_type[this_state] = True
this_message = 'Unknown'
if 'message' in stat_structs[key] and stat_structs[key]['message'] is not None:
this_message = stat_structs[key]['message']
if this_state != angel.constants.STATE_RUNNING_OK or do_state_checks is False:
if len(state_message):
state_message += ", "
if not (check_only_these_services is not None and 1 == len(check_only_these_services)):
# If we're only checking one service, don't preface the status message with the service name.
state_message += "%s: " % this_service_name
state_message += this_message.split("\n")[0]
try:
state_name = angel.constants.STATE_CODE_TO_TEXT[this_state]
except:
state_name = 'UNKNOWN(%s)' % this_state
format_str = "{:>%s}:{:>9} {}" % left_column_width
service_info[this_service_name]['message'] = format_str.format(this_service_name, state_name, this_message.split("\n")[0])
service_info[this_service_name]['message_raw'] = this_message.split("\n")[0]
if 'data' in stat_structs[key]:
_merge_status_data(this_service_name.lower(), stat_structs[key]['data'])
except:
print >>sys.stderr, "Error in status check %s: %s\n%s" % (key, sys.exc_info()[0], traceback.format_exc(sys.exc_info()[2]))
state_message += " error in %s status data" % (str(key))
status_seen_by_type[angel.constants.STATE_UNKNOWN] = True
# Reduce multiple status_codes down to one value for our exit_code. This isn't elegant, but it seems to be the cleanest way of managing this.
# Order of importance, most important to least important, in general:
# Decommissioned > Unknown > Error > Stopped > Starting|Stopping > Warn > Okay
# - If we're "ok" but the node is marked as in maintenance mode, we flip the level up one to warning.
# - If a service is in starting or stopping state, that masks any Warn level stuff.
# - If the single status code is stopped, but services are supposed to be running, then that's a real error.
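# For example, if one service reports WARN and another reports STOPPED, the combined exit code becomes STOPPED
# (and is then promoted to ERROR further below when services are supposed to be running).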
extra_state_message = ''
if services_are_running:
if do_state_checks:
extra_state_message += " Running %s services" % len(running_services)
exit_code = angel.constants.STATE_RUNNING_OK
else:
exit_code = angel.constants.STATE_UNKNOWN
else:
exit_code = angel.constants.STATE_STOPPED
enabled_services_str = copy.copy(enabled_services)
try:
enabled_services_str.remove('devops')
except:
pass
enabled_services_str = ', '.join(enabled_services_str)
if angel_obj.is_decommissioned():
exit_code = angel.constants.STATE_DECOMMISSIONED
extra_state_message = ' DECOMMISSIONED'
elif angel.constants.STATE_UNKNOWN in status_seen_by_type:
exit_code = angel.constants.STATE_UNKNOWN
elif angel.constants.STATE_ERROR in status_seen_by_type:
exit_code = angel.constants.STATE_ERROR
elif angel.constants.STATE_STOPPED in status_seen_by_type:
exit_code = angel.constants.STATE_STOPPED
elif angel.constants.STATE_STARTING in status_seen_by_type:
exit_code = angel.constants.STATE_STARTING
elif angel.constants.STATE_STOPPING in status_seen_by_type:
exit_code = angel.constants.STATE_STOPPING
elif angel.constants.STATE_WARN in status_seen_by_type:
exit_code = angel.constants.STATE_WARN
elif angel.constants.STATE_RUNNING_OK in status_seen_by_type:
exit_code = angel.constants.STATE_RUNNING_OK
if services_are_running:
extra_state_message = ' ok: running %s' % enabled_services_str
else:
if do_service_checks:
extra_state_message = ' unknown state for services %s' % enabled_services_str
if do_state_checks:
if services_are_running:
if exit_code == angel.constants.STATE_STOPPED:
# If all the services are reporting STOPPED state, but we're supposed to be running, that's an error:
exit_code = angel.constants.STATE_ERROR
if angel_obj.is_in_maintenance_mode():
extra_state_message += ' (in maintenance mode)'
if exit_code == angel.constants.STATE_RUNNING_OK:
exit_code = angel.constants.STATE_WARN
if not services_are_running:
if len(running_services) and False:
extra_state_message += ' (stopped; running %s; normally runs %s)' % (', '.join(running_services), enabled_services_str)
else:
extra_state_message += ' (stopped; normally runs %s)' % enabled_services_str
if exit_code == angel.constants.STATE_RUNNING_OK or exit_code == angel.constants.STATE_WARN:
exit_code = angel.constants.STATE_STOPPED
if len(running_unexpectedly):
extra_state_message += ' (running unexpected services: %s)' % ', '.join(running_unexpectedly)
if exit_code == angel.constants.STATE_RUNNING_OK:
exit_code = angel.constants.STATE_WARN
if services_are_running:
if len(not_running_but_should_be):
extra_state_message += ' (services missing: %s)' % ', '.join(not_running_but_should_be)
exit_code = angel.constants.STATE_ERROR
state_message += extra_state_message.replace(') (', '; ') # in case we have multiple (foo) (bar) messages
# We now have a state_message and exit code -- transform it according to the requested output format:
# Default output:
if format == '' or format is None:
if not services_are_running and 'devops' in service_info:
del service_info['devops']
# It's possible to have a running service and a stopped system, e.g. during maintenance work.
if len(service_info):
for entry in sorted(service_info):
color_start = ''
color_end = ''
if angel.util.terminal.terminal_stdout_supports_color():
if service_info[entry]['state'] in (angel.constants.STATE_WARN,angel.constants.STATE_STOPPED):
color_start = '\033[0;31m'
color_end = '\033[0m'
if service_info[entry]['state'] in (angel.constants.STATE_ERROR, angel.constants.STATE_UNKNOWN):
color_start = '\033[1;31m'
color_end = '\033[0m'
message_to_print = service_info[entry]['message']
if angel.util.terminal.terminal_width_is_true_size():
message_to_print = service_info[entry]['message'][:angel.util.terminal.terminal_width()]
print color_start + message_to_print + color_end
if do_service_checks and do_state_checks:
print "-" * angel.util.terminal.terminal_width()
general_status_state = 'UNKNOWN EXIT CODE (%s)' % exit_code
try:
general_status_state = angel.constants.STATE_CODE_TO_TEXT[exit_code]
except:
pass
if do_state_checks:
print ('{:>%s}: {}' % left_column_width).format("State", extra_state_message.lstrip())
status_notes = ''
if len(running_unexpectedly):
status_notes += '; running unexpected services (%s)' % ', '.join(running_unexpectedly)
status_info = "%s%s as of %s" % (general_status_state,
status_notes,
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"))
print ('{:>%s}: {}' % left_column_width).format("Status", status_info)
# In default format, we return 0 exit code on Ok and Warn states; non-zero otherwise:
if exit_code == angel.constants.STATE_RUNNING_OK or exit_code == angel.constants.STATE_WARN:
return 0
return 1
# Silent -- exit code only:
if format == 'silent':
if exit_code == angel.constants.STATE_RUNNING_OK or exit_code == angel.constants.STATE_WARN:
return 0
if exit_code == 0:
return -1
return exit_code
# Errors-only -- useful for running across a large number of nodes:
if format == 'errors-only':
# If we're in an error or unknown state, display short message with error info and exit 1; otherwise silent and 0 exit code:
in_error = False
if exit_code == angel.constants.STATE_ERROR or exit_code == angel.constants.STATE_UNKNOWN or exit_code == angel.constants.STATE_DECOMMISSIONED: in_error = True
if services_are_running and exit_code == angel.constants.STATE_STOPPED: in_error = True
if in_error:
print "%s: %s" % (angel_obj.get_node_hostname(), state_message)
return 1
return 0
# Nagios formatted output:
if format == 'nagios':
# Nagios doesn't use decommissioned, starting, stopping, or stopped states, so we have to remap those:
nagios_exit_code = exit_code
if exit_code == angel.constants.STATE_DECOMMISSIONED: nagios_exit_code = angel.constants.STATE_ERROR
if exit_code == angel.constants.STATE_STARTING: nagios_exit_code = angel.constants.STATE_WARN
if exit_code == angel.constants.STATE_STOPPING: nagios_exit_code = angel.constants.STATE_WARN
if exit_code == angel.constants.STATE_STOPPED:
if not services_are_running:
nagios_exit_code = angel.constants.STATE_WARN
else:
nagios_exit_code = angel.constants.STATE_ERROR
# Create nagios format output:
nagios_data_str = ''
for key in sorted(status_data):
d = status_data[key]
if not 'value' in d:
print >>sys.stderr, "Error: status data key %s has no value." % key
continue
nagios_data_str += " %s=%s" % (key, d['value'])
if 'unit' in d:
if d['unit'] in angel.constants.STAT_TYPES_NAGIOS:
nagios_data_str += "%s" % (angel.constants.STAT_TYPES_NAGIOS[ d['unit'] ].replace('~','') ) # See note in stats about '~' chars
else:
print >>sys.stderr, "Error: unknown stat unit type '%s'" % d['unit']
if not 'warn' in d and not 'error' in d and not 'min' in d and not 'max' in d:
continue
nagios_data_str += ";"
if 'warn' in d:
nagios_data_str += "%s" % d['warn']
if not 'error' in d and not 'min' in d and not 'max' in d:
continue
nagios_data_str += ";"
if 'error' in d:
nagios_data_str += "%s" % d['error']
if not 'min' in d and not 'max' in d:
continue
nagios_data_str += ";"
if 'min' in d:
nagios_data_str += "%s" % d['min']
if not 'max' in d:
continue
nagios_data_str += ";%s" % d['max']
# Print state message without hints:
# (e.g. get rid of [...] comments in string like: process not running [try 'tool foo start'])
print re.sub('\[[^\]]*\]', '', state_message)
if len(nagios_data_str):
print '|' + nagios_data_str.lstrip(),
print ""
return nagios_exit_code
# Unknown format (this should never be reached, unless an invalid format is specified):
print >>sys.stderr, "Error: unknown status format '%s'" % format
return angel.constants.STATE_UNKNOWN
def run_collectd_monitor(angel_obj, services_to_check, interval):
hostname = socket.gethostname()
if interval < 5:
print >>sys.stderr, "Collectd interval too short; setting to 5 seconds."
interval = 5
if interval > 60*5:
print >>sys.stderr, "Warning: very long collectd interval"
while True:
# Check that we're not leaking memory:
mem_usage = angel.stats.mem_stats.get_mem_usage()
if mem_usage['rss'] > 1024*1024*256:
print >>sys.stderr, "Warning: collectd memory usage is high! %s bytes rss?" % mem_usage['rss']
sample_start_time = time.time()
set_proc_title('collectd: getting status')
overall_status_code, stat_structs = angel_obj.service_status(services_to_check=services_to_check, run_in_parallel=False, timeout=interval) # run serially so we don't spike resource usage
set_proc_title('collectd: running')
running_service_count = len(stat_structs)
if 0 == running_service_count:
# This happens when we're stopped. In this case, we'll return, which will cause us to exit.
# Collectd will then re-start the script on the next interval.
# We do this to guarantee that any config changes are picked up by collectd, otherwise we might
# not pick up changes in IP addresses or services we should be monitoring.
return 1
for key in sorted(stat_structs):
try:
this_service_name = '-'.join(re.findall('[A-Z][^A-Z]*', string.replace(key, 'Service', ''))).lower()
if stat_structs[key] is None:
print >>sys.stderr, "Error: service %s failed to return a stat struct" % this_service_name
continue
if 'data' not in stat_structs[key]:
continue # This is ok -- just means this service doesn't have any performance data metrics
this_data = stat_structs[key]['data']
for data_point in this_data:
if 'value' not in this_data[data_point]:
print >>sys.stderr, "Error in status check %s: key %s has no value" % (this_service_name, data_point)
continue
collectd_group = this_service_name
collectd_name = data_point
if 'stat_group' in this_data[data_point]:
stat_group_name = this_data[data_point]['stat_group'].lower().replace(' ', '_').replace('-', '_')
if this_service_name != stat_group_name:
collectd_group = "%s-%s" % (this_service_name, stat_group_name)
if 'stat_name' in this_data[data_point]:
collectd_name = this_data[data_point]['stat_name']
collectd_name = collectd_name.replace(' ', '_').replace('-', '_')
if 'unit' in this_data[data_point]:
stat_unit = angel.constants.STAT_TYPES_COLLECTD[ this_data[data_point]['unit'] ]
if stat_unit != collectd_name:
collectd_name = "%s-%s" % (stat_unit, collectd_name)
print 'PUTVAL "%s/%s/%s" interval=%s %d:%s' % (hostname[0:62], collectd_group[0:62], collectd_name[0:62], interval, sample_start_time, this_data[data_point]['value'])
except:
print >>sys.stderr, "Error in status check %s: %s\n%s" % (key, sys.exc_info()[0], traceback.format_exc(sys.exc_info()[2]))
sample_end_time = time.time()
sample_run_time = sample_end_time - sample_start_time
try:
sleep_time = interval - sample_run_time
if sleep_time < 2:
print >>sys.stderr, "warning: collectd interval=%s; run time=%s; sleep time=%s; sleeping for 2 seconds instead" % (interval, sample_run_time, sleep_time)
sleep_time = 2
set_proc_title('collectd: sleeping for %.2f' % sleep_time)
time.sleep(sleep_time)
except:
print >>sys.stderr, "collectd sleep interrupted"
return 1
def _print_status_preamble(angel_obj, left_column_width):
"""Print some basic info about the node -- a "header" to the status output"""
def _print_line(label, value):
print ('{:>%s}: {}' % left_column_width).format(label, value)
# " Node:
ip_addr_info = angel_obj.get_private_ip_addr()
if angel_obj.get_public_ip_addr():
if angel_obj.get_public_ip_addr() != ip_addr_info:
ip_addr_info += ' / ' + angel_obj.get_public_ip_addr()
nodename = angel_obj.get_node_hostname()
nodename_warning_msg = ""
if not angel.util.network.is_hostname_reverse_resolving_correctly():
nodename_warning_msg = " !!! INVALID REVERSE DNS ENTRY !!! "
_print_line("Node", "%s - %s%s" % (nodename, ip_addr_info, nodename_warning_msg))
version_manager = angel_obj.get_version_manager()
branch = angel_obj.get_project_code_branch()
code_version = angel_obj.get_project_code_version()
newest_code_version = None
pinning_message = ''
if version_manager:
if version_manager.is_version_pinned():
pinning_message = '; version is pinned (%s)' % version_manager.get_version_pinned_reason()
if branch is None:
branch = '_unknown-branch'
if code_version is None:
code_version = '_unknown-build'
if version_manager:
newest_code_version = version_manager.get_highest_installed_version_number(branch)
branch_and_build = branch
if code_version is not None:
branch_and_build += " %s" % code_version
version_message = ''
if code_version and newest_code_version:
if code_version != newest_code_version:
version_message = '; newer version %s available' % newest_code_version
_print_line("Version", "%s%s%s" % (branch_and_build, version_message, pinning_message))
```
#### File: lib/devops/speed_testing.py
```python
import urllib
import urllib2
import base64
import re
def call_and_return_with_timing(f, *args, **kwargs):
"""Helper for calling a function that returns (result, duration) where result
is the returned value, and duration is the datetime.timedelta of the call.
Exceptions are not caught.
"""
from datetime import datetime
before = datetime.now()
result = f(*args, **kwargs)
after = datetime.now()
return (result, after-before)
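# Example usage with a hypothetical URL:
# (response, delta) = call_and_return_with_timing(urllib2.urlopen, 'http://example.com/')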
def get_total_seconds(td):
"""Returns total seconds represented by a datetime.timedelta object.
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
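# Note: this is equivalent to td.total_seconds(), which only exists in Python 2.7+; the manual
# computation presumably keeps this module usable on older interpreters.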
# TODO(Jeff): Replace user/pass defaults with HEALTHCHECK_LOGIN_USERNAME and
# HEALTHCHECK_LOGIN_PASSWORD, once they're defined.
def check_url(domain, rel_url, domain_port=80, threshold_seconds=10, login_user='dan', login_password='<PASSWORD>'):
"""Checks health of url, does login first. The rel_url param should start with a backslash.
"""
try:
# Setup
cookie_handler= urllib2.HTTPCookieProcessor()
opener = urllib2.build_opener(cookie_handler)
urllib2.install_opener(opener)
# POST user/pass
login_url = 'http://%s:%s%s' % (domain, domain_port, '/account/login/')
login_data = urllib.urlencode({'username': login_user, 'password': login_password, 'next': '/'})
response = opener.open(login_url, data=login_data)
if not response.code == 200:
raise AssertionError('Bad login response code: %s' % response.code)
# GET requested URL, capture duration.
requested_url = 'http://%s:%s%s' % (domain, domain_port, rel_url)
(response, time_delta) = call_and_return_with_timing ( lambda : opener.open(requested_url))
duration = get_total_seconds(time_delta)
if not response.code==200:
raise AssertionError('Bad main response code: %s' % response.code)
# Make sure userId contains our user
returned_user = re.search('userId="([^"]*)"', response.read()).group(1)
if not returned_user == login_user:
raise AssertionError('Expected userId to be "%s", was "%s"' % (login_user, returned_user))
# Formulate return values
if duration <= threshold_seconds:
state = 'OK'
message = 'Check_url() succeeded, see data.'
data = {'duration': '%.6f' % duration}
else:
state = 'WARN'
data = {}
message = 'Timeout exceeded.'
except Exception, ex:
state = 'ERR'
message = 'Exception: %s' % repr(ex)
data = {}
# TODO(Jeff) - change to use the new stats/structs builder functions.
return {'state':state, 'message': message, 'data':data}
# UNCOMMENT TO TEST:
# print check_url('preview.namesaketools.com', '/conversations/private')
```
#### File: lib/devops/supervisor.py
```python
import fcntl
import os
import random
import select
import signal
import sys
import time
import traceback
from devops.file_and_dir_helpers import *
from angel.util.pidfile import *
from devops.unix_helpers import set_proc_title
from angel.stats.disk_stats import disk_stats_get_usage_for_path
from devops.process_helpers import *
import angel.settings
# This function is similar to Python's subprocess module, with some tweaks and customizations.
# Like subprocess, it forks a child process, waits for it to exit, and re-starts it on exit. It never returns.
# Our supervisor handles shutdown conditions, calling a stop_func when the supervisor process receives SIGTERM.
# We also handle log rotation, rolling over stdout/stderr when the supervisor process receives SIGWINCH.
# Most other signals are propogated to the child process -- that is, sending the supervisor process SIGHUP will
# be passed through to the child process.
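# A minimal usage sketch follows; every name in it is hypothetical and not part of this module:
#
#   def _my_init():                  # optional one-time setup; return 0 on success
#       return 0
#   def _my_exec():                  # should exec() the real server, replacing the child process
#       os.execv('/usr/bin/my-server', ['my-server'])
#   def _my_stop(child_pid):         # called by the supervisor when it receives SIGTERM
#       os.kill(child_pid, signal.SIGTERM)
#       return 0
#   supervisor_manage_process(config, 'my-server', '/var/run/my-server-supervisor.pid',
#                             'daemon', 'daemon', 'my-server', True, 0,
#                             _my_init, _my_exec, _my_stop)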
def supervisor_manage_process(config, name, pid_filename_for_daemon, run_as_user, run_as_group, log_basepath,
restart_daemon_on_exit, process_oom_adjustment, init_func, exec_func, stop_func):
''' Creates and manages a child process, running given functions.
- If init_func is defined, it is called in the child process first. If it returns a non-zero status, then supervisor will exit.
- exec_func is then called. If restart_daemon_on_exit is True, exec_func is restarted whenever it exits.
- If stop_func is defined, it is called when this managing process receives a SIGTERM.
- pid_filename_for_daemon is used by this manager process to update status info and track that the manager should be running.
- process_oom_adjustment is a value, typically between -15 and 0, that indicates to the Linux kernel how "important" the process is.
This function never returns.
'''
# Create supervisor logger:
supervisor_logfile_path = launcher_get_logpath(config, log_basepath, 'supervisor')
if 0 != create_dirs_if_needed(os.path.dirname(supervisor_logfile_path), owner_user=run_as_user, owner_group=run_as_group):
print >>sys.stderr, "Supervisor error: unable to create log dirs."
os._exit(0) # Never return
try:
supervisor_logger = SupervisorLogger(open(supervisor_logfile_path, 'a', buffering=0))
except Exception as e:
print >>sys.stderr, "Supervisor error: unable to create supervisor log (%s: %s)." % (supervisor_logfile_path, e)
os._exit(0) # Never return
# Send SIGTERM to the supervisor daemon to tell it to quit the child process and exit.
# Send SIGWINCH to the supervisor daemon to tell it to rotate logs.
# Any other trappable_signal is sent to the child process to do any service-defined logic as necessary.
trappable_signals = (signal.SIGINT, signal.SIGWINCH, signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2, signal.SIGQUIT)
global supervisor_daemon_exit_requested
supervisor_daemon_exit_requested = False
global run_init_instead_of_exec
run_init_instead_of_exec = False
set_proc_title('supervisor[%s]: starting' % name)
# Always run supervisor with kernel out-of-memory flags set to hold off on killing us.
# This is reset back up to 0 in the child process (or whatever process_oom_adjustment is set to).
set_process_oom_factor(-15)
supervisor_pid = os.getpid()
child_pid = None
daemon_start_time = int(time.time())
last_start_time = None
start_count = 0
continous_restarts = 0
min_delay_between_continous_restarts = 5
max_delay_between_continous_restarts = 30
restart_delay_jitter = 60 # If we hit max_delay, we'll re-try at some interval between (max_delay - jitter) and (max_delay)
# Define a function that waits for a child pid to exit OR for us to receive a signal:
def _supervisor_daemon_waitpid(pid):
if pid is None or pid < 2:
supervisor_logger.warn("Supervisor[%s]: can't wait on invalid pid %s." % (name, pid))
return -1
try:
# To-do: periodically wake up and check that pid_filename_for_daemon contains our pid, or exit
(wait_pid, wait_exitcode) = os.waitpid(pid, 0)
return (wait_exitcode >> 8) % 256
except OSError:
return -2 # waitpid will throw an OSError when our supervisor receives a kill signal (i.e. SIGTERM to tell us to exit); our code below will loop and re-call this.
return -3
# Define a function that receives a signal and passes it through to our child process:
def _supervisor_daemon_signal_passthru(signum, frame):
if child_pid is None or child_pid < 2:
# This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
return
try:
supervisor_logger.info("_supervisor_daemon_signal_passthru: kill -%s %s" % (signum, child_pid))
os.kill(child_pid, signum)
except Exception as e:
supervisor_logger.error("Supervisor %s[%s/%s managing %s]: unable to send signal %s to pid %s: %s" % (name, supervisor_pid, os.getpid(), child_pid, signum, child_pid, e))
# Define a function that receives a signal and rotates logs:
def _supervisor_daemon_rotate_logs(signum, frame):
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: rotate logs not implemented yet; log_basepath=%s" % (name, supervisor_pid, os.getpid(), child_pid, log_basepath))
# Define a function that receives a signal and cleanly shuts down the server:
def _supervisor_daemon_quit(signum, frame):
# Flag that quit has been requested:
global supervisor_daemon_exit_requested
supervisor_daemon_exit_requested = True
if child_pid is None or child_pid < 2:
# This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
return
# Check if we're still in an init phase (can't call stop_func on something that hasn't actually started):
global run_init_instead_of_exec
if run_init_instead_of_exec:
# if we're currently invoking a custom init function, then we need to send the supervisor process the kill signal directly so it exits
return _supervisor_daemon_signal_passthru(signum, frame)
# Run stop function if given, otherwise pass along given kill signal to child process:
if stop_func is not None:
try:
import threading
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); calling stop function" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name))
ret_val = stop_func(child_pid)
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); stop function done (%s)" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name, ret_val))
return
except Exception:
supervisor_logger.error("Supervisor %s[%s/%s managing %s]: error in stop function: %s" % (name, supervisor_pid, os.getpid(), child_pid, traceback.format_exc(sys.exc_info()[2])))
else:
supervisor_logger.warn("Supervisor %s[%s/%s managing %s]: no stop function given" % (name, supervisor_pid, os.getpid(), child_pid))
return _supervisor_daemon_signal_passthru(signum, frame)
def _install_signal_functions():
signal.signal(signal.SIGWINCH, _supervisor_daemon_rotate_logs)
signal.signal(signal.SIGTERM, _supervisor_daemon_quit)
for sig in trappable_signals:
if sig not in (signal.SIGWINCH, signal.SIGTERM):
signal.signal(sig, _supervisor_daemon_signal_passthru)
def _remove_signal_functions():
for sig in trappable_signals:
signal.signal(sig, signal.SIG_DFL)
def _sleep_without_signal_functions(duration):
# Because there are cases where *we* need to be interrupted:
_remove_signal_functions()
time.sleep(duration)
_install_signal_functions()
# Install signal functions:
_install_signal_functions()
# chdir() to /, to avoid potentially holding a mountpoint open:
os.chdir('/')
# Reset umask:
os.umask(022)
# Redirect STDOUT/STDERR:
# (Redirects run as separate threads in our supervisor process -- don't move these to the child process; os.exec will wipe them out.)
os.setsid()
stdout_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, ''), run_as_user=run_as_user, run_as_group=run_as_group)
stderr_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'error'), run_as_user=run_as_user, run_as_group=run_as_group)
supervisor_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'supervisor'), run_as_user=run_as_user, run_as_group=run_as_group)
stdout_redirector.startRedirectThread(sys.stdout)
stderr_redirector.startRedirectThread(sys.stderr)
supervisor_redirector.startRedirectThread(supervisor_logger.logger_fd)
# Close STDIN:
sys.stdin.close()
os.close(0)
new_stdin = open(os.devnull, 'r', 0) # So FD 0 isn't available
#new_stdin = open(os.devnull, 'r', 0)
#try:
# os.dup2(new_stdin.fileno(), sys.stdin.fileno())
#except ValueError:
# print >>sys.stderr, "Can't set up STDIN, was it closed on us?"
# Loop until shutdown requested, handling signals and logs and making sure that our server remains running:
while not supervisor_daemon_exit_requested:
if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
supervisor_logger.warn("Supervisor[%s/%s]: Warning: invalid pid %s in lock file %s. Re-checking..." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
try:
time.sleep(0.5)
except:
pass
if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
supervisor_logger.error("Supervisor[%s/%s]: FATAL: invalid pid %s in lock file %s. Exiting now." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
sys.stdout.flush()
sys.stderr.flush()
time.sleep(0.5) # Need to sleep so that logger threads can write out above stderr message. Gross, but it works.
os._exit(1)
lockfile_pid = get_pid_from_pidfile(pid_filename_for_daemon)
if lockfile_pid is None or supervisor_pid != lockfile_pid:
supervisor_logger.error("Supervisor[%s/%s]: FATAL: lock file %s not owned by current process! (pid is %s) Exiting now." % (supervisor_pid, os.getpid(), pid_filename_for_daemon, lockfile_pid))
os._exit(1)
one_time_run = False
run_init_instead_of_exec = False
if start_count == 0 and init_func is not None:
run_init_instead_of_exec = True
if not restart_daemon_on_exit:
# This is a clever trick: we might want to run a command in the background one-time (i.e. priming a service).
# By passing restart_daemon_on_exit as false from way up above us in the callstack,
# we can use our run logic inside the supervisor process and let it exit cleanly.
# This works by reading one_time_run after we've started and flipping supervisor_daemon_exit_requested to True.
one_time_run = True
try:
log_disk_stats = disk_stats_get_usage_for_path(config['LOG_DIR'])
data_disk_stats = disk_stats_get_usage_for_path(config['DATA_DIR'])
run_disk_stats = disk_stats_get_usage_for_path(config['RUN_DIR'])
if log_disk_stats is not None and data_disk_stats is not None and run_disk_stats is not None:
# Only do this check when we can get stats -- otherwise it's possible to rm -rf log_dir and then have the service die.
if log_disk_stats['free_mb'] < 100 or data_disk_stats['free_mb'] < 100 or run_disk_stats['free_mb'] < 100:
supervisor_logger.error("Supervisor[%s/%s]: insufficent disk space to run %s." % (supervisor_pid, os.getpid(), name))
try:
_sleep_without_signal_functions(10)
except:
supervisor_daemon_exit_requested = True
continue
except Exception as e:
supervisor_logger.error("Supervisor[%s/%s]: disk check failed: %s" % (supervisor_pid, os.getpid(), e))
if child_pid is None and not supervisor_daemon_exit_requested:
if one_time_run:
supervisor_daemon_exit_requested = True
# Then we need to fork and start child process:
try:
sys.stdout.flush() # If we have a ' print "Foo", ' statement (e.g. with trailing comma), the forked process ends up with a copy of it, too.
sys.stderr.flush()
child_pid = os.fork()
if child_pid:
# Parent process:
supervisor_logger.info("Supervisor[%s/%s]: managing process %s running as pid %s" % (supervisor_pid, os.getpid(), name, child_pid))
set_proc_title('supervisor: managing %s[%s]' % (name, child_pid))
prior_child_start_time = last_start_time
last_start_time = time.time()
start_count += 1
if 0 != update_pidfile_data(pid_filename_for_daemon, { \
angel.constants.LOCKFILE_DATA_DAEMON_START_TIME: daemon_start_time, \
angel.constants.LOCKFILE_DATA_PRIOR_CHILD_START_TIME: prior_child_start_time, \
angel.constants.LOCKFILE_DATA_CHILD_START_TIME: int(time.time()), \
angel.constants.LOCKFILE_DATA_CHILD_PID: child_pid, \
angel.constants.LOCKFILE_DATA_START_COUNT: start_count, \
} ):
supervisor_logger.error("Supervisor[%s/%s]: error updating pidfile data in pidfile %s" % (supervisor_pid, os.getpid(), pid_filename_for_daemon))
else:
# Child process:
supervisor_logger.info("Supervisor[%s/%s]: running %s" % (supervisor_pid, os.getpid(), name))
set_proc_title('supervisor: starting %s' % name)
# Set our process_oom_adjustment, as the parent process ALWAYS has it set to a very low value to avoid the supervisor from being killed:
set_process_oom_factor(process_oom_adjustment)
# Drop root privileges (has to be done after oom adjustment):
if 0 != process_drop_root_permissions(run_as_user, run_as_group):
supervisor_logger.error("Supervisor[%s/%s]: error setting user/group to %s/%s in child process." % (supervisor_pid, os.getpid(), run_as_user, run_as_group))
os._exit(1)
# We need to reset the signal handlers so as to NOT trap any signals because exec_func and init_func will have python code that runs within our current process.
# We have to unset this in the child process; if we set it in the "parent" branch of the if statement, then we'd be missing them on the next loop.
_remove_signal_functions()
# If there's an init function, run it instead:
if run_init_instead_of_exec:
set_proc_title('%s worker init' % name)
supervisor_logger.info("Supervisor[%s/%s]: starting init for %s" % (supervisor_pid, os.getpid(), name))
init_okay = True
ret_val = None
try:
ret_val = init_func()
except Exception as e:
supervisor_logger.error("Error in init function: %s; bailing." % e)
init_okay = False
if type(ret_val) is not int:
supervisor_logger.warn("Warning: init_func for %s returned non-int; please return 0 on success; non-zero otherwise; or throw an exception." % (name, ret_val))
else:
if ret_val != 0:
init_okay = False
if not init_okay:
supervisor_logger.error("Supervisor[%s/%s]: FATAL: init failed for %s" % (supervisor_pid, os.getpid(), name))
os.kill(supervisor_pid, signal.SIGTERM)
else:
supervisor_logger.info("Supervisor[%s/%s]: init finished for %s" % (supervisor_pid, os.getpid(), name))
os._exit(ret_val) # Exit child process; supervisor will pick up the exit status and continue.
# Run the exec function:
set_proc_title('%s worker' % name)
try:
exec_func() # This should be a function that calls os.exec and replaces our current process
except Exception as e:
supervisor_logger.error("Error in exec function: %s" % e)
supervisor_logger.error("MAJOR ERROR: Supervisor[%s/%s]: function for %s unexepectedly returned." % (supervisor_pid, os.getpid(), name))
os._exit(2)
except Exception as e:
supervisor_logger.error("Supervisor[%s/%s]: child process failed (%s)." % (supervisor_pid, os.getpid(), e))
try:
_sleep_without_signal_functions(10) # Sleep in child to prevent parent from rapidly re-spawning
except:
pass
continue
if child_pid is None:
supervisor_logger.error("Supervisor[%s/%s]: child process setup failed (supervisor_daemon_exit_requested: %s)." % (supervisor_pid, os.getpid(), supervisor_daemon_exit_requested))
try:
_sleep_without_signal_functions(10) # Sleep in child to prevent parent from rapidly re-spawning
except:
supervisor_daemon_exit_requested = True
continue
# The parent process needs to wait for the child process to exit:
wait_exitcode = _supervisor_daemon_waitpid(child_pid)
set_proc_title('supervisor: managing %s[%s exited %s]' % (name, child_pid, wait_exitcode))
if run_init_instead_of_exec:
supervisor_logger.info("Supervisor[%s/%s]: init function finished." % (supervisor_pid, os.getpid()))
child_pid = None
continue
if supervisor_daemon_exit_requested:
set_proc_title('supervisor: managing %s[%s exited %s for exit]' % (name, child_pid, wait_exitcode))
if one_time_run:
supervisor_logger.info('Supervisor[%s/%s]: %s[%s] exited (exit code %s) for one-time run.' % (supervisor_pid, os.getpid(), name, child_pid, wait_exitcode))
else:
supervisor_logger.info('Supervisor[%s/%s]: %s[%s] exited (exit code %s) for shutdown.' % (supervisor_pid, os.getpid(), name, child_pid, wait_exitcode))
break
# The wait-for-child logic above may have returned early due to a signal that we received and passed off to child or otherwise handled.
# Only reset stuff for a restart if the child process actually exited (i.e. waitpid() returned because the child exited, not because the parent received a signal):
if not is_pid_running(child_pid):
set_proc_title('supervisor: restarting %s' % (name))
this_run_duration = time.time() - last_start_time
# Re-try service starts no faster than some minimum interval, backing off to some maximum interval so lengthy outages don't trigger a sudden spike
delay_until_next_restart = 0
if continous_restarts > 0:
delay_until_next_restart = min_delay_between_continous_restarts + (continous_restarts - 1) * 10 - this_run_duration + 2*random.random()
if delay_until_next_restart < min_delay_between_continous_restarts:
delay_until_next_restart = min_delay_between_continous_restarts + 2*random.random()
if delay_until_next_restart > max_delay_between_continous_restarts:
delay_until_next_restart = max_delay_between_continous_restarts - random.random() * restart_delay_jitter
supervisor_logger.error('Supervisor[%s/%s]: %s[%s] unexpected exit (exit code %s) after %s seconds on run number %s, waiting %s seconds before restarting.' %
(supervisor_pid, os.getpid(), name, child_pid, wait_exitcode, this_run_duration, start_count, delay_until_next_restart))
supervisor_logger.error('Supervisor[%s/%s]: more info: run_init_instead_of_exec: %s; restart_daemon_on_exit: %s' %
(supervisor_pid, os.getpid(), run_init_instead_of_exec, restart_daemon_on_exit))
child_pid = None
if this_run_duration < max_delay_between_continous_restarts:
continous_restarts += 1
try:
time_left = delay_until_next_restart
while time_left > 0:
# spit out a log every few seconds so we can see what's going on in the logs -- otherwise it looks wedged:
supervisor_logger.error('Supervisor[%s/%s]: %s[%s] waiting %s seconds.' % (supervisor_pid, os.getpid(), name, child_pid, int(time_left)))
sleep_time = 5
if sleep_time > time_left:
sleep_time = time_left
_sleep_without_signal_functions(sleep_time)
time_left -= sleep_time
except Exception as e:
supervisor_logger.error('Supervisor[%s/%s]: %s had exception while waiting; bailing (%s).' % (supervisor_pid, os.getpid(), name, e))
supervisor_daemon_exit_requested = True
else:
continous_restarts = 0
# We'll only exit above loop when supervisor_daemon_exit_requested is true.
# We keep running until the child process exits, otherwise there's no way
# for the outside world to send further signals to the process.
while is_pid_running(child_pid):
try:
# While we can still send signals to the supervisor process, wait on it
set_proc_title('supervisor: waiting for exit %s[%s]' % (name, child_pid))
supervisor_logger.info("Supervisor[%s/%s]: waiting for exit %s[%s]" % (supervisor_pid, os.getpid(), name, child_pid))
_supervisor_daemon_waitpid(child_pid)
except OSError:
pass
set_proc_title('supervisor: finished monitoring %s[%s]; closing logfiles' % (name, child_pid))
supervisor_logger.info("Supervisor[%s/%s] finished monitoring %s[%s]; exiting" % (supervisor_pid, os.getpid(), name, child_pid))
if os.path.isfile(pid_filename_for_daemon):
# The pid file really should exist, but if it doesn't, there's not a lot we can do anyway, and logging it is handled by the else branch below.
os.remove(pid_filename_for_daemon)
else:
supervisor_logger.warn("Supervisor[%s/%s]: no lockfile at %s to remove, oh well." % (supervisor_pid, os.getpid(), pid_filename_for_daemon))
# Stop logging threads:
stdout_redirector.stopRedirectThread()
stderr_redirector.stopRedirectThread()
supervisor_redirector.stopRedirectThread()
# Do not return from this function -- and use os._exit instead of sys.exit to nuke any stray threads:
os._exit(0)
# For use by supervisor only -- please consider this 'private' to supervisor.
class SupervisorLogger():
# Yes, re-inventing the wheel here. Trying to keep the external dependencies down to a minimum.
logger_fd = None
def __init__(self, logger_fd):
self.logger_fd = logger_fd
def info(self, message):
self.log('info', message)
def warn(self, message):
self.log('warn', message)
def error(self, message):
self.log('error', message)
def log(self, level, message):
self.logger_fd.write("%s, %s, %s\n" % (time.time(), level, message))
self.logger_fd.flush()
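# Typical use (hypothetical path): SupervisorLogger(open('/tmp/supervisor.log', 'a', buffering=0)).info("starting")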
# For use by supervisor only -- please consider this 'private' to supervisor.
import threading
from threading import Thread
class SupervisorStreamRedirector(Thread):
supervisor_logger = None
log_data_source = None
stop_event = None
run_as_user = None
run_as_group = None
logfile_inode = None
logfile_dir = None
logfile_path = None
logfile_fd = None
def __init__(self, supervisor_logger, logfile_path, run_as_user=None, run_as_group=None):
Thread.__init__(self)
self.supervisor_logger = supervisor_logger
self.logfile_path = logfile_path
self.logfile_dir = os.path.dirname(self.logfile_path)
self.run_as_user = run_as_user
self.run_as_group = run_as_group
self._create_logdir()
def startRedirectThread(self, data_stream):
if self.stop_event:
if self.supervisor_logger is not None:
self.supervisor_logger.warn("SupervisorStreamRedirector: redirect already started?")
return -4
self.stop_event = threading.Event()
try:
reader, writer = os.pipe()
self.log_data_source = os.fdopen(reader, 'rb', 0)
original_output_dest = os.fdopen(writer, 'wb', 0)
# Flip on non-blocking, otherwise calls to select.select() will block:
flags = fcntl.fcntl(original_output_dest, fcntl.F_GETFL)
fcntl.fcntl(original_output_dest, fcntl.F_SETFL, flags | os.O_NONBLOCK)
flags = fcntl.fcntl(self.log_data_source, fcntl.F_GETFL)
fcntl.fcntl(self.log_data_source, fcntl.F_SETFL, flags | os.O_NONBLOCK)
data_stream.flush()
os.dup2(original_output_dest.fileno(), data_stream.fileno())
except Exception as e:
if self.supervisor_logger is not None:
self.supervisor_logger.warn("SupervisorStreamRedirector: error setting up file streams for redirect: %s" % e)
return -5
try:
self.start()
except Exception as e:
if self.supervisor_logger is not None:
self.supervisor_logger.warn("SupervisorStreamRedirector: error starting redirect thread: %s" % e)
return -6
return 0
def stopRedirectThread(self):
if self.stop_event:
self.stop_event.set()
else:
if self.supervisor_logger is not None:
self.supervisor_logger.warn("SupervisorStreamRedirector: stop_logger not running? (%s)" % self.stop_event)
def _filter_lines(self, lines):
''' Given an array of lines, return a filtered / altered string as desired. '''
# The intent here is to someday pass an object in that implements the filter, so that
# sensitive strings can be filtered out of the log files before getting written to disk
# and then sent across the wire via logstash or what have you.
# For now, we do a no-op.
if len(lines) == 0:
return ''
return '\n'.join(lines) + '\n'
# Here's an example that would timestamp every line:
#if len(lines) == 0:
# return ''
#line_beginning = '%11.1f ' % (time.time())
#line_ending = '\n'
#return line_beginning + (line_ending + line_beginning).join(lines) + line_ending
def _create_logdir(self):
if 0 != create_dirs_if_needed(self.logfile_dir, owner_user=self.run_as_user, owner_group=self.run_as_group):
self.supervisor_logger.error("SupervisorStreamRedirector[%s]: unable to create logdir %s" % (os.getpid(), self.logfile_path))
return -7
return 0
def _reset_logfile(self):
if 0 != self._create_logdir():
return -8
try:
if os.path.exists(self.logfile_path):
if os.path.islink(self.logfile_path) or not os.path.isfile(self.logfile_path):
self.supervisor_logger.error("SupervisorStreamRedirector: invalid file at logfile path %s" % self.logfile_path)
return -9
new_fh = open(self.logfile_path, 'a')
if self.logfile_fd is not None:
self.logfile_fd.close()
self.logfile_fd = new_fh
self.logfile_inode = os.stat(self.logfile_path).st_ino
self.supervisor_logger.info("SupervisorStreamRedirector[%s]: writing to logfile %s" % (os.getpid(), self.logfile_path))
except Exception as e:
self.supervisor_logger.error("SupervisorStreamRedirector[%s]: unable to open logfile %s: %s" % (os.getpid(), self.logfile_path, e))
return -10
return 0
def run(self):
okay_to_run = True
last_read_size = 0
last_remainder = ''
while okay_to_run or last_read_size > 0:
if self.stop_event.is_set():
self.supervisor_logger.info("SupervisorStreamRedirector[%s]: stopping logger at %s" % (os.getpid(), self.logfile_path))
okay_to_run = False
self.stop_event.clear()
try:
# Don't use readline() -- it blocks, and there's no way for the main thread
# to tell the logger thread to exit while the i/o call is blocked. Sigh.
[rlist, wlist, xlist] = select.select([self.log_data_source], [], [], 0.25)
if not os.path.exists(self.logfile_dir):
# Re-create the logdir if it goes missing -- do this check every pass through,
# so that if logdir gets completely reset we instantly recreate the path for other
# processes which might also depend on it.
self._create_logdir()
if not rlist:
last_read_size = 0
else:
data = self.log_data_source.read(1024)
last_read_size = len(data)
# We split the data into lines so that we can filter sensitive strings out, and potentially do some line-based formatting.
# Because we're not using readline (due to blocking reasons), we have to split the data into lines, and carry over the remainder
# of the last line (if it's mid-line) to the next pass through the loop.
lines = data.split('\n')
if data.endswith('\n'):
lines = lines[:-1]
if len(last_remainder):
lines[0] = last_remainder + lines[0]
last_remainder = ''
if not data.endswith('\n'):
last_remainder = lines[-1]
lines = lines[:-1]
try:
current_inode = os.stat(self.logfile_path).st_ino
if self.logfile_inode != current_inode:
self._reset_logfile()
except:
self._reset_logfile()
if self.logfile_fd is not None:
self.logfile_fd.write(self._filter_lines(lines))
if not okay_to_run and len(last_remainder):
# Then it's our last loop through -- purge out the remainder:
self.logfile_fd.write(self._filter_lines([last_remainder]))
last_remainder = ''
self.logfile_fd.flush()
except Exception as e:
self.supervisor_logger.error("SupervisorStreamRedirector: error in log thread: %s" % e)
self.supervisor_logger.info("SupervisorStreamRedirector stopping; closing %s." % self.logfile_path)
self.logfile_fd.flush()
self.logfile_fd.close()
self.stop_event = None
```
#### File: lib/devops/version_map_helpers.py
```python
import os,sys
from fcntl import flock, LOCK_EX, LOCK_UN
def add_entries_to_static_version_mapping(static_map_filepath, entries):
parent_dir = os.path.dirname(os.path.abspath(static_map_filepath))
try:
if not os.path.isdir(parent_dir): os.makedirs(parent_dir)
except:
# It's possible another process will have made the dir in between when we check its existence and call makedirs.
# (Yes, this has happened.)
if not os.path.isdir(parent_dir):
print >>sys.stderr, "Error: unable to make dir '%s' in add_entries_to_static_version_mapping." % parent_dir
return 1
try:
md5_map_file = open(static_map_filepath, "a")
# Try to flock, although this seems to fail on ec2:
flock(md5_map_file, LOCK_EX)
# Flush to check if we're still at beginning of file (race-condition), and if so, write the preamble:
md5_map_file.flush()
if 0 == md5_map_file.tell():
md5_map_file.write("hsn_static_to_cdn_map = {}\n")
md5_map_file.flush()
for keyname in entries:
md5_map_file.write("hsn_static_to_cdn_map['%s'] = '%s'\n" % (keyname, entries[keyname]))
md5_map_file.flush() # flush after every write, so in case other concurrent processes are writing and locks failed, our lines are at least intact.
flock(md5_map_file, LOCK_UN)
md5_map_file.close()
except Exception as e:
print >>sys.stderr, 'Error writing to static map "%s"' % static_map_filepath
print >>sys.stderr, e
return 1
return 0
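# The file written above is itself importable Python; a single appended entry looks like
# (hypothetical key and URL): hsn_static_to_cdn_map['css/site.css'] = 'http://cdn.example.com/css/site-0123abcd.css'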
``` |
{
"source": "j-potter/ConditionOne",
"score": 3
} |
#### File: j-potter/ConditionOne/conflicts.py
```python
def FindConflicts(givenSchedule, printing):
userSchedule = []
for talk in givenSchedule:
userSchedule.append(talk)
if len(userSchedule) == 0:
if printing:
print "No talks currently in schedule."
return 0
conflicts = 0
conflictingTalks = []
index = 0
while index < len(userSchedule):
for talk in userSchedule[(index+1):]:
if talk.day == userSchedule[index].day and talk.time[:2] == userSchedule[index].time[:2]:
conflictingTalks.append(talk)
conflictingTalks.append(userSchedule[index])
conflicts += 1
index += 1
if conflicts == 1:
conflicts = 2
if conflicts > 0:
if printing:
print "Warning: " + str(conflicts) + " conflicts found."
print "\nTime Day ID Title"
conflictingTalks = list(set(conflictingTalks))
for talk in conflictingTalks:
print talk.time + " " + talk.day[:3] + " " + "%03d" % int(talk.idNum) + " " + talk.title
print""
elif conflicts == 0:
print "You have scheduled " + str(userSchedule.__len__()) + " talk(s) with 0 conflicts."
return conflicts
```
#### File: j-potter/ConditionOne/DefconParse.py
```python
import string
import talk
DEBUGGING = False
masterSchedule = []
talkPRE = '<h3 class="talkTitle">'
talkPOST = '</h3>'
dayTimeTrackPRE = '<p class="abstract">'
dayTimeTrackPOST = '</p>'
speakerPRE = '<h4 class="speaker">'
speakerPOST = '<span'
descriptionPRE = '<p class="abstract">'
descriptionPOST = '</p>'
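# The markers above assume schedule HTML shaped roughly like this hypothetical excerpt:
# <h3 class="talkTitle">Some Talk</h3>
# <p class="abstract">Friday at 10:00 in Track 101</p>
# <h4 class="speaker">Jane Doe<span class="title">...</span></h4>
# <p class="abstract">Talk description ...</p>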
def Parse(html):
htmlString = html.read()
idNum = 0
while len(htmlString) != 0 and string.find(htmlString, "day at") != -1: #crude, but that string existing means that there's another talk to parse
#Get Talk Title
start = string.find(htmlString, talkPRE) + len(talkPRE) # Establishes start index as first letter in title
htmlString = htmlString[start:] #Trims down the file
stop = string.find(htmlString, talkPOST) #Establishes stop index as last letter in title
title = htmlString[:stop] #Establishes talk title
htmlString = htmlString[stop:]
#Get talk day
start = string.find(htmlString, dayTimeTrackPRE) + len(dayTimeTrackPRE) #Establishes start as first letter in day / time / track string
htmlString = htmlString[start:] #Trims down the file
stop = string.find(htmlString, 'day') + 3 #Establishes stop as end of day
day = htmlString[:stop]
if len(day) > 8:
continue
htmlString = htmlString[stop:]
#Get talk time
start = string.find(htmlString, ':') - 2 #Set start index as 2 chars before ':' which will always work as earliest talk is at 10:00
if start - stop > 25: #ugly hack to fix annoying bug
continue
htmlString = htmlString[start:] #Trims string
time = htmlString[:5] #Establishes talk time
#Get talk track
if string.find(htmlString[:25], '101') != -1: #if '101' appears, making it the track
track = 101
else:
track = htmlString[15] #Probably shouldn't be hardcoded but it's more efficient than hunting for the next digit
if not str(track).isdigit(): #Special cases such as "20:00 - 22:00 in Modena Room" are rare and can be their own thing implemented later. They only come up ~4 times.
continue
#Get speaker
start = string.find(htmlString, speakerPRE) + len(speakerPRE)
stop = string.find(htmlString, speakerPOST)
speaker = htmlString[start:stop] #sets speaker value
htmlString = htmlString[stop:] #trims down file (I know I'm inconsistent with file vs string. Sorry.)
#Get description - KNOWN BUG - LINE BREAKS (<br>) ARE STILL IN TEXT
start = string.find(htmlString, descriptionPRE) + len(descriptionPRE)
htmlString = htmlString[start:]
stop = string.find(htmlString, descriptionPOST)
description = htmlString[:stop]
if DEBUGGING:
print "Title: " + title
print "Day: " + day
print "Time: " + time
print "Track: " + str(track)
print "Speaker(s): " + speaker
#print "Description: " + description
masterSchedule.append(talk.talk(day, time, track, title, speaker, description, idNum)) #Add the talk to the list
idNum += 1 #Increment identifier
return masterSchedule
```
#### File: j-potter/ConditionOne/FreeBooked.py
```python
import conflicts
def Outline(userSchedule):
#day 10 11 12 13 14 15 16 17
thurs = [0, 0, 0, 0, 0, 0, 0, 0]
fri = [0, 0, 0, 0, 0, 0, 0, 0]
sat = [0, 0, 0, 0, 0, 0, 0, 0]
sun = [0, 0, 0, 0, 0, 0, 0, 0]
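# Each list has one slot per hour from 10:00 through 17:00; a talk's slot index is (hour - 10),
# so e.g. a 13:00 talk lands in slot 3 of its day's list.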
for talk in userSchedule:
if talk.day == "Thursday":
thurs[int(talk.time[:2]) - 10] = talk
elif talk.day == "Friday":
fri[int(talk.time[:2]) - 10] = talk
elif talk.day == "Saturday":
sat[int(talk.time[:2]) - 10] = talk
elif talk.day == "Sunday":
sun[int(talk.time[:2]) - 10] = talk
days = [thurs, fri, sat, sun]
fullDay = ["Thursday", "Friday", "Saturday", "Sunday"]
dayIndex = 0
NumberOfConflicts = conflicts.FindConflicts(userSchedule, False)
if NumberOfConflicts > 0:
print "Error: " + str(NumberOfConflicts) + " conflicts detected."
print "Outline cannot be generated until all conflicts are resolved."
print "Run \"conflicts\" to see current conflicts that must be resolved."
return
print ""
for day in days:
hour = 10
print fullDay[dayIndex]
for time in day:
if time == 0:
print str(hour) + ":00\tFree"
else:
print str(hour) + ":00\t" + "Track " + "%03d," %int(time.track) + " TalkID %03d: " % int(time.idNum) + str(time.title)
hour += 1
print ""
dayIndex += 1
def happening(masterSchedule, day, time):
if ["th", "thurs", "thursday"].__contains__(str(day).lower()): #standardize day format
day = "Thursday"
elif ["f", "fri", "friday"].__contains__(str(day).lower()):
day = "Friday"
elif ["sat", "saturday"].__contains__(str(day).lower()):
day = "Saturday"
elif ["sun", "sunday"].__contains__(str(day).lower()):
day = "Sunday"
notYetPrinted = True
for talk in masterSchedule:
if talk.day == day and int(talk.time[:2]) == int(time): #Add talks that match given date and time
if notYetPrinted: #Column titles appear once if necessary
print "\nTime Day Track\tID Title"
notYetPrinted = False
print talk.time + " " + talk.day[:3] + " " + "%03d\t" % int(talk.track)+ "%03d" % int(talk.idNum) + " " + talk.title
def export(userSchedule):
# day 10 11 12 13 14 15 16 17
thurs = [0, 0, 0, 0, 0, 0, 0, 0]
fri = [0, 0, 0, 0, 0, 0, 0, 0]
sat = [0, 0, 0, 0, 0, 0, 0, 0]
sun = [0, 0, 0, 0, 0, 0, 0, 0]
for talk in userSchedule:
if talk.day == "Thursday":
thurs[int(talk.time[:2]) - 10] = talk
elif talk.day == "Friday":
fri[int(talk.time[:2]) - 10] = talk
elif talk.day == "Saturday":
sat[int(talk.time[:2]) - 10] = talk
elif talk.day == "Sunday":
sun[int(talk.time[:2]) - 10] = talk
days = [thurs, fri, sat, sun]
fullDay = ["Thursday", "Friday", "Saturday", "Sunday"]
dayIndex = 0
NumberOfConflicts = conflicts.FindConflicts(userSchedule, False)
exportFile = open("DEFCON 25 Schedule.txt", 'w')
if NumberOfConflicts > 0:
print "Error: " + str(NumberOfConflicts) + " conflicts detected."
print "Outline cannot be generated until all conflicts are resolved."
print "Run \"conflicts\" to see current conflicts that must be resolved."
return
for day in days:
hour = 10
exportFile.write("\n" + fullDay[dayIndex] + "\n")
for time in day:
if time == 0:
exportFile.write( str(hour) + ":00\tFree\n")
else:
exportFile.write( str(hour) + ":00\t" + "Track " + str(time.track) + \
", TalkID %03d: " % int(time.idNum) + str(time.title) + "\n")
hour += 1
exportFile.write("")
dayIndex += 1
```
#### File: j-potter/ConditionOne/talk.py
```python
class talk:
def __init__(self, GivenDay, GivenTime, GivenTrack, GivenTitle, GivenSpeakers, GivenDescription, GivenIDNum):
self.day = GivenDay
self.time = GivenTime
self.track = GivenTrack
self.title = GivenTitle
self.speakers = GivenSpeakers
self.description = GivenDescription
self.idNum = GivenIDNum
def ShowInfo(self):
print "Talk " + str(self.idNum) + " On " + self.day + " at " + self.time + " in track " + str(self.track) + ": " + self.title
def ShowDescription(self):
print ""
print "Title: " + self.title + "\n"
print "Speaker(s): " + self.speakers + "\n"
print self.description + "\n"
``` |
{
"source": "jpotwor/multi_lan_ner",
"score": 3
} |
#### File: jpotwor/multi_lan_ner/multi_lan_ner.py
```python
import spacy
def find_entities(input_phrase, language):
models = {
'en': 'en_core_web_sm',
'pl': 'pl_core_news_sm',
'fr': 'fr_core_news_sm',
'de': 'de_core_news_sm',
'it': 'it_core_news_sm',
}
if language in models:
nlp = spacy.load(models[language])
doc = nlp(input_phrase)
res = []
for ent in doc.ents:
res.append({'text': ent.text, 'start_pos': ent.start_char, 'end_pos': ent.end_char, 'type': ent.label_})
return res
else:
raise FileNotFoundError('model %s not found, please download' % language)
if __name__ == "__main__":
print(find_entities("As I had only one hour to write this on my old Dell computer, I am aware there is space for improvement.", 'en'))
``` |
{
"source": "jpouderoux/paraview-superbuild",
"score": 2
} |
#### File: paraview-superbuild/Scripts/pvw-setup.py
```python
import urllib2, sys, os, zipfile, subprocess, json, shutil, argparse, re, hashlib
# ===================================================================
# ParaView files / URLs
# ===================================================================
version = { "release" : { "url": "http://paraview.org/files/v4.1/",
"application": { "osx" : "ParaView-4.1.0-Darwin-64bit-Lion-Python27.dmg",
"linux32": "ParaView-4.1.0-Linux-32bit-glibc-2.3.6.tar.gz",
"linux64": "ParaView-4.1.0-Linux-64bit-glibc-2.3.6.tar.gz",
"win32" : "ParaView-4.1.0-Windows-32bit.zip",
"win64" : "ParaView-4.1.0-Windows-64bit.zip" },
"documentation": "http://paraview.org/files/v4.1/ParaView-API-docs-v4.1.zip"},
"nightly" : { "url": "http://paraview.org/files/nightly/",
"application": { "osx" : "ParaView-Darwin-64bit-Lion-Python27-NIGHTLY.dmg",
"linux32": "ParaView-Linux-32bit-glibc-2.3.6-NIGHTLY.tar.gz",
"linux64": "ParaView-Linux-64bit-glibc-2.3.6-NIGHTLY.tar.gz",
"win32" : "ParaView-Windows-32bit-NIGHTLY.zip",
"win64" : "ParaView-Windows-64bit-NIGHTLY.zip" },
"documentation": "http://www.paraview.org/files/nightly/ParaView-doc.tar.gz" } }
data = "http://paraview.org/files/v4.1/ParaViewData-v4.1.0.zip"
# ===================================================================
# Download helper
# ===================================================================
def download(url, file_name):
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "\nDownloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
# ===================================================================
def unzip(file, destination):
zfile = zipfile.ZipFile(file)
for name in zfile.namelist():
fullPath = os.path.join(destination, name)
if name.endswith('/'):
os.makedirs(fullPath)
else:
if not os.path.exists(os.path.dirname(fullPath)):
os.makedirs(os.path.dirname(fullPath))
fd = open(fullPath,"w")
fd.write(zfile.read(name))
fd.close()
status = r" Unzip "
if len(name) > 70:
status += "[..]" + name[-70:]
else:
status += name
status += " "*(80-len(status))
status = status + chr(8)*(len(status)+1)
print status,
print
# ===================================================================
def simpleDirectoryHash(rootPath):
# Use the tar program to create an archive without writing
# one to disk, pipe it to the md5sum program. The -P option
# asks tar not to remove leading slashes, which results in
# less output to have to parse.
cmd = "tar -cP " + rootPath + " | md5sum"
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
regex = re.compile('^([^\s]+)\s+')
m = regex.search(output)
returnVal = 'ERROR getting md5sum on directory'
if m:
returnVal = m.group(1)
return returnVal
# ===================================================================
def getCommandLineOptions():
p = argparse.ArgumentParser()
p.add_argument("-m",
"--mode",
type=str,
default="interactive",
help="Either 'interactive' or 'arguments', interactive prompts for input")
p.add_argument("-v",
"--installversion",
type=str,
default="release",
help="Either 'release' or 'nightly', no data or docs installed with nightly")
p.add_argument("-p",
"--installpath",
type=str,
default=os.getcwd(),
help="Full path to directory which should contain installation")
p.add_argument("-t",
"--apptype",
type=str,
default="linux64",
help="One of 'osx', 'linux32', 'linux64', 'win32', or 'win64'")
p.add_argument("-n",
"--noconfigure",
default=True,
help="If provided, this option specifies not to configure local instace",
action='store_false')
p.add_argument("-s",
"--storehashpath",
default="",
help="Full path with file name, where you wish to store json file containing binary hash")
return p.parse_args()
# ===================================================================
# Get data locally
# ===================================================================
def mainProgram():
print
args = getCommandLineOptions()
install_path = args.installpath
mode = args.mode
app_type = args.apptype
do_config = args.noconfigure
v = version[args.installversion]
url = v['url']
application = v['application']
documentation = v['documentation']
pvdataname = data[data.rfind('/')+1:]
pvdocname = documentation[documentation.rfind('/')+1:]
if mode == 'interactive':
q = ''
while q != 'y' and q != 'yes':
if q == 'n' or q == 'no':
install_path = raw_input("Enter ParaViewWeb install path: ")
if q == 'q' or q == 'quit':
sys.exit("We did nothing")
q = raw_input("Is ParaViewWeb install path correct? (%s) yes/no/quit: " % install_path)
print "\nInstalling ParaViewWeb inside:", install_path
if not os.path.exists(install_path):
os.makedirs(install_path)
download_path = os.path.join(install_path, "download")
if not os.path.exists(download_path):
os.makedirs(download_path)
# Download data + doc
data_file = os.path.join(download_path, pvdataname)
documentation_file = os.path.join(download_path, pvdocname)
if not os.path.exists(data_file):
download(data, data_file)
if not os.path.exists(documentation_file):
download(documentation, documentation_file)
if mode == 'interactive':
app_type = raw_input("\nWhich system? [osx, linux32, linux64, win32, win64, all]: ")
# Download only for all OS for future setup
if app_type == 'all':
print "\nThis will only download all OS files for future install."
for app_type in application:
app_file = os.path.join(download_path, application[app_type])
if not os.path.exists(app_file):
download(url + application[app_type], app_file)
sys.exit("Downloading done")
else:
# Check files and download them if needed
app_file = os.path.join(download_path, application[app_type])
if not os.path.exists(app_file):
download(url + application[app_type], app_file)
print
# ===================================================================
# Unpack data
# ===================================================================
if app_type == 'osx':
if not os.path.exists(os.path.join(install_path, 'paraview.app')):
print " => Unpack ParaView"
# Mount DMG
retvalue = subprocess.check_output(['hdiutil', 'attach', app_file])
list = retvalue.split()
dir_path = list[-1]
dmg_mount = list[-3]
# Copy application
os.system("cp -r %s/paraview.app %s/paraview.app" % (dir_path, install_path))
# Unmount dmg
subprocess.check_output(["hdiutil", "detach", dmg_mount])
elif not os.path.exists(os.path.join(install_path, 'paraview')):
print " => Unpack ParaView"
if app_type == 'linux32':
os.system("cd %s;tar xvzf %s" % (install_path, app_file))
os.rename(os.path.join(install_path, "ParaView-4.1.0-Linux-32bit"), os.path.join(install_path, "paraview"))
elif app_type == 'linux64':
os.system("cd %s;tar xvzf %s" % (install_path, app_file))
os.rename(os.path.join(install_path, "ParaView-4.2.0-Linux-64bit"), os.path.join(install_path, "paraview"))
else:
# Unzip app
unzip(app_file, install_path)
if app_type == 'win64':
os.rename(os.path.join(install_path, "ParaView-4.2.0-Windows-64bit"), os.path.join(install_path, "paraview"))
if app_type == 'win32':
os.rename(os.path.join(install_path, "ParaView-4.2.0-Windows-32bit"), os.path.join(install_path, "paraview"))
# ===================================================================
# Structure directories
# ===================================================================
# /data
if data != '' and not os.path.exists(os.path.join(install_path, 'data')):
print " => Unpack data"
unzip(data_file, install_path)
src = os.path.join(install_path, pvdataname[:-6], "Data")
dst = os.path.join(install_path, 'data')
os.rename(src, dst)
shutil.rmtree(os.path.join(install_path, pvdataname[:-6]))
# /www
if documentation != '' and not os.path.exists(os.path.join(install_path, 'www')):
print " => Unpack Web"
if documentation_file.endswith(".zip"):
unzip(documentation_file, install_path)
else:
# FIXME: instead of unzipping, we need to uncompress/untar the
# doc file because for some reason it exists as a tar zip. Then
# the rest of this code relies on how unzip works, so we need to
# keep that working.
os.system("cd %s;tar xvzf %s" % (install_path, documentation_file))
matcher = re.compile('(.+)\.(tar\.gz|tgz)')
m = matcher.search(pvdocname)
newdirname = pvdocname
if m:
newdirname = m.group(1)
pvdocname = newdirname + 'xxxx'
os.system("cd %s; mv www %s" % (install_path, newdirname))
src = os.path.join(install_path, pvdocname[:-4], 'js-doc')
dst = os.path.join(install_path, 'www')
os.rename(src, dst)
src = os.path.join(install_path, pvdocname[:-4], 'lib')
dst = os.path.join(install_path, 'www/lib')
os.rename(src, dst)
src = os.path.join(install_path, pvdocname[:-4], 'ext')
dst = os.path.join(install_path, 'www/ext')
os.rename(src, dst)
src = os.path.join(install_path, pvdocname[:-4], 'apps')
dst = os.path.join(install_path, 'www/apps')
os.rename(src, dst)
print " => Clean web directory"
shutil.rmtree(os.path.join(install_path, pvdocname[:-4]))
if do_config == True:
# /bin
if not os.path.exists(os.path.join(install_path, 'bin')):
os.makedirs(os.path.join(install_path, 'bin'))
# /conf
if not os.path.exists(os.path.join(install_path, 'conf')):
os.makedirs(os.path.join(install_path, 'conf'))
# /logs
if not os.path.exists(os.path.join(install_path, 'logs')):
os.makedirs(os.path.join(install_path, 'logs'))
# ===================================================================
# Configure
# ===================================================================
if do_config == True:
print " => Configure local instance"
python_exec = ''
base_python_path = ''
if app_type == 'osx':
python_exec = os.path.join(install_path, 'paraview.app/Contents/bin/pvpython')
base_python_path = os.path.join(install_path, 'paraview.app/Contents/Python/')
elif app_type == 'linux32' or app_type == 'linux64':
python_exec = os.path.join(install_path, 'paraview/bin/pvpython')
base_python_path = os.path.join(install_path, 'paraview/lib/paraview-4.1/site-packages/')
elif app_type == 'win32' or app_type == 'win64':
python_exec = os.path.join(install_path, 'paraview/bin/pvpython.exe')
base_python_path = os.path.join(install_path, 'paraview/lib/paraview-4.1/site-packages/')
default_launcher_config = {
"sessionData" : {
"updir": "/Home"
},
"resources" : [ { "host" : "localhost", "port_range" : [9001, 9003] } ],
"properties" : {
"python_exec": python_exec,
"python_path": base_python_path,
"data": os.path.join(install_path, 'data'),
},
"configuration": {
"host": "localhost",
"port": 8080,
"endpoint": "paraview",
"content": os.path.join(install_path, 'www'),
"proxy_file": os.path.join(install_path, 'conf/proxy.conf'),
"sessionURL" : "ws://${host}:${port}/ws",
"timeout" : 15,
"log_dir" : os.path.join(install_path, 'logs'),
"upload_dir" : os.path.join(install_path, 'data'),
"fields" : ["file", "host", "port", "updir"]
},
"apps": {
"pipeline": {
"cmd": ["${python_exec}", "${python_path}/paraview/web/pv_web_visualizer.py", "--port", "${port}", "--data-dir", "${data}", "-f" ],
"ready_line" : "Starting factory"
},
"visualizer": {
"cmd": ["${python_exec}", "${python_path}/paraview/web/pv_web_visualizer.py", "--port", "${port}", "--data-dir", "${data}", "-f", "--any-readers" ],
"ready_line" : "Starting factory"
},
"loader": {
"cmd": ["${python_exec}", "${python_path}/paraview/web/pv_web_file_loader.py", "--port", "${port}", "--data-dir", "${data}", "-f" ],
"ready_line" : "Starting factory"
},
"data_prober": {
"cmd": ["${python_exec}", "${python_path}/paraview/web/pv_web_data_prober.py", "--port", "${port}", "--data-dir", "${data}", "-f" ],
"ready_line" : "Starting factory"
}
}
}
with open(os.path.join(install_path, 'conf/launch.json'), "w") as config_file:
config_file.write(json.dumps(default_launcher_config))
web_exec = ''
if app_type.startswith('win'):
web_exec = os.path.join(install_path, 'bin/start.bat')
with open(web_exec, "w") as run_file:
run_file.write("%s %s %s" % (python_exec, os.path.join(base_python_path, 'vtk/web/launcher.py'), os.path.join(install_path, 'conf/launch.json')))
else:
web_exec = os.path.join(install_path, 'bin/start.sh')
with open(web_exec, "w") as run_file:
run_file.write("%s %s %s" % (python_exec, os.path.join(base_python_path, 'vtk/web/launcher.py'), os.path.join(install_path, 'conf/launch.json')))
os.chmod(web_exec, 0750)
# ===================================================================
# Enable ParaViewWeb application inside index.html
# ===================================================================
index_html = os.path.join(install_path,"www/index.html")
index_origin = os.path.join(install_path,"www/index.origin")
os.rename(index_html, index_origin)
with open(index_origin, "r") as fr:
with open(index_html, "w") as fw:
for line in fr:
if not "DEMO-APPS" in line:
fw.write(line)
# ===================================================================
# Print help
# ===================================================================
print
print "To start ParaViewWeb web server just run:"
print " "*5, web_exec
print
print "And go in your Web browser (Safari, Chrome, Firefox) to:"
print " "*5, "http://localhost:8080/"
print
print
if args.storehashpath != '' :
# Hash the downloaded file
hashedValue = simpleDirectoryHash(os.path.join(download_path, application[app_type]))
print os.path.join(download_path, application[app_type]) + ': ' + hashedValue
hashObj = { os.path.join(download_path, application[app_type]) : hashedValue }
with open(args.storehashpath, 'w') as hashfile :
hashfile.write(json.dumps(hashObj))
###
### Main program entry point. Any exceptions in the script will get caught
### and result in a non-zero exit status, which we can catch from the bash
### script which might be running this program.
###
if __name__ == "__main__":
try :
mainProgram()
except Exception as inst:
print 'Caught exception'
print inst
sys.exit(1)
sys.exit(0)
``` |
{
"source": "jpoullet2000/emelem",
"score": 2
} |
#### File: emelem/tests/test_emelem_api.py
```python
import os
import unittest
import logging
from flask import redirect, request, session
from flask_appbuilder import SQLA
from flask_appbuilder.models.generic.interface import GenericInterface
# NOTE: PSModel and PSSession (a generic process-list data source) are assumed to be
# provided by the project's generic data-source helpers; import them here if needed.
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
logging.getLogger().setLevel(logging.DEBUG)
DEFAULT_ADMIN_USER = 'jbp'
DEFAULT_ADMIN_PASSWORD = '<PASSWORD>'
# DEFAULT_ADMIN_USER = 'admin'
# DEFAULT_ADMIN_PASSWORD = '<PASSWORD>'
log = logging.getLogger(__name__)
class EmelemAPITest(unittest.TestCase):
def setUp(self):
from flask import Flask
from flask_appbuilder import AppBuilder
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.views import ModelView
self.app = Flask(__name__)
self.basedir = os.path.abspath(os.path.dirname(__file__))
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
self.app.config['CSRF_ENABLED'] = False
self.app.config['SECRET_KEY'] = 'thisismyscretkey'
self.app.config['WTF_CSRF_ENABLED'] = False
self.db = SQLA(self.app)
self.appbuilder = AppBuilder(self.app, self.db.session)
sess = PSSession()
class PSView(ModelView):
datamodel = GenericInterface(PSModel, sess)
base_permissions = ['can_list', 'can_show']
list_columns = ['UID', 'C', 'CMD', 'TIME']
search_columns = ['UID', 'C', 'CMD']
def login(self, client, username, password):
# Login with default admin
return client.post('/login/',
data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(self, client):
return client.get('/logout/')
def test_back(self):
"""
Test Back functionality
"""
with self.app.test_client() as c:
self.login(c, DEFAULT_ADMIN_USER, DEFAULT_ADMIN_PASSWORD)
rv = c.get('/mlmmodelview/list/?_flt_0_field_string=f')
rv = c.get('/mlmmodelview/list/')
rv = c.get('/back', follow_redirects=True)
assert request.args['_flt_0_field_string'] == u'f'
assert '/mlmmodelview/list/' == request.path
``` |
{
"source": "jpoulletXaccount/tf-gnn-samples",
"score": 2
} |
#### File: tf-gnn-samples/tasks/number_vehicle_task.py
```python
import tensorflow as tf
import numpy as np
from dpu_utils.utils import RichPath, LocalPath
from collections import namedtuple
from typing import Any, Dict, Tuple, List, Iterable,Iterator
from .sparse_graph_task import Sparse_Graph_Task,DataFold,MinibatchData
from utils.number_vehicles import created_dataset_utils
StopsData = namedtuple('StopsData', ['adj_lists','type_to_node_to_num_incoming_edges', 'num_stops', 'node_features', 'label'])
class Nb_Vehicles_Task(Sparse_Graph_Task):
"""
    Instantiates a classification task over the number of vehicles.
"""
def __init__(self, params: Dict[str, Any]):
super().__init__(params)
# Things that will be filled once we load data:
self.__num_edge_types = 10
self.__initial_node_feature_size = 0
self.__num_output_classes = 5
        # specific map from tasks to helpers
self._mapping = {'created_dataset': created_dataset_utils.CreatedDatasetUtils}
@classmethod
def default_params(cls):
"""
        Applied to the class object, returns the task-specific default parameters.
:return:
"""
params = super().default_params()
params.update({
'add_self_loop_edges': True,
'use_graph': True,
'activation_function': "tanh",
'out_layer_dropout_keep_prob': 1.0,
})
return params
@staticmethod
def name() -> str:
return "Nb_Vehicles"
@staticmethod
def default_data_path() -> str:
return "data/number_vehicles"
def get_metadata(self) -> Dict[str, Any]:
"""
:return: a dict with all the params related to the task
"""
metadata = super().get_metadata()
metadata['initial_node_feature_size'] = self.__initial_node_feature_size
metadata['num_output_classes'] = self.__num_output_classes
metadata['num_edge_types'] = self.__num_edge_types
return metadata
def restore_from_metadata(self, metadata: Dict[str, Any]) -> None:
"""
From a dict of parameters, restore it
:param metadata:
"""
super().restore_from_metadata(metadata)
self.__initial_node_feature_size = metadata['initial_node_feature_size']
self.__num_output_classes = metadata['num_output_classes']
self.__num_edge_types = metadata['num_edge_types']
@property
def num_edge_types(self) -> int:
return self.__num_edge_types
@property
def initial_node_feature_size(self) -> int:
return self.__initial_node_feature_size
# -------------------- Data Loading --------------------
def load_data(self, path: RichPath) -> None:
"""
Main function to load training and validation data
:param path: the path to load the data
"""
train_data, valid_data, test_data = self.__load_data(path)
self._loaded_data[DataFold.TRAIN] = train_data
self._loaded_data[DataFold.VALIDATION] = valid_data
self._loaded_data[DataFold.TEST] = test_data
def __load_data(self, data_directory: RichPath):
assert isinstance(data_directory, LocalPath), "NumberVehiclesTask can only handle local data"
data_path = data_directory.path
print(" Loading NumberVehicles data from %s." % (data_path,))
helper_loader = self._mapping[self.params['data_kind']](data_path,self.num_edge_types)
all_dist_matrix,all_type_num, all_features, all_labels = helper_loader.load_data()
self.__initial_node_feature_size = helper_loader.number_features
self.__num_output_classes = helper_loader.number_labels
train_data = self._process_raw_data(all_dist_matrix[DataFold.TRAIN],all_type_num[DataFold.TRAIN],all_features[DataFold.TRAIN],all_labels[DataFold.TRAIN])
valid_data = self._process_raw_data(all_dist_matrix[DataFold.VALIDATION],all_type_num[DataFold.VALIDATION],all_features[DataFold.VALIDATION],all_labels[DataFold.VALIDATION])
test_data = self._process_raw_data(all_dist_matrix[DataFold.TEST],all_type_num[DataFold.TEST],all_features[DataFold.TEST],all_labels[DataFold.TEST])
return train_data, valid_data, test_data
def _process_raw_data(self,dist_matrix,type_num, features, labels):
"""
Process the data to put it into right format
:return: data under the form of lists of StopData
"""
processed_data = []
for i in range(0,len(features)):
processed_data.append(StopsData(adj_lists=dist_matrix[i],
type_to_node_to_num_incoming_edges=type_num[i],
num_stops=len(features[i]),
node_features=features[i],
label=labels[i]))
return processed_data
def make_task_output_model(self,
placeholders: Dict[str, tf.Tensor],
model_ops: Dict[str, tf.Tensor],
) -> None:
"""
Create task-specific output model. For this, additional placeholders
can be created, but will need to be filled in the
make_minibatch_iterator implementation.
This method may assume existence of the placeholders and ops created in
make_task_input_model and of the following:
model_ops['final_node_representations']: a float32 tensor of shape
[V, D], which holds the final node representations after the
GNN layers.
placeholders['num_graphs']: a int32 scalar holding the number of
graphs in this batch.
Order of nodes is preserved across all tensors.
This method has to define model_ops['task_metrics'] to a dictionary,
from which model_ops['task_metrics']['loss'] will be used for
optimization. Other entries may hold additional metrics (accuracy,
MAE, ...).
Arguments:
placeholders: Dictionary of placeholders used by the model,
pre-populated by the generic graph model values, and to
be extended with task-specific placeholders.
model_ops: Dictionary of named operations in the model,
pre-populated by the generic graph model values, and to
be extended with task-specific operations.
"""
placeholders['labels'] = tf.placeholder(tf.int32,shape=[None], name='labels')
placeholders['graph_nodes_list'] = \
tf.placeholder(dtype=tf.int32, shape=[None], name='graph_nodes_list')
placeholders['out_layer_dropout_keep_prob'] =\
tf.placeholder_with_default(input=tf.constant(1.0, dtype=tf.float32),
shape=[],
name='out_layer_dropout_keep_prob')
final_node_representations = \
tf.nn.dropout(model_ops['final_node_representations'],
rate=1.0 - placeholders['out_layer_dropout_keep_prob'])
output_label_logits = \
tf.keras.layers.Dense(units=self.__num_output_classes,
use_bias=False,
activation=None,
name="OutputDenseLayer",
)(final_node_representations) # Shape [nb_node, Classes]
# Sum up all nodes per-graph
per_graph_outputs = tf.unsorted_segment_sum(data=output_label_logits,
segment_ids=placeholders['graph_nodes_list'],
num_segments=placeholders['num_graphs'])
correct_preds = tf.equal(tf.argmax(per_graph_outputs, axis=1, output_type=tf.int32),
placeholders['labels'])
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=per_graph_outputs,
labels=placeholders['labels'])
total_loss = tf.reduce_sum(losses)
number_correct_preds = tf.reduce_sum(tf.cast(correct_preds,tf.float32))
number_of_predictions = tf.cast(placeholders['num_graphs'],tf.float32)
accuracy = number_correct_preds / number_of_predictions
tf.summary.scalar('accuracy', accuracy)
model_ops['task_metrics'] = {
'loss': total_loss / number_of_predictions,
'total_loss': total_loss,
'accuracy': accuracy,
}
def make_minibatch_iterator(self,
data: Iterable[Any],
data_fold: DataFold,
model_placeholders: Dict[str, tf.Tensor],
max_nodes_per_batch: int,
) -> Iterator[MinibatchData]:
"""
Create minibatches for a sparse graph model, usually by flattening
many smaller graphs into one large graphs of disconnected components.
This should produce one epoch's worth of minibatches.
Arguments:
data: Data to iterate over, created by either load_data or
load_eval_data_from_path.
data_fold: Fold of the loaded data to iterate over.
model_placeholders: The placeholders of the model that need to be
filled with data. Aside from the placeholders introduced by the
task in make_task_input_model and make_task_output_model.
max_nodes_per_batch: Maximal number of nodes that can be packed
into one batch.
Returns:
Iterator over MinibatchData values, which provide feed dicts
as well as some batch statistics.
"""
if data_fold == DataFold.TRAIN:
np.random.shuffle(data)
out_layer_dropout_keep_prob = self.params['out_layer_dropout_keep_prob']
else:
out_layer_dropout_keep_prob = 1.0
# Pack until we cannot fit more graphs in the batch
num_graphs = 0
while num_graphs < len(data):
num_graphs_in_batch = 0
batch_node_features = []
batch_node_labels = []
batch_adjacency_lists = [[] for _ in range(self.num_edge_types)]
batch_type_to_num_incoming_edges = []
batch_graph_nodes_list = []
node_offset = 0
while num_graphs < len(data) and node_offset + len(data[num_graphs].node_features) < max_nodes_per_batch:
cur_graph = data[num_graphs]
num_nodes_in_graph = len(cur_graph.node_features)
batch_node_features.extend(cur_graph.node_features)
batch_graph_nodes_list.append(np.full(shape=[num_nodes_in_graph],
fill_value=num_graphs_in_batch,
dtype=np.int32))
for i in range(self.num_edge_types):
batch_adjacency_lists[i].append(cur_graph.adj_lists[i] + node_offset)
batch_type_to_num_incoming_edges.append(np.array(cur_graph.type_to_node_to_num_incoming_edges))
batch_node_labels.append(cur_graph.label)
num_graphs += 1
num_graphs_in_batch += 1
node_offset += num_nodes_in_graph
batch_feed_dict = {
model_placeholders['initial_node_features']: np.array(batch_node_features),
model_placeholders['type_to_num_incoming_edges']: np.concatenate(batch_type_to_num_incoming_edges, axis=1),
model_placeholders['graph_nodes_list']: np.concatenate(batch_graph_nodes_list),
model_placeholders['labels']: np.array(batch_node_labels),
model_placeholders['out_layer_dropout_keep_prob']: out_layer_dropout_keep_prob,
}
# Merge adjacency lists:
num_edges = 0
for i in range(self.num_edge_types):
if len(batch_adjacency_lists[i]) > 0:
adj_list = np.concatenate(batch_adjacency_lists[i])
else:
adj_list = np.zeros((0, 2), dtype=np.int32)
num_edges += adj_list.shape[0]
batch_feed_dict[model_placeholders['adjacency_lists'][i]] = adj_list
yield MinibatchData(feed_dict=batch_feed_dict,
num_graphs=num_graphs_in_batch,
num_nodes=node_offset,
num_edges=num_edges)
# def make_minibatch_iterator(self,
# data: Iterable[Any],
# data_fold: DataFold,
# model_placeholders: Dict[str, tf.Tensor],
# max_nodes_per_batch: int,
# ) -> Iterator[MinibatchData]:
# """
# Create minibatches for a sparse graph model, usually by flattening
# many smaller graphs into one large graphs of disconnected components.
# This should produce one epoch's worth of minibatches.
#
# Arguments:
# data: Data to iterate over, created by either load_data or
# load_eval_data_from_path.
# data_fold: Fold of the loaded data to iterate over.
# model_placeholders: The placeholders of the model that need to be
# filled with data. Aside from the placeholders introduced by the
# task in make_task_input_model and make_task_output_model.
# max_nodes_per_batch: Maximal number of nodes that can be packed
# into one batch.
#
# Returns:
# Iterator over MinibatchData values, which provide feed dicts
# as well as some batch statistics.
# """
# if data_fold == DataFold.TRAIN:
# np.random.shuffle(data)
# out_layer_dropout_keep_prob = self.params['out_layer_dropout_keep_prob']
# else:
# out_layer_dropout_keep_prob = 1.0
#
# # Pack until we cannot fit more graphs in the batch
# for cur_graph in data:
# batch_feed_dict = {
# model_placeholders['initial_node_features']: np.array(cur_graph.node_features),
# model_placeholders['type_to_num_incoming_edges']: np.array(cur_graph.type_to_node_to_num_incoming_edges),
# # model_placeholders['graph_nodes_list']: np.concatenate(batch_graph_nodes_list),
# model_placeholders['labels']: np.expand_dims(cur_graph.label,axis=0),
# model_placeholders['out_layer_dropout_keep_prob']: out_layer_dropout_keep_prob,
# }
#
# # Merge adjacency lists:
# num_edges = 0
# for i in range(self.num_edge_types):
# if len(cur_graph.adj_lists[i]) > 0:
# adj_list = cur_graph.adj_lists[i]
# else:
# adj_list = np.zeros((0, 2), dtype=np.int32)
#
# batch_feed_dict[model_placeholders['adjacency_lists'][i]] = adj_list
# num_edges += adj_list.shape[0]
#
# yield MinibatchData(feed_dict=batch_feed_dict,
# num_graphs=1,
# num_nodes=len(cur_graph.node_features),
# num_edges=num_edges)
def early_stopping_metric(self,
task_metric_results: List[Dict[str, np.ndarray]],
num_graphs: int,
) -> float:
"""
Given the results of the task's metric for all minibatches of an
epoch, produce a metric that should go down (e.g., loss). This is used
for early stopping of training.
Arguments:
task_metric_results: List of the values of model_ops['task_metrics']
(defined in make_task_model) for each of the minibatches produced
by make_minibatch_iterator.
num_graphs: Number of graphs processed in this epoch.
Returns:
Numeric value, where a lower value indicates more desirable results.
"""
# Early stopping based on average loss:
return np.sum([m['total_loss'] for m in task_metric_results]) / num_graphs
def pretty_print_epoch_task_metrics(self,
task_metric_results: List[Dict[str, np.ndarray]],
num_graphs: int,
) -> str:
"""
Given the results of the task's metric for all minibatches of an
epoch, produce a human-readable result for the epoch (e.g., average
accuracy).
Arguments:
task_metric_results: List of the values of model_ops['task_metrics']
(defined in make_task_model) for each of the minibatches produced
by make_minibatch_iterator.
num_graphs: Number of graphs processed in this epoch.
Returns:
String representation of the task-specific metrics for this epoch,
e.g., mean absolute error for a regression task.
"""
print("length of the metric ", len(task_metric_results))
return "Acc: %.2f%%" % (np.mean([task_metric_results[i]['accuracy'] for i in range(0,len(task_metric_results))]) * 100,)
```
#### File: stops/manager/stops_manager_cvrptw_ups.py
```python
from utils.number_vehicles.stops.manager import stops_manager_cvrptw
from utils.number_vehicles.stops.objects import stops_cvrptw_ups
from utils.number_vehicles.stops import classDepot
import pandas as pd
import math
class StopsManagerCVRPTW_UPS(stops_manager_cvrptw.StopsManagerCVRPTW):
"""
Class of manager for stops_cvrptw
"""
def __init__(self,depot = None):
super(StopsManagerCVRPTW_UPS,self).__init__(depot)
@classmethod
def from_ups_file(cls,filename,date):
"""
Create a stop manager filled
:param filename: the file from which we should read the stops
        :param date: the date considered
:return: an object of this class
"""
manager = cls()
manager._create_depot(line=None)
df = pd.read_csv(filename,converters={'Date':int})
df = df[df['Date'] == date]
for i,row in df.iterrows():
manager._create_stop(row)
manager._check_time_windows()
manager._check_feasibility_demand()
return manager
def _create_stop(self,row):
"""
        From the given row of the file, create a stop with the corresponding attributes
:param row: a row of the df
:return:
"""
guid = row['ID']
self[guid] = stops_cvrptw_ups.Stop_ups(guid, row['Latitude'], row['Longitude'], row['Npackage'], row['Start Commit Time'],row['End Commit Time'], row['Stop type'],row['Date'])
def _create_depot(self,line):
"""
From the line of the file create the corresponding depot
:return:
"""
self.depot = classDepot.Depot(42.3775, -71.0796, 1200) # we set up a shift of 12h
def _check_time_windows(self):
"""
Check that all stops are feasible: if not then remove them from the manager
:return:
"""
depot_due_date = self.depot.due_date
list_remove = []
for stop in self.values():
dist = stop.get_distance_to_another_stop(self.depot)
dist = 1.2* 1.609 * dist
speed = 4.115 + 13.067 * (1 - math.exp(-4.8257 * dist))
# Convert to km per click
speed = speed / 100
time_needed = dist / speed
# Check if we have time
if time_needed >= stop.endTW:
list_remove.append(stop.guid)
if time_needed + stop.beginTW + stop.service_time >= depot_due_date:
list_remove.append(stop.guid)
for stop_guid in list_remove:
del self[stop_guid]
print("Number of infeasible stops remove ", len(list_remove))
def _check_feasibility_demand(self):
"""
Check that the demand of the stops is lower than the total capacity of the truck,
so far set up to 350. Remove them if infeasible
:return:
"""
max_cap = 350
list_remove = []
for stop in self.values():
if stop.demand >= max_cap:
list_remove.append(stop.guid)
for stop_guid in list_remove:
del self[stop_guid]
print("Number of infeasible stops remove ", len(list_remove))
``` |
{
"source": "jpovedano/rticonnextdds-connector-py",
"score": 2
} |
#### File: test/python/test_rticonnextdds_threading.py
```python
import pytest,time,sys,os,ctypes,json
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../../")
import rticonnextdds_connector as rti
from test_utils import send_data, wait_for_data, open_test_connector
import threading
# In order to be able to catch failures in threads, we need to wrap the
# threading.Thread class such that it re-raises exceptions
class ConnectorCreationThread(threading.Thread):
def __init__(self, target):
threading.Thread.__init__(self, target=target)
self.error = None
def run(self):
try:
threading.Thread.run(self)
except BaseException as e:
self.error = e
def join(self, timeout=None):
super(ConnectorCreationThread, self).join(timeout)
if self.error is not None:
raise self.error
class TestThreading:
"""
Connector is not thread-safe, meaning that it is not supported to make concurrent
calls to the native library. However, protecting these calls with a 3rd-party
threading library (such as Python's 'Threading') is supported. Here we test
that this works as intended.
"""
# In this test we create two Connector objects in separate threads. From one
# of the connectors we create an input, from the other an output and check
# that communication can occur.
# In order to ensure we are testing for CON-163 bug, the XML file does not
# contain a <participant_qos> tag.
def test_creation_of_multiple_connectors(self):
sem = threading.Semaphore()
xml_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../xml/TestConnector3.xml")
def output_thread():
with sem:
with rti.open_connector(
config_name="MyParticipantLibrary::MyParticipant",
url=xml_path) as connector:
assert connector is not None
the_output = connector.getOutput("MyPublisher::MyWriter")
assert the_output is not None
def input_thread():
with sem:
with rti.open_connector(
config_name="MyParticipantLibrary::MyParticipant",
url=xml_path) as connector:
assert connector is not None
the_input = connector.getInput("MySubscriber::MyReader")
assert the_input is not None
input_thread = ConnectorCreationThread(input_thread)
output_thread = ConnectorCreationThread(output_thread)
input_thread.start()
output_thread.start()
input_thread.join(5000)
output_thread.join(5000)
``` |
{
"source": "jpowersdevtech/pressgloss",
"score": 2
} |
#### File: pressgloss/pressgloss/__init__.py
```python
import flask
__version__ = "0.0.1"
def create_app(): # type: () -> flask.Flask
"""
Application context initialization. This method MUST
have this name.
:return: the contextualized flask app
:rtype: flask.Flask
"""
app = flask.Flask(__name__, instance_relative_config=False)
with app.app_context():
from . import daideapp
app.register_blueprint(daideapp.landingpage)
app.register_blueprint(daideapp.theapi)
return app
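# Illustrative usage (an assumption, not part of the package): build the app via
# the factory above and serve it with Flask's development server, e.g.
#
#     app = create_app()
#     app.run(host="127.0.0.1", port=5000)
#
# In production a WSGI server (gunicorn, uwsgi, ...) would call create_app() instead.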
``` |
{
"source": "jpoweseas/games",
"score": 2
} |
#### File: jpoweseas/games/reference_ai.py
```python
import csv
WIN_VALUE = 10000
LOSE_VALUE = -10000
TIE_VALUE = 0
def resolve_random(state_with_probability_list, debug_mode=False, depth_limit=0, use_pruning=True):
    # Expected value over the (state, probability) outcomes of a random node.
    return sum(prob * negamax(state, debug_mode=debug_mode, depth_limit=(depth_limit - 1), use_pruning=use_pruning)
               for state, prob in state_with_probability_list)
def evaluate_player_node(current_state, state_choices, invert, debug_mode=False, depth_limit=0):
invert_mult = -1 if invert else 1
    if debug_mode:
        children_in_eval_order = []
    max_value_so_far = invert_mult * negamax(state_choices[0], debug_mode, depth_limit=(depth_limit - 1), use_pruning=True)
    best_state = state_choices[0]
for state in state_choices[1:]:
if debug_mode:
children_in_eval_order.append(str(state.hash()))
value = invert_mult * negamax(state, debug_mode=debug_mode, depth_limit=(depth_limit - 1), use_pruning=True)
if value > max_value_so_far:
max_value_so_far = value
best_state = state
if debug_mode:
debug_mode.writerow([
current_state.hash(),
current_state.to_reversible_format(),
invert_mult * max_value_so_far,
None, #alpha
None, #beta
'|'.join(children_in_eval_order),
None #is_cutoff
])
return { 'value' : invert_mult * max_value_so_far, 'state' : best_state }
# Returns A's value of the subgame
def negamax(state, debug_mode=False, depth_limit=0, use_pruning=True):
if depth_limit < 0:
assert False
elif depth_limit == 0:
return state.evaluate()
node_type, node = state.next_node()
if node_type == "Random":
return resolve_random(state, debug_mode, depth_limit=depth_limit, use_pruning=use_pruning)
elif node_type == "Terminal":
if node == 'A':
ret = WIN_VALUE
elif node == 'B':
ret = LOSE_VALUE
elif node == 'tie':
ret = TIE_VALUE
else:
assert False
if debug_mode:
debug_mode.writerow([
state.hash(),
state.to_reversible_format(),
ret,
# alpha,
# beta,
# '|'.join(children_in_eval_order),
# is_cutoff
None, None, None, None
])
return ret
elif node_type == 'A' or node_type == 'B':
state_choices = [state.resolve_choice(choice) for choice in node.values()]
result = evaluate_player_node(state, state_choices, invert=(node_type == 'B'), debug_mode=debug_mode, depth_limit=depth_limit)
return result['value']
else:
assert False
class SimpleAIPlayer:
def __init__(self, playing_as, evaluate=None):
self.playing_as = playing_as
self.evaluate = evaluate
def choose_move(self, choices, current_state, debug_mode=False, depth_limit=6):
if debug_mode:
csvfile = open('out.csv', 'w', newline='')
csv_writer = csv.writer(csvfile)
else:
csv_writer = None
state_choices = [current_state.resolve_choice(choice) for choice in choices.values()]
if self.evaluate:
for state in state_choices:
state.evaluate = self.evaluate
choice = evaluate_player_node(current_state, state_choices, invert=(self.playing_as == 'B'), debug_mode=csv_writer, depth_limit=depth_limit)['state']
return choice
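# Illustrative usage (assumes a game-state object exposing next_node() and
# resolve_choice(), such as the TicTacToe class in this repository):
#
#     ai = SimpleAIPlayer(playing_as='A')
#     node_type, choices = state.next_node()
#     best_next_state = ai.choose_move(choices, state, depth_limit=4)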
```
#### File: jpoweseas/games/tictactoe.py
```python
import sys, random, unittest
from ai import AIPlayer
from reference_ai import SimpleAIPlayer
LINES = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 4, 8], [2, 4, 6], [0, 3, 6], [1, 4, 7], [2, 5, 8]]
INVERSE_LINES = [[i for (i, line) in enumerate(LINES) if x in line] for x in range(9)]
SYMMETRIES = [
[2, 1, 0, 5, 4, 3, 8, 7, 6],
[6, 7, 8, 3, 4, 5, 0, 1, 2],
[0, 3, 6, 1, 4, 7, 2, 5, 8],
[8, 5, 2, 7, 4, 1, 6, 3, 0],
[6, 3, 0, 7, 4, 1, 8, 5, 2],
[8, 7, 6, 5, 4, 3, 2, 1, 0],
[2, 5, 8, 1, 4, 7, 0, 3, 6]
]
class TicTacToe:
WIN_VALUE = 10000
LOSE_VALUE = -10000
TIE_VALUE = 0
LINES = LINES
INVERSE_LINES = INVERSE_LINES
SYMMETRIES = SYMMETRIES
def __init__(self, board=None, a_turn = True, memory = None):
board = board if board else [None for _ in range(9)]
self.board = board
self.a_turn = a_turn
if not memory:
num_as = [0 for _ in LINES]
num_bs = [0 for _ in LINES]
self.memory = { 'num_as' : num_as, 'num_bs' : num_bs, 'winner' : None, 'hash' : 0, 'sym_hashes' : [0 for _ in range(7)] }
else:
self.memory = memory
def get_player_to_play(self):
return 'A' if self.a_turn else 'B'
def old_winner(self):
for line in TicTacToe.LINES:
board_line = [self.board[i] for i in line]
if board_line[0] is not None and board_line[0] == board_line[1] and board_line[1] == board_line[2]:
return board_line[0]
if None not in self.board:
return 'tie'
def winner(self):
winner = self.memory['winner']
if winner:
return winner
elif None not in self.board:
return 'tie'
else:
return None
# assumes space is valid
def add_new_mark_and_flip_turn(self, space):
board = self.board.copy()
player_to_play = self.get_player_to_play()
board[space] = self.get_player_to_play()
memory = self.memory.copy()
if player_to_play == 'A':
memory['num_as'] = memory['num_as'].copy()
memory['sym_hashes'] = memory['sym_hashes'].copy()
memory['hash'] = (3 ** space) + memory['hash']
for sym_idx, sym in enumerate(TicTacToe.SYMMETRIES):
memory['sym_hashes'][sym_idx] = (3 ** sym[space]) + memory['sym_hashes'][sym_idx]
for line_idx in TicTacToe.INVERSE_LINES[space]:
memory['num_as'][line_idx] += 1
if memory['num_as'][line_idx] == 3:
memory['winner'] = 'A'
elif player_to_play == 'B':
memory['num_bs'] = memory['num_bs'].copy()
memory['hash'] = 2 * (3 ** space) + memory['hash']
for sym_idx, sym in enumerate(TicTacToe.SYMMETRIES):
memory['sym_hashes'][sym_idx] = 2 * (3 ** sym[space]) + memory['sym_hashes'][sym_idx]
for line_idx in TicTacToe.INVERSE_LINES[space]:
memory['num_bs'][line_idx] += 1
if memory['num_bs'][line_idx] == 3:
memory['winner'] = 'B'
return TicTacToe(board, a_turn = not self.a_turn, memory = memory)
def next_node(self):
open_spaces = [i for i in range(9) if self.board[i] is None]
winner = self.winner()
if winner is not None:
return 'Terminal', winner
elif self.a_turn:
return 'A', {str(i) : i for i in open_spaces}
else:
return 'B', {str(i) : i for i in open_spaces}
def resolve_choice(self, choice):
space = choice
return self.add_new_mark_and_flip_turn(space)
def other_player(player):
if player == 'A':
return 'B'
elif player == 'B':
return 'A'
else:
assert False
def open_lines(self, player):
open_lines = 0
other_player = TicTacToe.other_player(player)
for line in TicTacToe.LINES:
board_line = [self.board[i] for i in line]
if other_player not in board_line:
open_lines += 1
return open_lines
def winner_score(self):
winner = self.winner()
if not winner:
return None
elif winner == 'A':
return TicTacToe.WIN_VALUE
elif winner == 'B':
return TicTacToe.LOSE_VALUE
elif winner == 'tie':
return TicTacToe.TIE_VALUE
else:
assert False
# assumes no winner
# TODO: we now assume that this does do winners properly
def old_evaluate(self):
winner_score = self.winner_score()
if winner_score:
return winner_score
return self.open_lines('A') - self.open_lines('B')
# assumes no winner
# TODO: Test it's the same
def evaluate(self):
winner_score = self.winner_score()
if winner_score:
return winner_score
return self.memory['num_bs'].count(0) - self.memory['num_as'].count(0)
# TODO: make this faster, make this version to_reversible_format
def old_hash(self):
return sum([(0 if self.board[i] is None else 1 if self.board[i] == 'A' else 2) * (3 ** i) for i in range(9)])
def hash(self):
return self.memory['hash']
def old_symmetric_hashes(self):
out = [self.hash()]
for sym in TicTacToe.SYMMETRIES:
out.append(sum([(0 if self.board[sym[i]] is None else 1 if self.board[sym[i]] == 'A' else 2) * (3 ** i) for i in range(9)]))
return out
def symmetric_hashes(self):
return self.memory['sym_hashes']
def unique_hash(self):
hashes = self.symmetric_hashes()
return min(hashes)
def to_reversible_format(self):
return str(self.hash())
def from_reversible_format(fmt):
fmt = int(fmt)
board = []
num_pieces = 0
for i in range(9):
value = fmt % 3
fmt = fmt // 3
if value == 0:
board.append(None)
elif value == 1:
board.append('A')
num_pieces += 1
elif value == 2:
board.append('B')
num_pieces += 1
assert (fmt == 0)
return TicTacToe(board=board, a_turn=(num_pieces % 2 == 0))
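    # Worked example (added for illustration): a board with an 'A' in cell 4 and a
    # 'B' in cell 0 hashes to 1 * 3**4 + 2 * 3**0 == 83, and
    # TicTacToe.from_reversible_format('83') rebuilds exactly that board with A to move.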
def __repr__(self):
def print_cell(x):
if x is None:
return ' '
elif x == 'A':
return 'X'
elif x == 'B':
return 'O'
else:
assert False
return '\n-----\n'.join([('|'.join([print_cell(x) for x in self.board[(3 * i):(3 * i + 3)]])) for i in range(3)])
def print_for_human_input(self):
def print_cell(x, i):
if x is None:
return f'{i}'
elif x == 'A':
return 'X'
elif x == 'B':
return 'O'
else:
assert False
print('\n-----\n'.join([('|'.join([print_cell(x, 3*i + j) for j, x in enumerate(self.board[(3 * i):(3 * i + 3)])])) for i in range(3)]))
# Unused
class RandomAIPlayer:
def __init__(self, playing_as):
pass
def choose_move(self, choices, current_state, debug_mode=False, depth_limit=6):
return random.choice(choices)
class Tests(unittest.TestCase):
def setUp(self):
pass
def test_reversible_format(self):
init_state = TicTacToe()
# TODO: rewrite to use for_each_state
states_to_check = [init_state]
while len(states_to_check) > 0:
state = states_to_check.pop()
node_type, node = state.next_node()
if node_type == 'Terminal':
continue
self.assertEqual(state.board, TicTacToe.from_reversible_format(state.to_reversible_format()).board)
states_to_check.extend([state.resolve_choice(choice) for choice in node.values()])
def test_hash(self):
init_state = TicTacToe()
# TODO: rewrite to use for_each_state
states_to_check = [init_state]
while len(states_to_check) > 0:
state = states_to_check.pop()
node_type, node = state.next_node()
if node_type == 'Terminal':
continue
self.assertEqual(state.old_hash(), state.hash())
states_to_check.extend([state.resolve_choice(choice) for choice in node.values()])
if __name__ == '__main__':
unittest.main()
if len(sys.argv) > 1 and sys.argv[1] == 'test':
pass
elif len(sys.argv) > 1 and sys.argv[1] == 'sym':
state = TicTacToe()
for move in [4, 1, 0, 8]:
state = state.add_new_mark_and_flip_turn(move)
print(state)
for sym_hash in state.symmetric_hashes():
# Uses the fact that hash = rev format for TicTacToe
new_state = TicTacToe.from_reversible_format(sym_hash)
print(new_state)
elif len(sys.argv) > 1 and sys.argv[1] == 'ref':
state = TicTacToe()
for move in [4, 1, 0]:
state = state.add_new_mark_and_flip_turn(move)
ref_ai = SimpleAIPlayer(playing_as=True)
node_type, choices = state.next_node()
ref_ai.choose_move(choices, state, debug_mode=True, depth_limit=100)
else:
state = TicTacToe()
# for move in [4, 1, 0]:
# state = state.add_new_mark_and_flip_turn(move)
ai = AIPlayer(playing_as='A')
node_type, choices = state.next_node()
ai.choose_move(choices, state, debug_mode=True, depth_limit=6)
``` |
{
"source": "jpoweseas/oh-queue",
"score": 2
} |
#### File: jpoweseas/oh-queue/manage.py
```python
import datetime
import functools
import random
import sys
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
import names
from oh_queue import app, socketio
from oh_queue.models import db, Ticket, User, TicketStatus
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
def not_in_production(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if app.config.get('ENV') == 'prod':
            print('this command should not be run in production. Aborting.')
sys.exit(1)
return f(*args, **kwargs)
return wrapper
@manager.command
@not_in_production
def seed():
print('Seeding...')
for i in range(10):
real_name = names.get_full_name()
first_name, last_name = real_name.lower().split(' ')
pennkey = '{0}{1}'.format(
random.choice([first_name[0], first_name]),
random.choice([last_name[0], last_name]),
)
student = User.query.filter_by(pennkey=pennkey).one_or_none()
if not student:
student = User(name=real_name, pennkey=pennkey)
db.session.add(student)
db.session.commit()
delta = datetime.timedelta(minutes=random.randrange(0, 30))
ticket = Ticket(
user=student,
status=TicketStatus.pending,
created=datetime.datetime.utcnow() - delta,
assignment=random.choice(['Hog', 'Scheme']),
description=random.choice(['', 'SyntaxError on Line 5']),
question=random.randrange(1, 6),
location=random.choice(['109 Morgan', '247 Cory']),
)
db.session.add(ticket)
db.session.commit()
@manager.command
@not_in_production
def resetdb():
print('Dropping tables...')
db.drop_all(app=app)
print('Creating tables...')
db.create_all(app=app)
seed()
@manager.command
@not_in_production
def server():
socketio.run(app)
if __name__ == '__main__':
manager.run()
```
#### File: oh-queue/oh_queue/views.py
```python
import datetime
import functools
import collections
import pytz
from flask import render_template, url_for
from flask_login import current_user
from flask_socketio import emit
from oh_queue import app, db, socketio
from oh_queue.auth import refresh_user
from oh_queue.models import Ticket, TicketStatus, TicketEvent, TicketEventType
def user_json(user):
return {
'id': user.id,
'pennkey': user.pennkey,
'name': user.name,
'isStaff': user.is_staff,
}
def student_json(user):
""" Only send student information to staff. """
can_see_details = (current_user.is_authenticated
and (current_user.is_staff or user.id == current_user.id))
if not can_see_details:
return {}
return user_json(user)
def ticket_json(ticket):
return {
'id': ticket.id,
'status': ticket.status.name,
'user': student_json(ticket.user),
'created': ticket.created.isoformat(),
'location': ticket.location,
'assignment': ticket.assignment,
'description': ticket.description,
'question': ticket.question,
'helper': ticket.helper and user_json(ticket.helper),
}
def emit_event(ticket, event_type):
ticket_event = TicketEvent(
event_type=event_type,
ticket=ticket,
user=current_user,
)
db.session.add(ticket_event)
db.session.commit()
socketio.emit('event', {
'type': event_type.name,
'ticket': ticket_json(ticket),
})
def emit_presence(data):
socketio.emit('presence', {k: len(v) for k,v in data.items()})
user_presence = collections.defaultdict(set) # An in memory map of presence.
@app.route('/presence')
@app.route('/')
@app.route('/<int:ticket_id>/')
def index(*args, **kwargs):
if not current_user:
refresh_user()
return render_template('index.html')
def socket_error(message, category='danger', ticket_id=None):
return {
'messages': [
{
'category': category,
'text': message,
},
],
'redirect': url_for('index', ticket_id=ticket_id),
}
def socket_redirect(ticket_id=None):
return {
'redirect': url_for('index', ticket_id=ticket_id),
}
def socket_unauthorized():
return socket_error("You don't have permission to do that")
def logged_in(f):
@functools.wraps(f)
def wrapper(*args, **kwds):
if not current_user.is_authenticated:
return socket_unauthorized()
return f(*args, **kwds)
return wrapper
def is_staff(f):
@functools.wraps(f)
def wrapper(*args, **kwds):
if not (current_user.is_authenticated and current_user.is_staff):
return socket_unauthorized()
return f(*args, **kwds)
return wrapper
@socketio.on('connect')
def connect():
if not current_user.is_authenticated:
pass
elif current_user.is_staff:
user_presence['staff'].add(current_user.pennkey)
else:
user_presence['students'].add(current_user.pennkey)
tickets = Ticket.query.filter(
Ticket.status.in_([TicketStatus.pending, TicketStatus.assigned])
).all()
emit('state', {
'tickets': [ticket_json(ticket) for ticket in tickets],
'currentUser':
user_json(current_user) if current_user.is_authenticated else None,
})
emit_presence(user_presence)
@socketio.on('disconnect')
def disconnect():
if not current_user.is_authenticated:
pass
elif current_user.is_staff:
if current_user.pennkey in user_presence['staff']:
user_presence['staff'].remove(current_user.pennkey)
else:
if current_user.pennkey in user_presence['students']:
user_presence['students'].remove(current_user.pennkey)
emit_presence(user_presence)
@socketio.on('refresh')
def refresh(ticket_ids):
tickets = Ticket.query.filter(Ticket.id.in_(ticket_ids)).all()
return {
'tickets': [ticket_json(ticket) for ticket in tickets],
}
@socketio.on('create')
@logged_in
def create(form):
"""Stores a new ticket to the persistent database, and emits it to all
connected clients.
"""
my_ticket = Ticket.for_user(current_user)
if my_ticket:
return socket_error(
'You are already on the queue',
category='warning',
ticket_id=my_ticket.ticket_id,
)
# Create a new ticket and add it to persistent storage
if not (form.get('assignment') and form.get('question')
and form.get('location')):
return socket_error(
'You must fill out all the fields',
category='warning',
)
ticket = Ticket(
status=TicketStatus.pending,
user=current_user,
assignment=form.get('assignment'),
question=form.get('question'),
location=form.get('location'),
)
db.session.add(ticket)
db.session.commit()
emit_event(ticket, TicketEventType.create)
return socket_redirect(ticket_id=ticket.id)
def get_tickets(ticket_ids):
return Ticket.query.filter(Ticket.id.in_(ticket_ids)).all()
def get_next_ticket():
"""Return the user's first assigned but unresolved ticket.
    If none exists, fall back to the first unassigned ticket.
"""
ticket = Ticket.query.filter(
Ticket.helper_id == current_user.id,
Ticket.status == TicketStatus.assigned).first()
if not ticket:
ticket = Ticket.query.filter(
Ticket.status == TicketStatus.pending).first()
if ticket:
return socket_redirect(ticket_id=ticket.id)
else:
return socket_redirect()
@socketio.on('next')
@is_staff
def next_ticket(ticket_ids):
return get_next_ticket()
@socketio.on('delete')
@logged_in
def delete(ticket_ids):
tickets = get_tickets(ticket_ids)
for ticket in tickets:
if not (current_user.is_staff or ticket.user.id == current_user.id):
return socket_unauthorized()
ticket.status = TicketStatus.deleted
emit_event(ticket, TicketEventType.delete)
db.session.commit()
@socketio.on('resolve')
@logged_in
def resolve(ticket_ids):
tickets = get_tickets(ticket_ids)
for ticket in tickets:
if not (current_user.is_staff or ticket.user.id == current_user.id):
return socket_unauthorized()
ticket.status = TicketStatus.resolved
emit_event(ticket, TicketEventType.resolve)
db.session.commit()
return get_next_ticket()
@socketio.on('assign')
@is_staff
def assign(ticket_ids):
tickets = get_tickets(ticket_ids)
for ticket in tickets:
ticket.status = TicketStatus.assigned
ticket.helper_id = current_user.id
emit_event(ticket, TicketEventType.assign)
db.session.commit()
@socketio.on('unassign')
@is_staff
def unassign(ticket_ids):
tickets = get_tickets(ticket_ids)
for ticket in tickets:
ticket.status = TicketStatus.pending
ticket.helper_id = None
emit_event(ticket, TicketEventType.unassign)
db.session.commit()
@socketio.on('load_ticket')
@is_staff
def load_ticket(ticket_id):
ticket = Ticket.query.get(ticket_id)
if ticket:
return ticket_json(ticket)
@socketio.on('describe')
def describe(description):
ticket_id, description = description['id'], description['description']
ticket = Ticket.query.filter(Ticket.id == ticket_id).first()
ticket.description = description
emit_event(ticket, TicketEventType.describe)
db.session.commit()
``` |
{
"source": "jpozin/Math-Projects",
"score": 4
} |
#### File: jpozin/Math-Projects/StatisticalMoments.py
```python
import numpy as np
from math import sqrt

def moments(rv):
    """Return the first four raw (non-central) moments E[X], E[X^2], E[X^3], E[X^4].

    Raw moments are needed here: the formulas below expand central moments in
    terms of raw ones.
    """
    rv = np.asarray(rv, dtype=float)
    return [np.mean(rv ** k) for k in (1, 2, 3, 4)]

def mean(rv):
    """Return the mean of the random variable"""
    return moments(rv)[0]

def var(rv):
    """Return the (population) variance of the random variable"""
    m = moments(rv)
    return m[1] - m[0] ** 2

def sd(rv):
    """Return the standard deviation of the random variable"""
    return sqrt(var(rv))

def skew(rv):
    """Return the skewness of the random variable"""
    m = moments(rv)
    mu = m[0]
    sigma = sqrt(m[1] - mu ** 2)
    third = m[2]
    # E[(X - mu)^3] expanded in raw moments, divided by sigma^3
    return (third - 3 * mu * sigma ** 2 - mu ** 3) / sigma ** 3

def kurt(rv):
    """Return the (non-excess) kurtosis of the random variable"""
    m = moments(rv)
    mu = m[0]
    sigma = sqrt(m[1] - mu ** 2)
    third = m[2]
    fourth = m[3]
    # E[(X - mu)^4] expanded in raw moments, divided by sigma^4
    return (fourth - 4 * mu * third + 6 * mu ** 2 * sigma ** 2 + 3 * mu ** 4) / sigma ** 4
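# Example usage (illustrative, added for clarity): draw a normal sample and compare
# the moment-based statistics with their theoretical values.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    sample = rng.normal(loc=1.0, scale=2.0, size=100000)
    print("mean (expected ~1.0):", mean(sample))
    print("var  (expected ~4.0):", var(sample))
    print("skew (expected ~0.0):", skew(sample))
    print("kurt (expected ~3.0):", kurt(sample))  # plain (non-excess) kurtosis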
``` |
{
"source": "jppaiement/fSEIR_simulator",
"score": 2
} |
#### File: jppaiement/fSEIR_simulator/fSEIR.py
```python
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import getopt
import os
import random
import sys
import time
import cartopy.crs as ccrs
import seaborn as sns
import csv
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import fcluster
from scipy.stats import gaussian_kde
from scipy import stats
from sklearn.neighbors import KernelDensity
global VISUALIZE
#%%
#DEFINE SIMULATION PARAMETERS
# Make a directory for the data, and change into that directory.
os.chdir(r"C:\...............................")
currenttime = time.strftime("%Y-%m-%dT%H%M%S", time.gmtime())
os.makedirs(currenttime)
os.chdir(currenttime)
# Number of simulations to run
NUM_SIMULATIONS = 5 #Number of simulations to run
#SEIR Model name
MName = "Predictive spread model simulation"
#Learning features from demographic data
used_columns = ['GDP', 'PopDen', 'HealthExp', 'TouristArrival', 'sanitaryServie_pct', 'pop_above65', 'pop_15-64', 'pop_0-14', 'perc_male', 'UrbanPop_perc', 'Pop_growth', 'TouristUSD', 'CO2Emiss', 'ChineseDiasp']
#Random Forest Parameters
nestimators = 150
nmax_depth = 25
nmin_samples_split = 2
nmin_samples = 5
#Number of country demographic clusters
n = 15
#choose 1 prediction method
use_trueevol = False
use_pred = True #Use the predicted disease evolution from the random forest model
#choose 1 weighting method
use_demo = False #Use demographic distance to predict the next exposed node
use_weightP = True #Use random forest feature importance weights to predict the next exposed node
#SEIR input parameters
beta = 35 #The parameter controlling how many nodes could be infected by an infected passenger
gamma = 80 #The rate at which an infected node recovers and moves into the resistant phase
alpha = 27 #The rate at which an exposed node becomes infective
panday = 220 #Number of days over which the pandemic spreads
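# Illustrative reference (an assumption added for clarity; NOT called by this script):
# in a classic compartmental SEIR model, quantities like the three above play roughly
# the roles of a transmission rate (beta), an incubation period (alpha) and a recovery
# period (gamma) in the per-step update sketched below. This script instead applies
# per-node transitions on the flight network.
def _seir_reference_step(S, E, I, R, beta_rate, incubation_days, recovery_days):
    """One Euler step of a deterministic SEIR model (illustration only)."""
    N = S + E + I + R
    new_exposed = beta_rate * S * I / N
    new_infective = E / incubation_days
    new_recovered = I / recovery_days
    return (S - new_exposed,
            E + new_exposed - new_infective,
            I + new_infective - new_recovered,
            R + new_recovered)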
#Define Simulation parameters defaults
start = 3376 #node ID to start infection
target = 'Wuhan' # Name of initial infected city
VISUALIZE = True #Visualize map during simulation
INTERNATIONAL = True #Consider international airports
DOMESTIC = True #Consider domestic airports
#Strategies to stem propagation
#Vaccination Strategy
Vaccinated_Countries = ['Italy', 'Spain', 'United States of America', 'United Kingdom', 'Germany', 'France'] #List of countries to vaccinate
DELAY = 60 #Delay in starting the vaccination program
#Flight Cancellation Strategy
strategy= 'none' #cancellation strategy ("clustering", "betweenness", "random" or "custom" )
custom_cancel = [] #List of countries for which to cancel flights
INTER_cancel = False #Cancel international flights
DOM_cancel = False #Cancel domestic flights
Cancel_Delay = 0 #on which step to start cancelling flights
#Stemming efforts (in percent)
efforts = (0,) #Cancellation effort to apply (percentages of cancelled flights from list)
#%% PREPARE NODE DATA (impute and compute weights from the ML model)
print('Building predictive model for day zero and calculating weights')
print('****************************************************************************************')
#Import data
DFimp = pd.read_csv(r".....................................................",encoding="ANSI")
print(list(DFimp.columns.values))
DF = DFimp.drop_duplicates(subset='Country', keep="first")
#Create Learning and Test sets
#DFlearn = DF.loc[:,6:19]
#print(DFlearn)
#columns to be used to as target
DFlearn = DF[DF['DayZero']<1000]
selected_columns = used_columns
def select_columns(data_frame, column_names):
new_frame = data_frame.loc[:, column_names]
return new_frame
voxetL = select_columns(DFlearn, selected_columns)
# Create Training Features and Labels Set
X = voxetL
y = np.ravel(np.c_[DFlearn['DayZero']])
#Ipute missing
imp = IterativeImputer(missing_values=np.nan, max_iter=100, random_state=0)
Xt= pd.DataFrame(imp.fit_transform(X))
Xt.columns = used_columns
Xt['GDP'] = Xt['GDP']/10000000000
Xt['TouristArrival'] = Xt['TouristArrival']/1000000
Xt['pop_above65'] = Xt['pop_above65']/1000000
Xt['pop_15-64'] = Xt['pop_15-64']/1000000
Xt['pop_0-14'] = Xt['pop_0-14']/1000000
scaler = StandardScaler()
scaler.fit(Xt)
Xt = scaler.transform(Xt)
#bins = np.linspace(0, max(y), StratiBins)
X_train, X_test, y_train, y_test = train_test_split(Xt, y, test_size=0.3)
print('TEST AND LEARN COUNTS')
print('Number of observations in the training data:', len(X_train))
print('Number of observations in the test data:',len(X_test))
print('Number of observations in the target training data:',len(y_train))
print('Number of observations in the target test data:',len(y_test))
print('Number of features:', Xt.shape[1])
while True:
# Create Random forest Classifier & Train on data
X_train, X_test, y_train, y_test = train_test_split(Xt, y, test_size=0.3)
model = RandomForestRegressor(n_estimators=nestimators, max_depth=nmax_depth, min_samples_split=nmin_samples_split, min_samples_leaf=nmin_samples, verbose=3)
model.fit(X_train, y_train.ravel())
train_Y = model.predict(X_train)
test_Y = model.predict(X_test)
R2 = r2_score(y_test, test_Y)
print('R2_score:', R2)
if R2>=0.5:
break
print('****************************************************************************************')
plot = (sns.jointplot(x=y_test, y=test_Y, kind='reg', color='blue', height=8, scatter_kws={"s": 10})
.plot_joint(sns.kdeplot, color='blue', shade= False, alpha=0.5)
)
plot.x = y_train
plot.y = train_Y
plot.plot_joint(plt.scatter, marker='x', c='g', s=10, alpha=0.8)
plot.ax_marg_x.set_xlim(0, 100)
plot.ax_marg_y.set_ylim(0, 100)
x0, x1 = plot.ax_joint.get_xlim()
y0, y1 = plot.ax_joint.get_ylim()
lims = [max(x0, y0), min(x1, y1)]
plot.ax_joint.plot(lims, lims, ':k', color='red')
Features_Importance = pd.DataFrame.from_records((list(zip(DF[selected_columns], model.feature_importances_))))
print(Features_Importance)
importances = model.feature_importances_
x_values = list(range(len(importances)))
plt.figure(figsize=(12,6))
plt.bar(x_values, importances, orientation = 'vertical')
plt.xticks(x_values, selected_columns, rotation='vertical', size=8)
plt.ylabel('Importance'); plt.xlabel('Features'); plt.title('Variable Importances Timeline Prediction');
plt.savefig("FeatureImportance.png", bbox_inches='tight', dpi=96)
plt.show()
def select_columns(data_frame, column_names):
new_frame = data_frame.loc[:, column_names]
return new_frame
DFpred = select_columns(DFimp, selected_columns)
print('Imputing Missing Values')
#Impute missing values
imp2 = IterativeImputer(missing_values=np.nan, max_iter=800, random_state=0)
DFpredt= pd.DataFrame(imp2.fit_transform(DFpred))
DFpredt.columns = used_columns
DFpredt['GDP'] = DFpredt['GDP']/10000000000
DFpredt['TouristArrival'] = DFpredt['TouristArrival']/1000000
DFpredt['pop_above65'] = DFpredt['pop_above65']/1000000
DFpredt['pop_15-64'] = DFpredt['pop_15-64']/1000000
DFpredt['pop_0-14'] = DFpredt['pop_0-14']/1000000
DFw = pd.DataFrame(DFpredt)
DFw.columns = used_columns
DFimp['predDayZero'] = model.predict(DFw)
print('Done')
print('****************************************************************************************')
#%%
print('Calculating Weights')
DFimp['weightsP'] = (
(abs(DFw['GDP']) * Features_Importance.loc[0, 1])
+ (abs(DFw['PopDen']) * Features_Importance.loc[1, 1])
+ (abs(DFw['HealthExp']) * Features_Importance.loc[2, 1])
+ (abs(DFw['TouristArrival']) * Features_Importance.loc[3, 1])
+ (abs(DFw['sanitaryServie_pct']) * Features_Importance.loc[4, 1])
+ (abs(DFw['pop_above65']) * Features_Importance.loc[5, 1])
+ (abs(DFw['perc_male']) * Features_Importance.loc[8, 1])
+ (abs(DFw['UrbanPop_perc']) * Features_Importance.loc[9, 1])
+ (abs(DFw['Pop_growth']) * Features_Importance.loc[10, 1])
+ (abs(DFw['TouristUSD']) * Features_Importance.loc[11, 1])
+ (abs(DFw['CO2Emiss']) * Features_Importance.loc[12, 1])
+ (abs(DFw['ChineseDiasp']) * Features_Importance.loc[13, 1])
)
#DFimp['weightsP'] = ((abs(DFw['HealthExp'])*Features_Importance.loc[2,1])+(abs(DFw['sanitaryServie_pct'])*Features_Importance.loc[4,1])+(abs(DFw['TouristUSD'])*Features_Importance.loc[11,1]))
#DFimp['weightsP'] = (abs(DFw['TouristArrival']))
#DFimp['weightsP'] = (abs(DFw['ChineseDiasp']))
x = DFimp['weightsP'].values.astype(float).reshape(-1,1)
scale = preprocessing.MinMaxScaler()
DFimp['weightsP'] = scale.fit_transform(x,x)
plt.figure(figsize=(10,6))
DFimp.weightsP.hist(alpha=0.4, bins=30, range=[0,1])
plt.title("MWeights from RF model")
plt.xlabel("Weights")
plt.savefig("Weights of nodes.png", bbox_inches='tight', dpi=96)
plt.show()
DFnodes = (DFimp.loc[:, ['ID', 'City', 'Country', 'lat', 'lon', 'weightsP', 'DayZero', 'predDayZero']])
print('Done')
print('****************************************************************************************')
#%%
#Cluster demographic data to assign as weights
print('Clustering of demographic data')
#Create dataset for clustering (countries), impute and scale
country = DFimp.drop_duplicates(subset='Country', keep="first")
used_columns2 = ['DayZero', 'PopDen', 'HealthExp', 'TouristArrival', 'sanitaryServie_pct', 'pop_above65', 'pop_15-64', 'pop_0-14', 'perc_male', 'UrbanPop_perc', 'Pop_growth', 'TouristUSD', 'CO2Emiss','ChineseDiasp']
selected_columns2 = used_columns2
def select_columns(data_frame, column_names):
new_frame = data_frame.loc[:, column_names]
return new_frame
countryC = select_columns(country, selected_columns2)
countryCt= pd.DataFrame(imp2.fit_transform(countryC))
countryCt.columns = used_columns2
names = countryCt.columns
scaler = preprocessing.StandardScaler()
scaled_countryCt = scaler.fit_transform(countryCt)
scaled_countryCt = pd.DataFrame(scaled_countryCt, columns=names)
#create linkage matrix and assign hierarchical clustering
Z = linkage(scaled_countryCt, method='ward', metric='euclidean', optimal_ordering=False)
plt.figure(figsize=(15, 8))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
dendrogram(
Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=3., # font size for the x axis labels
)
plt.savefig("Hierarchical tree classification.png", bbox_inches='tight', dpi=200)
plt.show()
MaxK=100
clustersperf = fcluster(Z, MaxK, criterion='maxclust')
clustersperf
last = Z[-MaxK:, 2]
last_rev = last[::-1]
idxs = np.arange(1, len(last) + 1)
plt.figure(figsize=(10, 6))
plt.plot(idxs, last_rev)
acceleration = np.diff(last, 2) # 2nd derivative of the distances
acceleration_rev = acceleration[::-1]
plt.plot(idxs[:-2] + 1, acceleration_rev)
plt.title("Clustering Performance")
plt.savefig("Hierarchical clustering sensivity.png", bbox_inches='tight', dpi=96)
plt.show()
k = acceleration_rev.argmax() + 2 # if idx 0 is the max of this we want 2 clusters
print ("clusters:", k)
#%%
#Assign clusters to airports
country['clust'] = fcluster(Z, n, criterion='maxclust')
c=(country.loc[:,'Country'])
cl=(country.loc[:,'clust'])
cdict = dict(zip(c,cl))
DFnodes['clust'] = DFnodes['Country'].map(cdict)
convert_dict = {'ID':int, 'City':str, 'Country':str, 'lat':float, 'lon':float, 'weightsP':float, 'DayZero':float, 'predDayZero':float, 'clust':float}
DFnodes = DFnodes.astype(convert_dict)
#Save data
DFnodes.to_csv(r'C:/.............................................', index=False, header=False, quoting=csv.QUOTE_NONNUMERIC, quotechar= '"', encoding='ANSI')
print('[Done]')
#%% DEFINITIONS
def create_network(nodes, edges):
#Load Data for Nodes and Routes
print("Creating network.")
G = nx.DiGraph()
print("\tLoading airports", end="")
sys.stdout.flush()
# Populate the graph with nodes.
with open('C:/...........................................................................', 'r', encoding='ANSI') as f:
for line in f.readlines():
entries = line.replace('"',"").rstrip().split(",")
G.add_node(int(entries[0]),
country=entries[2],
name=entries[1],
lat=entries[3],
lon=entries[4],
weightP=entries[5],
clust=entries[8],
predDayZero=entries[7],
dayzero = entries[6]
)
print("\t\t\t\t\t[Done]")
print("\tLoading routes",end="")
# Populate the graph with edges.
sys.stdout.flush()
edge_count = 0
error_count = 0
duplicate_count = 0
line_num = 1
with open('C:/........................................................................', 'r', encoding='ANSI') as f:
for line in f.readlines():
entries = line.replace('"',"").rstrip().split(",")
try:
if G.has_edge(int(entries[3]),int(entries[5])):
duplicate_count += 1
else:
if line_num > 1:
from_vertex = int(entries[3])
to_vertex = int(entries[5])
G.add_edge(from_vertex, to_vertex )
G.edges[from_vertex, to_vertex]['IATAFrom'] = entries[2]
G.edges[from_vertex, to_vertex]['IATATo'] = entries[4]
edge_count += 1
except ValueError:
# The value doesn't exist
error_count += 1
pass
line_num += 1
print("\t\t\t\t\t\t[Done]")
def calculate_weights(input_network):
"""
Add weights to the edges of a network based on the degrees of the connecting
vertices, and return the network.
Args:
input_network: A NetworkX graph object
Returns:
G: A weighted NetworkX graph object.
"""
G = input_network.copy()
# Add weights to edges
for n in G.nodes():
successors = list(G.successors(n))
weights = dict()
# Calculate the total out-degree of all successors
wP = float(G.nodes[n]["weightP"])
predDayZero = float(G.nodes[n]["predDayZero"])
dayzero = float(G.nodes[n]["dayzero"])
total_degree = 0
for successor in successors:
total_degree += G.out_degree(successor)
# Find the weight for all possible successors
for successor in successors:
successor_degree = G.out_degree(successor)
if total_degree > 0:
probability_of_infection = successor_degree/total_degree
else:
probability_of_infection = 0
weights[successor] = probability_of_infection
largest_weight = 0
smallest_weight = 2
for successor, weight in weights.items():
if weight > largest_weight:
largest_weight = weight
elif weight < smallest_weight:
smallest_weight = weight
for successor in successors:
if largest_weight != smallest_weight:
relative_weight = (weights[successor] - smallest_weight) / (largest_weight - smallest_weight)
else:
relative_weight = 0
#print(relative_weight)
G[n][successor]['weight'] = relative_weight
G[n][successor]['weightP'] = wP
G[n][successor]['predDayZero'] = predDayZero
G[n][successor]['dayzero'] = dayzero
return G
# Calculate the edge weights
print("\tCalculating edge weights",end="")
G = calculate_weights(G)
print("\t\t\t\t[Done]")
# Limit to the first subgraph
print("\tFinding largest subgraph",end="")
undirected = G.to_undirected()
subgraphs = nx.subgraph(G, undirected)
subgraph_nodes = subgraphs.nodes()
to_remove = list()
for node in G.nodes():
if node not in subgraph_nodes:
to_remove.append(node)
G.remove_nodes_from(to_remove)
print("\t\t\t\t[Done]")
# Remove nodes without inbound edges
print("\tRemoving isolated vertices",end="")
indeg = G.in_degree()
outdeg = G.out_degree()
to_remove=[n for n, degree in indeg if (indeg[n] + outdeg[n] < 1)]
G.remove_nodes_from(to_remove)
print("\t\t\t\t[Done]")
# Add clustering data
print("\tCalculating clustering coefficents",end="")
cluster_network = nx.Graph(G)
lcluster = nx.clustering(cluster_network)
for i,j in G.edges():
cluster_sum = lcluster[i] + lcluster[j]
G[i][j]['cluster'] = cluster_sum
print("\t\t\t[Done]")
# Flag flights as domestic or international and remove Domestic
print("\tCategorizing international and domestic flights",end="")
for i,j in G.edges():
if G.nodes[i]["country"] == G.nodes[j]["country"]:
G[i][j]['international'] = False
else:
G[i][j]['international'] = True
print("\t\t[Done]")
# Calculate distance between demographics
print("\tCalculaying demographic clusters distance",end="")
for i,j in G.edges():
G[i][j]['DistDemo'] = abs(float(G.nodes[i]["clust"]) - float(G.nodes[j]["clust"]))
print("\t\t[Done]")
# Remove nodes without inbound edges
print("\tRemoving isolated vertices",end="")
indeg = G.in_degree()
outdeg = G.out_degree()
to_remove=[n for n, degree in indeg if (indeg[n] + outdeg[n] < 1)]
G.remove_nodes_from(to_remove)
print("\t\t\t\t[Done]")
# Limit to the first subgraph
print("\tFinding largest subgraph",end="")
undirected = G.to_undirected()
subgraphs = nx.subgraph(G, undirected)
subgraph_nodes = subgraphs.nodes()
to_remove = list()
for node in G.nodes():
if node not in subgraph_nodes:
to_remove.append(node)
G.remove_nodes_from(to_remove)
print("\t\t\t\t[Done]")
return G
def infection(input_network, vaccination, starts, DELAY=DELAY, Cancel_Delay=Cancel_Delay, vis = True, file_name = "sir.csv", title = MName, RECALCULATE = False):
print("Simulating infection.")
network = input_network.copy()
# Recalculate the weights of the network as per necessary
# Open the data file
f = open(file_name, "w")
f.write("time, s, e, i, r\n")
f.close()
# Set the default status to susceptible
sys.stdout.flush()
for node in network.nodes():
network.nodes[node]["status"] = "s"
network.nodes[node]["color"] = "#A0C8F0"
network.nodes[node]["age"] = 0
# Assign the infected
#for start in starts:
infected = start
network.nodes[infected]["status"] = "i"
network.nodes[infected]["color"] = "red"
if vis:
pos = nx.spring_layout(network, scale=2)
if isinstance(network,nx.DiGraph):
in_degree = network.in_degree()[infected]
out_degree = network.out_degree()[infected]
degree = in_degree + out_degree
else:
degree = network.degree()[infected]
print("\t",network.nodes[infected]["name"],"[",degree,"]", " connections")
#List vaccinated edges and remove
for i,j in network.edges():
network[i][j]["vaccinated"] = False
if network.nodes[i]["country"] in Vaccinated_Countries or network.nodes[j]["country"] in Vaccinated_Countries:
network[i][j]["vaccinated"] = True
vaccination = list(((u,v) for u,v,j in network.edges(data=True) if j['vaccinated'] == True))
if vaccination is not None:
print("\tVaccinated: ",Vaccinated_Countries, ": ", len(vaccination)," edges" )
else:
print("\tVaccinated: None")
if cancelled is not None:
print("\tCancelled: ", len(cancelled)," edges" )
else:
print("\tCancelled: None")
# Iterate Vaccination and/or Cancellation through the evolution of the disease.
for step in range(0,panday):
# If the delay is over, vaccinate.
# Convert the STRING!
if int(step) == int(DELAY):
if vaccination is not None:
print(DELAY,"Vaccination on step",DELAY)
network.remove_edges_from(vaccination)
# Recalculate the weights of the network as per necessary
if RECALCULATE == True:
network = calculate_weights(network)
if int(step) == int(Cancel_Delay):
if cancelled is not None:
print("Cancellation on step",Cancel_Delay, ": ", len(cancelled), " remove flights")
network.remove_edges_from(cancelled)
# Recalculate the weights of the network as per necessary
if RECALCULATE == True:
network = calculate_weights(network)
# Create variables to hold the outcomes as they happen
S,E,I,R = 0,0,0,0
for node in network.nodes():
status = network.nodes[node]["status"]
age = network.nodes[node]["age"]
color = network.nodes[node]["color"]
if status is "i" and age >= gamma:
# The infected has reached its recovery time after 60 days
network.nodes[node]["status"] = "r"
network.nodes[node]["color"] = "purple"
if status is "e" and age >= alpha and age < gamma:
# Exposed nodes have an incubation in average 14 days
network.nodes[node]["status"] = "i"
network.nodes[node]["color"] = "red"
elif status is "e":
network.nodes[node]["age"] += 1
elif status is "i":
# Propogate the infection.
if age > alpha:
victims = (list(network.successors(node)))
number_infections = 0
if len(victims) >= beta:
victims = random.sample((list(network.successors(node))), beta)
number_infections = 0
else:
victims = (list(network.successors(node)))
number_infections = 0
for victim in victims:
infect_status = network.nodes[victim]["status"]
infect = False # Set this flag to False to start weighting.
rand = random.uniform(0,1)
if network[node][victim]['international'] == False and random.uniform(0,1) <= float(network[node][victim]['weight']):
infect = True
number_infections+=1
if use_pred == True and network[node][victim]['international'] == True :
if use_demo == True and network[node][victim]['DistDemo'] >= rand:
infect = True
number_infections+=1
if use_weightP == True and rand <= float(network[node][victim]['weightP']):
infect = True
number_infections+=1
if use_trueevol == True and network[node][victim]['international'] == True and float(network[node][victim]['dayzero'])<step:
if use_demo == True and network[node][victim]['DistDemo'] >= rand:
infect = True
number_infections+=1
if use_weightP == True and rand <= float(network[node][victim]['weightP']):
infect = True
number_infections+=1
if infect_status == "s" and infect == True:
network.nodes[victim]["status"] = "e"
network.nodes[victim]["age"] = 0
network.nodes[victim]["color"] = "#30cc1f"
network.nodes[node]["age"] += 1
# Loop twice to prevent bias.
for node in network.nodes():
status = network.nodes[node]["status"]
age = network.nodes[node]["age"]
color = network.nodes[node]["color"]
if status is "s":
# Count those susceptable
S += 1
if status is "e":
E += 1
if status is "v":
S += 1
elif status is "r":
R += 1
elif status is "i":
I += 1
print("{0}, {1}, {2}, {3}, {4}".format(step, S, E, I, R))
printline = "{0}, {1}, {2}, {3}, {4}".format(step, S, E, I, R)
f = open(file_name, "a")
f.write(printline + "\n")
f.close()
print("\t"+printline)
if I == 0:
break
if vis:
#write_dot(network, title+".dot")
visualize(network, title, pos)
print("\t----------\n\tS: {0}, I: {1}, R: {2}".format(S,I,R))
return {"Suscceptable":S,"Infected":I, "Recovered":R}
def weighted_random(weights):
number = random.random() * sum(weights.values())
for k,v in weights.items():
if number <= v:
break
number -= v
return k
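# Minimal usage sketch (illustrative only; not exercised elsewhere in this listing):
# weighted_random performs roulette-wheel selection, returning a key with
# probability proportional to its value, e.g.
#     weighted_random({"a": 0.7, "b": 0.2, "c": 0.1})
# returns "a" on roughly 70% of draws.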
def pad_string(integer, n):
"""
Add "0" to the front of an interger so that the resulting string in n
characters long.
Args:
integer: The number to pad.
n: The desired length of the string
Returns
string: The padded string representation of the integer.
"""
string = str(integer)
while len(string) < n:
string = "0" + string
return string
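# Example (illustrative): pad_string(7, 4) returns "0007", equivalent to str(7).zfill(4).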
def visualize(network, title,pos):
"""
Visualize the network given an array of positions.
"""
print("-- Starting to Visualize --")
colors = []
colori = []
i_edge_colors = []
d_edge_colors = []
default = []
infected = []
nstart = []
ninfect = []
for node in network.nodes():
colorn = network.nodes[node]["color"]
if colorn == "#A0C8F0":
nstart.append(node)
colors.append(network.nodes[node]["color"])
elif colorn == "#30cc1f" or colorn == "red" or colorn == "purple":
ninfect.append(node)
colori.append(network.nodes[node]["color"])
for i,j in network.edges():
color = network.nodes[i]["color"]
if color == "#A0C8F0" or color == "#30cc1f" or color == "purple":
color = "#A6A6A6"
default.append((i,j))
d_edge_colors.append(color)
else:
color = "red"
infected.append((i,j))
i_edge_colors.append(color)
plt.figure(figsize=(30,20))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
#make density plot of infection
node_positions = {node[0]: (float(node[1]['lon']), float(node[1]['lat'])) for node in network.nodes(data=True)}
xp = []
yp = []
for node in network.nodes():
infec = network.nodes[node]["status"]
if infec == 'i':
xp.append(network.nodes[node]['lon'])
yp.append(network.nodes[node]['lat'])
if len(xp)>=1:
m1, m2 = np.array(xp).astype(float), np.array(yp).astype(float)
xmin = -180
xmax = 180
ymin = -90
ymax = 90
# get the density estimation
Xp, Yp = np.mgrid[xmin:xmax:250j, ymin:ymax:250j]
XpYp = np.vstack([Xp.ravel(), Yp.ravel()]).T
XpYp = np.radians(XpYp)
values = np.column_stack((np.array(np.vstack(m1)), np.array(np.vstack(m2))))
kernel = KernelDensity(bandwidth=0.035)
kernel.fit(np.radians(values))
#kernel = stats.gaussian_kde(values)
Z = np.exp(kernel.score_samples(XpYp))
Z = Z.reshape(Xp.shape)
# plot the result
cmap = plt.cm.jet
cmap.set_under('white')
plt.imshow(np.rot90(Z), norm = plt.Normalize(vmin=(Z.max()-(Z.max()*0.9)), vmax=Z.max()), cmap=cmap,
extent=[xmin, xmax, ymin, ymax], alpha=0.3, interpolation = 'gaussian')
# First pass - Gray lines
nx.draw_networkx_edges(network,pos=node_positions,edgelist=default,
width=0.005,
edge_color=d_edge_colors,
alpha=0.005,
arrows=False)
# Second Pass - Colored lines
nx.draw_networkx_edges(network,pos=node_positions,edgelist=infected,
width=0.1,
edge_color=i_edge_colors,
alpha=0.25,
arrows=False)
# first Pass - small nodes
nx.draw_networkx_nodes(network,
pos=node_positions,
nodelist=nstart,
linewidths=0.2,
node_size=5,
with_labels=False,
node_color = colors)
# Second Pass - large nodes
nx.draw_networkx_nodes(network,
pos=node_positions,
nodelist=ninfect,
linewidths=0.2,
node_size=20,
with_labels=False,
node_color = colori)
plt.axis('off')
number_files = str(len(os.listdir()))
while len(number_files) < 3:
number_files = "0" + number_files
plt.savefig("infection-{0}.png".format(number_files),
bbox_inches='tight', dpi=72
)
plt.show()
plt.close()
#%% BUILDING NETWORK
simulation = 0
for i in range (NUM_SIMULATIONS):
for effort in efforts:
#seed = 100
#random.seed(seed)
# Identify the script.
print("Flight Network Disease Simulator 1.0.0")
print("Modified by <NAME> from <NAME> and <NAME>\n\n")
#Simulation of the Pandemic
print("Setting Simulation Parameters.")
# Determine the parameters of the current simulation.
args = sys.argv[1:]
opts, args = getopt.getopt("brcsidv",["delay=","nsim="])
AIRPORT_DATA = args[0]
ROUTE_DATA = args[1]
# Make a new folder for the data.
subsim = (strategy + pad_string(simulation,4))
os.makedirs(subsim)
os.chdir(subsim)
# Create the network using the command arguments.
network = create_network(AIRPORT_DATA, ROUTE_DATA)
print("\tDetermining network type.")
# Determine if the graph is directed or undirected
if isinstance(network,nx.DiGraph):
network_type = "Directed"
else:
network_type = "Undirected"
print("\t\t[Done]")
print("\tCalculaing edges and verticies.")
# Number of verticies and edges
edges = network.number_of_edges()
verticies = network.number_of_nodes()
print("\t\t[Done]")
# Not every vertex can lead to every other vertex.
# Create a subgraph that can.
print("\tTemporarily converting to undirected.")
undirected = network.to_undirected()
print("\t\t[Done]")
print("\tFinding subgraphs.")
subgraphs = [undirected.subgraph(c).copy() for c in nx.connected_components(undirected)]
print("\t\t[Done]")
# Find the number of vertices in the diameter of the network
print("\tFinding network diameter.")
#diameter = nx.diameter(subgraphs[0])
print("\t\t[Done]")
print("\tStoring network parameters")
data_file = open("network.dat", "w")
data_file.write("Simulation name: ")
data_file.write("Network properties\n===============\n")
data_file.write("Network type: {0}\n".format(network_type))
data_file.write("Number of verticies: {0}\n".format(verticies))
data_file.write("Number of edges: {0}\n".format(edges))
#data_file.write("Diameter: {0}\n".format(diameter))
data_file.close()
print("\t\t[Done]")
print("\tRemoving international and/or domestic flights")
#Remove International and/or Domestic flights
if INTERNATIONAL == False:
etoi_remove = list(((u,v) for u,v,j in network.edges(data=True) if j['international']==True))
network.remove_edges_from(etoi_remove)
if DOMESTIC == False:
etod_remove = list(((u,v) for u,v,j in network.edges(data=True) if j['international']==False))
network.remove_edges_from(etod_remove)
print("\t\t[Done]")
#Drawing network
print("\tPlotting Network.")
node_positions = {node[0]: (float(node[1]['lon']), float(node[1]['lat'])) for node in network.nodes(data=True)}
plt.figure(figsize=(30,20))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
nx.draw_networkx_nodes(network, pos=node_positions, node_color = 'blue', linewidths=0.2, node_size=40, with_labels=False)
nx.draw_networkx_edges(network,pos=node_positions, edge_color='gray', width=0.2, alpha=0.6, arrows=False)
plt.axis('off')
plt.savefig("Air Transportation Network.png", bbox_inches='tight', dpi=300)
plt.show()
print("\t\t\t\t[Done]")
#%% RUNNING SIMULATION
#Creating flight cancellation list
print("{0} Cancellation Strategy Mode.".format(strategy) )
# Generate a sorted list of flights to cancel based on the strategy.
#Build cancellation edge pool
print("\tBuilding cancellation list")
edgepoolG = network.copy()
edgepool = list()
if INTER_cancel == True and DOM_cancel == False:
etoi_remove = list(((u,v) for u,v,j in network.edges(data=True) if j['international']==False))
edgepoolG.remove_edges_from(etoi_remove)
edgepool = list(edgepoolG.edges(data=True))
if DOM_cancel == True and INTER_cancel == False:
etod_remove = list(((u,v) for u,v,j in network.edges(data=True) if j['international']==True))
edgepoolG.remove_edges_from(etod_remove)
edgepool = list(edgepoolG.edges(data=True))
else:
edgepool = list(edgepoolG.edges(data=True))
cancellist = list()
if strategy != 'none':
cancellist = list()
if strategy == "random":
# Sort the edges randomly
cancellist = random.sample(edgepool, len(edgepool))
if strategy == "clustering":
# Sort the edges based on the sum of the clustering coefficient.
sorted_cluster = sorted(edgepool, key=lambda k: k[2]['cluster'], reverse=True)
for cluster_item in sorted_cluster:
if network[cluster_item[0]][cluster_item[1]]['cluster'] < 2:
if network[cluster_item[0]][cluster_item[1]]['cluster'] > 0:
cancellist.append((cluster_item[0], cluster_item[1]))
if strategy == "betweenness":
# Sort the edges based on weighted edge-betweenness.
betweennesses = nx.edge_betweenness_centrality(network, weight='weight')
cancellist = sorted(betweennesses.keys(), key=lambda k: betweennesses[k], reverse=True)
elif strategy == "custom" and len(custom_cancel)>0:
cancellist = list()
for (u,v) in edgepoolG.edges():
if edgepoolG.nodes[u]['country'] in custom_cancel or edgepoolG.nodes[v]['country'] in custom_cancel:
eremove=(u,v)
cancellist.append(eremove)
#print(cancellist[:20])
print(len(cancellist), " Flights available for cancellation")
print("\t\t[Done]")
#Open a file for this targets dataset
#output_file = open("{0}/{0}_{1}.csv".format(strategy, pad_string(simulation,4)),"w")
#output_file.write('"effort","total_infected, edges_closed"\n')
#Running simulation
if effort > 0:
max_index = int((len(cancellist) * (effort)/100)-1)
cancelled = cancellist[0:max_index]
else:
cancelled = None
title = "{0} - {1}%".format(strategy, effort/100)
results = infection(network, start, target, vis=VISUALIZE, title=title, DELAY=DELAY, Cancel_Delay=Cancel_Delay)
total_infected = results["Infected"] + results["Recovered"]
#output_file.write("{0},{1}\n".format(effort/100,total_infected))
#if total_infected == 1:
# for remaining_effort in range(effort+1):
# output_file.write("{0},{1}\n".format(remaining_effort/100, total_infected))
# break
simulation += 1
#iteration += 1
#output_file.close()
os.chdir(r"C:\Users\jeanphilippep\OneDrive - mirageoscience\PROJECTS\COVID-19")
os.chdir (currenttime)
#%%
strategy= 'custom' #cancellation strategy ("clustering", "betweenness", "random" or "custom" )
custom_cancel = ['China', 'Japan'] #List of countries whose flights are cancelled
INTER_cancel = True #Cancel international flights
DOM_cancel = True #Cancel domestic flights
Cancel_Delay = 32 #on which step to start cancelling flights
#Stemming efforts (in percent)
efforts = (100,)
RECALCULATE = False
args = sys.argv[1:]
opts, args = getopt.getopt("brcsidv",["delay=","nsim="])
AIRPORT_DATA = args[0]
ROUTE_DATA = args[1]
# Create the network using the command arguments.
network = create_network(AIRPORT_DATA, ROUTE_DATA)
edgepoolG = network.copy()
edgepool = list()
if INTER_cancel == True and DOM_cancel == False:
etoi_remove = list(((u,v) for u,v,j in network.edges(data=True) if j['international']==False))
edgepoolG.remove_edges_from(etoi_remove)
edgepool = list(edgepoolG.edges(data=True))
if DOM_cancel == True and INTER_cancel == False:
etod_remove = list(((u,v) for u,v,j in network.edges(data=True) if j['international']==True))
edgepoolG.remove_edges_from(etod_remove)
edgepool = list(edgepoolG.edges(data=True))
else:
edgepool = list(edgepoolG.edges(data=True))
cancellist = list()
if strategy != 'none':
cancellist = list()
if strategy == "random":
# Sort the edges randomly
cancellist = random.sample(edgepool, len(edgepool))
if strategy == "clustering":
# Sort the edges based on the sum of the clustering coefficient.
sorted_cluster = sorted(edgepool, key=lambda k: k[2]['cluster'], reverse=True)
for cluster_item in sorted_cluster:
if network[cluster_item[0]][cluster_item[1]]['cluster'] < 2:
if network[cluster_item[0]][cluster_item[1]]['cluster'] > 0:
cancellist.append((cluster_item[0], cluster_item[1]))
if strategy == "betweenness":
# Sort the edges based on weighted edge-betweenness.
betweennesses = nx.edge_betweenness_centrality(network, weight='weight')
cancellist = sorted(betweennesses.keys(), key=lambda k: betweennesses[k], reverse=True)
elif strategy == "custom" and len(custom_cancel)>0:
cancellist = list()
for (u,v) in edgepoolG.edges():
if edgepoolG.nodes[u]['country'] in custom_cancel or edgepoolG.nodes[v]['country'] in custom_cancel:
eremove=(u,v)
cancellist.append(eremove)
#print(cancellist[:20])
print(len(cancellist), " Flights available for cancellation")
print("\t\t[Done]")
effort = efforts[0]
if effort > 0:
max_index = int((len(cancellist) * (effort)/100)-1)
cancelled = cancellist[0:max_index]
else:
cancelled = None
network.remove_edges_from(cancelled)
# Recalculate the weights of the network as per necessary
if RECALCULATE == True:
network = calculate_weights(network)
node_positions = {node[0]: (float(node[1]['lon']), float(node[1]['lat'])) for node in network.nodes(data=True)}
plt.figure(figsize=(30,20))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
nx.draw_networkx_nodes(network, pos=node_positions, node_color = 'blue', linewidths=0.2, node_size=40, with_labels=False)
nx.draw_networkx_edges(network,pos=node_positions, edge_color='gray', width=0.2, alpha=0.6, arrows=False)
plt.axis('off')
plt.show()
``` |
{
"source": "jppang/circuitpython",
"score": 3
} |
#### File: circuitpython/docs/c2rst.py
```python
def c2rst(app, docname, source):
""" Pre-parse '.c' & '.h' files that contain rST source.
"""
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
fname = app.env.doc2path(docname)
if (not fname.endswith(".c") and
not fname.endswith(".h")):
#print("skipping:", fname)
return
src = source[0]
stripped = []
for line in src.split("\n"):
line = line.strip()
if line == "//|":
stripped.append("")
elif line.startswith("//| "):
stripped.append(line[len("//| "):])
stripped = "\r\n".join(stripped)
rendered = app.builder.templates.render_string(
stripped, app.config.html_context
)
source[0] = rendered
def setup(app):
app.connect("source-read", c2rst)
```
#### File: circuitpython/docs/rstjinja.py
```python
def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
# we only want our one jinja template to run through this func
if "shared-bindings/support_matrix" not in docname:
return
src = source[0]
print(docname)
rendered = app.builder.templates.render_string(
src, app.config.html_context
)
source[0] = rendered
def setup(app):
app.connect("source-read", rstjinja)
``` |
{
"source": "jppang/micropython",
"score": 2
} |
#### File: drivers/collection/sht25.py
```python
import time
class SHT25:
i2c = []
ADDR = 64
CMD_READ_TEMPERATURE = 0xF3
CMD_READ_HUMIDITY = 0xF5
CMD_READ_REGISTER = 0xE7
CMD_WRITE_REGISTER = 0xE6
CMD_RESET = 0xFE
def __init__(self, _i2c):
self.i2c = _i2c
def toTemperature(self, buf):
return -46.85 + 175.72 * ((buf[0] << 8) + buf[1]) /2**16
def toHumidity(self, buf):
return -6 + 125.0 * ((buf[0] << 8) + buf[1]) / 2**16
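# Worked example (illustrative): a raw temperature frame of buf = [0x63, 0x52]
# gives ((0x63 << 8) + 0x52) = 25426, so
# toTemperature -> -46.85 + 175.72 * 25426 / 2**16 ≈ 21.3 °C.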
def decodeUserReg(self, buf):
reg = buf[0]
ret = []
if(0b10000001 & reg == 0b10000001):
ret.append("11bits")
elif(0b10000001 & reg == 0b10000000):
ret.append("RH 10bit T 13bit")
elif(0b10000001 & reg == 0b00000001):
ret.append("RH 8bit T 12bit")
elif(0b10000001 & reg == 0b00000000):
ret.append("RH 12bit T 14bit")
if(0b01000000 & reg == 0b01000000):
ret.append("VDD < 2.5")
else:
ret.append("VDD > 2.5")
if(0b00000100 & reg == 0b00000100):
ret.append("heater ON")
else:
ret.append("heater OFF")
if(0b00000010 & reg == 0b00000010):
ret.append("OTP reload disabled")
else:
ret.append("OTP reload enabled")
return ret
def runI2CCommand(self, command, bytesToRead):
b = bytearray(1)
b[0] = command
self.i2c.writeto(self.ADDR, b)
if(bytesToRead > 0):
recv = bytearray(bytesToRead)
retryCounter = 0
done = False
while retryCounter < 15 and not done:
try:
self.i2c.readfrom_into(self.ADDR, recv)
done = True
except:
# count the failed attempt and back off briefly before retrying
retryCounter = retryCounter + 1
time.sleep(0.01)
return recv
def getTemperature(self):
return self.toTemperature(self.runI2CCommand(self.CMD_READ_TEMPERATURE, 3))
def getHumidity(self):
return self.toHumidity(self.runI2CCommand(self.CMD_READ_HUMIDITY, 3))
def getUserRegister(self):
return self.decodeUserReg(self.runI2CCommand(self.CMD_READ_REGISTER, 1))
def setUserRegister(self, register):
b = bytearray(2)
b[0] = self.CMD_WRITE_REGISTER
b[1] = register & 0b11000111
self.i2c.writeto(self.ADDR, b)
def reset(self):
self.runI2CCommand(self.CMD_RESET, 0)
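# Minimal usage sketch (assumes a MicroPython board with the machine module;
# the bus id and pin numbers below are placeholders, not part of this driver):
#
#     from machine import I2C, Pin
#     i2c = I2C(0, scl=Pin(22), sda=Pin(21))
#     sensor = SHT25(i2c)
#     print(sensor.getTemperature(), sensor.getHumidity())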
``` |
{
"source": "jpparent/AoC2017",
"score": 3
} |
#### File: jpparent/AoC2017/12-2.py
```python
f = open('12.input')
input = f.readlines()
f.close()
register = {}
for line in input:
id = line[:line.index('<')-1]
links = line[line.index('>')+1:].strip()
register[id] = links.split(', ')
groups = set([])
def recursiveAdd(key):
if key not in register:
return
if key in groups:
return
if key not in groups:
groups.add(key)
for x in register[key]:
recursiveAdd(x)
# this is pretty hackish...
# remove the elements that are part of a group
# and count each time we restart the recursive add
# which will start on an element that is naturally
# part of a different group
del register[key]
numGroups = 0
while len(register) > 0:
recursiveAdd(list(register)[0])
numGroups += 1
print(numGroups)
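# Note (illustrative, not in the original solution): for very large or deeply
# chained inputs the recursion above can hit Python's default recursion limit;
# it can be raised if needed, e.g.
#     import sys
#     sys.setrecursionlimit(10000)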
``` |
{
"source": "jpparent/aoc2020",
"score": 3
} |
#### File: jpparent/aoc2020/07a.py
```python
import re
f = open('07.txt')
rows = f.readlines()
f.close()
totalBagNames = set()
def findHolder(color):
global totalBagNames
for row in rows:
result = re.search(r'^(.+) bags \w+.* \d+ ' + color + r' \w+\W{1}.*', row)
if result:
totalBagNames.add(result.group(1))
findHolder(result.group(1))
findHolder('shiny gold')
print(len(totalBagNames))
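# Illustrative example of what the regex matches (puzzle input format assumed):
# for color = 'shiny gold', the rule line
#     "bright white bags contain 1 shiny gold bag."
# matches and group(1) captures "bright white", which is then searched for
# recursively as a holder in its own right.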
``` |
{
"source": "jppavez/fullstack",
"score": 3
} |
#### File: scraper/providers/BooksToScrape.py
```python
import requests
import re
from urllib.parse import urlparse
from bs4 import BeautifulSoup
class BooksToScrape():
def __init__(self):
self.MAIN_URL = "http://books.toscrape.com/"
def getSoup(self, url):
request = requests.get(url)
if request.status_code != 200:
raise Exception("Error fetching {}".format(url))
soup = BeautifulSoup(request.text, "html.parser")
return soup
def cleanUrl(self, url):
parsed_url = urlparse(url)
if url.endswith('.html'):
path_url = parsed_url.path
clean_path = path_url.rpartition('/')[0]
return parsed_url.scheme + '://' + parsed_url.netloc + clean_path + '/'
return url
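# Worked example (illustrative): cleanUrl(
#     "http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html")
# strips the trailing "index.html" and returns
#     "http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/",
# so relative image and book links can be resolved against the page's directory.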
def getCategories(self):
CATEGORIES_RESULT = []
soup = self.getSoup(self.MAIN_URL)
categories = soup.find('div', attrs={'class': 'side_categories'}).find_all('li')
if len(categories) <= 1:
raise Exception("No categories")
for category in categories[1:]:
category_a_element = category.find('a')
name = category_a_element.text.strip()
url = self.MAIN_URL + category_a_element['href']
CATEGORIES_RESULT.append((name, url))
return CATEGORIES_RESULT
def getBookFromCategory(self, category_url):
CATEGORY_URL = category_url
BOOKS_RESULTS = []
soup = self.getSoup(CATEGORY_URL)
HAS_NEXT_PAGE = soup.find('li', attrs={'class': 'next'})
books = soup.find_all('article')
for book in books:
a_element = book.find('h3').find('a')
url = a_element['href']
title = a_element['title']
BOOKS_RESULTS.append((title, self.cleanUrl(CATEGORY_URL) + url))
if HAS_NEXT_PAGE:
next_page_url = HAS_NEXT_PAGE.find('a')['href']
BOOKS_RESULTS += self.getBookFromCategory(
self.cleanUrl(CATEGORY_URL) + next_page_url)
return BOOKS_RESULTS
def getBookInformation(self, book_url):
BOOK_URL = book_url
soup = self.getSoup(BOOK_URL)
title = self._parseTitle(soup)
upc = self._parseUPC(soup)
price = self._parsePrice(soup)
thumbnail = self._parseThumbail(soup, book_url)
stock, stock_quantity = self._parseStock(soup)
description = self._parseProductDescription(soup)
return title, upc, price, thumbnail, stock, stock_quantity, description
def _parseTitle(self, soup_book_info):
product_main = soup_book_info.find('div', {'class': 'product_main'})
title = product_main.find('h1').text.strip()
return title
def _parseUPC(self, soup_book_info):
upc = soup_book_info.find('th', text="UPC").find_next_siblings('td')
return upc[0].text.strip()
def _parsePrice(self, soup_book_info):
product_main = soup_book_info.find('div', {'class': 'product_main'})
price = product_main.find('p', {'class': 'price_color'}).text.strip()
price = "".join([p for p in price if p.isnumeric() or p == '.' or p == ','])
return price
def _parseThumbail(self, soup_book_info, book_url):
clean_url = self.cleanUrl(book_url)
thumbnail = soup_book_info.find(
'div', {'id': 'product_gallery'}).find('img')['src']
return clean_url + thumbnail
def _parseStock(self, soup_book_info):
stock = soup_book_info.find('th', text='Availability').find_next_siblings('td')
if not stock:
return False, 0
stock_text = stock[0].text.strip()
if 'In stock' in stock_text:
m = re.search("(\d+) available", stock_text)
if m:
return True, m[1]
return False, 0
def _parseProductDescription(self, soup_book_info):
product_description = soup_book_info.find(
'div', {'id': 'product_description'})
if product_description:
product_description = product_description.find_next_siblings('p')
else:
return
return product_description[0].text.strip()
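# Minimal usage sketch (illustrative; performs live HTTP requests against
# books.toscrape.com, so network access is assumed):
#
#     scraper = BooksToScrape()
#     categories = scraper.getCategories()                  # list of (name, url) tuples
#     books = scraper.getBookFromCategory(categories[0][1]) # list of (title, book_url) tuples
#     print(scraper.getBookInformation(books[0][1]))        # title, upc, price, ...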
```
#### File: apps/scraper/views.py
```python
from __future__ import unicode_literals
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from .providers.BooksToScrape import BooksToScrape
from apps.base.models.Category import Category
from apps.base.models.Book import Book
def scrap_main(request):
scrap = BooksToScrape()
categories = scrap.getCategories()
for name, url in categories:
# Verificar que categoria ya no exista
category_exist = Category.objects.filter(name=name)
if category_exist:
continue
category = Category.create(name=name)
books = scrap.getBookFromCategory(url)
for book in books:
book_name = book[0]
book_info_url = book[1]
title, upc, price, thumbnail, stock, stock_quantity, description = scrap.getBookInformation(
book_info_url)
# UPC is unique
book_exist = Book.objects.filter(upc=upc)
if book_exist:
continue
book = Book.create(title=title,
upc=upc,
price=price,
thumbnail=thumbnail,
stock=stock,
stock_quantity=stock_quantity,
description=description)
category.books.add(book)
return HttpResponse("SUCCESS")
``` |
{
"source": "jppcel/controleCatraca",
"score": 3
} |
#### File: lib.linux-armv6l-2.7/max7219/font.py
```python
class proportional(object):
"""
Wraps an existing font array, and on on indexing, trims any leading
or trailing zero column definitions. This works especially well
with scrolling messages, as interspace columns are squeezed to a
single pixel.
"""
def __init__(self, font):
self.font = font
def __getitem__(self, asciiCode):
bitmap = self.font[asciiCode]
# Don't trim the space character down
if asciiCode == 32:
return bitmap
else:
return self._trim(bitmap) + [0]
def _trim(self, arr):
nonzero = [idx for idx, val in enumerate(arr) if val != 0]
if not nonzero:
return []
first = nonzero[0]
last = nonzero[-1]+1
return arr[first:last]
# bit patterns for the CP437 font
# see https://en.wikipedia.org/wiki/Code_page_437 for details
CP437_FONT = [
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x00
[0x7E, 0x81, 0x95, 0xB1, 0xB1, 0x95, 0x81, 0x7E], # 0x01
[0x7E, 0xFF, 0xEB, 0xCF, 0xCF, 0xEB, 0xFF, 0x7E], # 0x02
[0x0E, 0x1F, 0x3F, 0x7E, 0x3F, 0x1F, 0x0E, 0x00], # 0x03
[0x08, 0x1C, 0x3E, 0x7F, 0x3E, 0x1C, 0x08, 0x00], # 0x04
[0x18, 0xBA, 0xFF, 0xFF, 0xFF, 0xBA, 0x18, 0x00], # 0x05
[0x10, 0xB8, 0xFC, 0xFF, 0xFC, 0xB8, 0x10, 0x00], # 0x06
[0x00, 0x00, 0x18, 0x3C, 0x3C, 0x18, 0x00, 0x00], # 0x07
[0xFF, 0xFF, 0xE7, 0xC3, 0xC3, 0xE7, 0xFF, 0xFF], # 0x08
[0x00, 0x3C, 0x66, 0x42, 0x42, 0x66, 0x3C, 0x00], # 0x09
[0xFF, 0xC3, 0x99, 0xBD, 0xBD, 0x99, 0xC3, 0xFF], # 0x0A
[0x70, 0xF8, 0x88, 0x88, 0xFD, 0x7F, 0x07, 0x0F], # 0x0B
[0x00, 0x4E, 0x5F, 0xF1, 0xF1, 0x5F, 0x4E, 0x00], # 0x0C
[0xC0, 0xE0, 0xFF, 0x7F, 0x05, 0x05, 0x07, 0x07], # 0x0D
[0xC0, 0xFF, 0x7F, 0x05, 0x05, 0x65, 0x7F, 0x3F], # 0x0E
[0x99, 0x5A, 0x3C, 0xE7, 0xE7, 0x3C, 0x5A, 0x99], # 0x0F
[0x7F, 0x3E, 0x3E, 0x1C, 0x1C, 0x08, 0x08, 0x00], # 0x10
[0x08, 0x08, 0x1C, 0x1C, 0x3E, 0x3E, 0x7F, 0x00], # 0x11
[0x00, 0x24, 0x66, 0xFF, 0xFF, 0x66, 0x24, 0x00], # 0x12
[0x00, 0x5F, 0x5F, 0x00, 0x00, 0x5F, 0x5F, 0x00], # 0x13
[0x06, 0x0F, 0x09, 0x7F, 0x7F, 0x01, 0x7F, 0x7F], # 0x14
[0x40, 0xDA, 0xBF, 0xA5, 0xFD, 0x59, 0x03, 0x02], # 0x15
[0x00, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x00], # 0x16
[0x80, 0x94, 0xB6, 0xFF, 0xFF, 0xB6, 0x94, 0x80], # 0x17
[0x00, 0x04, 0x06, 0x7F, 0x7F, 0x06, 0x04, 0x00], # 0x18
[0x00, 0x10, 0x30, 0x7F, 0x7F, 0x30, 0x10, 0x00], # 0x19
[0x08, 0x08, 0x08, 0x2A, 0x3E, 0x1C, 0x08, 0x00], # 0x1A
[0x08, 0x1C, 0x3E, 0x2A, 0x08, 0x08, 0x08, 0x00], # 0x1B
[0x3C, 0x3C, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00], # 0x1C
[0x08, 0x1C, 0x3E, 0x08, 0x08, 0x3E, 0x1C, 0x08], # 0x1D
[0x30, 0x38, 0x3C, 0x3E, 0x3E, 0x3C, 0x38, 0x30], # 0x1E
[0x06, 0x0E, 0x1E, 0x3E, 0x3E, 0x1E, 0x0E, 0x06], # 0x1F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # ' '
[0x00, 0x06, 0x5F, 0x5F, 0x06, 0x00, 0x00, 0x00], # '!'
[0x00, 0x07, 0x07, 0x00, 0x07, 0x07, 0x00, 0x00], # '"'
[0x14, 0x7F, 0x7F, 0x14, 0x7F, 0x7F, 0x14, 0x00], # '#'
[0x24, 0x2E, 0x6B, 0x6B, 0x3A, 0x12, 0x00, 0x00], # '$'
[0x46, 0x66, 0x30, 0x18, 0x0C, 0x66, 0x62, 0x00], # '%'
[0x30, 0x7A, 0x4F, 0x5D, 0x37, 0x7A, 0x48, 0x00], # '&'
[0x04, 0x07, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00], # '''
[0x00, 0x1C, 0x3E, 0x63, 0x41, 0x00, 0x00, 0x00], # '('
[0x00, 0x41, 0x63, 0x3E, 0x1C, 0x00, 0x00, 0x00], # ')'
[0x08, 0x2A, 0x3E, 0x1C, 0x1C, 0x3E, 0x2A, 0x08], # '*'
[0x08, 0x08, 0x3E, 0x3E, 0x08, 0x08, 0x00, 0x00], # '+'
[0x00, 0x80, 0xE0, 0x60, 0x00, 0x00, 0x00, 0x00], # ','
[0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00], # '-'
[0x00, 0x00, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00], # '.'
[0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x01, 0x00], # '/'
[0x3E, 0x7F, 0x71, 0x59, 0x4D, 0x7F, 0x3E, 0x00], # '0'
[0x40, 0x42, 0x7F, 0x7F, 0x40, 0x40, 0x00, 0x00], # '1'
[0x62, 0x73, 0x59, 0x49, 0x6F, 0x66, 0x00, 0x00], # '2'
[0x22, 0x63, 0x49, 0x49, 0x7F, 0x36, 0x00, 0x00], # '3'
[0x18, 0x1C, 0x16, 0x53, 0x7F, 0x7F, 0x50, 0x00], # '4'
[0x27, 0x67, 0x45, 0x45, 0x7D, 0x39, 0x00, 0x00], # '5'
[0x3C, 0x7E, 0x4B, 0x49, 0x79, 0x30, 0x00, 0x00], # '6'
[0x03, 0x03, 0x71, 0x79, 0x0F, 0x07, 0x00, 0x00], # '7'
[0x36, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00, 0x00], # '8'
[0x06, 0x4F, 0x49, 0x69, 0x3F, 0x1E, 0x00, 0x00], # '9'
[0x00, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00], # ':'
[0x00, 0x80, 0xE6, 0x66, 0x00, 0x00, 0x00, 0x00], # ';'
[0x08, 0x1C, 0x36, 0x63, 0x41, 0x00, 0x00, 0x00], # '<'
[0x24, 0x24, 0x24, 0x24, 0x24, 0x24, 0x00, 0x00], # '='
[0x00, 0x41, 0x63, 0x36, 0x1C, 0x08, 0x00, 0x00], # '>'
[0x02, 0x03, 0x51, 0x59, 0x0F, 0x06, 0x00, 0x00], # '?'
[0x3E, 0x7F, 0x41, 0x5D, 0x5D, 0x1F, 0x1E, 0x00], # '@'
[0x7C, 0x7E, 0x13, 0x13, 0x7E, 0x7C, 0x00, 0x00], # 'A'
[0x41, 0x7F, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00], # 'B'
[0x1C, 0x3E, 0x63, 0x41, 0x41, 0x63, 0x22, 0x00], # 'C'
[0x41, 0x7F, 0x7F, 0x41, 0x63, 0x3E, 0x1C, 0x00], # 'D'
[0x41, 0x7F, 0x7F, 0x49, 0x5D, 0x41, 0x63, 0x00], # 'E'
[0x41, 0x7F, 0x7F, 0x49, 0x1D, 0x01, 0x03, 0x00], # 'F'
[0x1C, 0x3E, 0x63, 0x41, 0x51, 0x73, 0x72, 0x00], # 'G'
[0x7F, 0x7F, 0x08, 0x08, 0x7F, 0x7F, 0x00, 0x00], # 'H'
[0x00, 0x41, 0x7F, 0x7F, 0x41, 0x00, 0x00, 0x00], # 'I'
[0x30, 0x70, 0x40, 0x41, 0x7F, 0x3F, 0x01, 0x00], # 'J'
[0x41, 0x7F, 0x7F, 0x08, 0x1C, 0x77, 0x63, 0x00], # 'K'
[0x41, 0x7F, 0x7F, 0x41, 0x40, 0x60, 0x70, 0x00], # 'L'
[0x7F, 0x7F, 0x0E, 0x1C, 0x0E, 0x7F, 0x7F, 0x00], # 'M'
[0x7F, 0x7F, 0x06, 0x0C, 0x18, 0x7F, 0x7F, 0x00], # 'N'
[0x1C, 0x3E, 0x63, 0x41, 0x63, 0x3E, 0x1C, 0x00], # 'O'
[0x41, 0x7F, 0x7F, 0x49, 0x09, 0x0F, 0x06, 0x00], # 'P'
[0x1E, 0x3F, 0x21, 0x71, 0x7F, 0x5E, 0x00, 0x00], # 'Q'
[0x41, 0x7F, 0x7F, 0x09, 0x19, 0x7F, 0x66, 0x00], # 'R'
[0x26, 0x6F, 0x4D, 0x59, 0x73, 0x32, 0x00, 0x00], # 'S'
[0x03, 0x41, 0x7F, 0x7F, 0x41, 0x03, 0x00, 0x00], # 'T'
[0x7F, 0x7F, 0x40, 0x40, 0x7F, 0x7F, 0x00, 0x00], # 'U'
[0x1F, 0x3F, 0x60, 0x60, 0x3F, 0x1F, 0x00, 0x00], # 'V'
[0x7F, 0x7F, 0x30, 0x18, 0x30, 0x7F, 0x7F, 0x00], # 'W'
[0x43, 0x67, 0x3C, 0x18, 0x3C, 0x67, 0x43, 0x00], # 'X'
[0x07, 0x4F, 0x78, 0x78, 0x4F, 0x07, 0x00, 0x00], # 'Y'
[0x47, 0x63, 0x71, 0x59, 0x4D, 0x67, 0x73, 0x00], # 'Z'
[0x00, 0x7F, 0x7F, 0x41, 0x41, 0x00, 0x00, 0x00], # '['
[0x01, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00], # backslash
[0x00, 0x41, 0x41, 0x7F, 0x7F, 0x00, 0x00, 0x00], # ']'
[0x08, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x08, 0x00], # '^'
[0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80], # '_'
[0x00, 0x00, 0x03, 0x07, 0x04, 0x00, 0x00, 0x00], # '`'
[0x20, 0x74, 0x54, 0x54, 0x3C, 0x78, 0x40, 0x00], # 'a'
[0x41, 0x7F, 0x3F, 0x48, 0x48, 0x78, 0x30, 0x00], # 'b'
[0x38, 0x7C, 0x44, 0x44, 0x6C, 0x28, 0x00, 0x00], # 'c'
[0x30, 0x78, 0x48, 0x49, 0x3F, 0x7F, 0x40, 0x00], # 'd'
[0x38, 0x7C, 0x54, 0x54, 0x5C, 0x18, 0x00, 0x00], # 'e'
[0x48, 0x7E, 0x7F, 0x49, 0x03, 0x02, 0x00, 0x00], # 'f'
[0x98, 0xBC, 0xA4, 0xA4, 0xF8, 0x7C, 0x04, 0x00], # 'g'
[0x41, 0x7F, 0x7F, 0x08, 0x04, 0x7C, 0x78, 0x00], # 'h'
[0x00, 0x44, 0x7D, 0x7D, 0x40, 0x00, 0x00, 0x00], # 'i'
[0x60, 0xE0, 0x80, 0x80, 0xFD, 0x7D, 0x00, 0x00], # 'j'
[0x41, 0x7F, 0x7F, 0x10, 0x38, 0x6C, 0x44, 0x00], # 'k'
[0x00, 0x41, 0x7F, 0x7F, 0x40, 0x00, 0x00, 0x00], # 'l'
[0x7C, 0x7C, 0x18, 0x38, 0x1C, 0x7C, 0x78, 0x00], # 'm'
[0x7C, 0x7C, 0x04, 0x04, 0x7C, 0x78, 0x00, 0x00], # 'n'
[0x38, 0x7C, 0x44, 0x44, 0x7C, 0x38, 0x00, 0x00], # 'o'
[0x84, 0xFC, 0xF8, 0xA4, 0x24, 0x3C, 0x18, 0x00], # 'p'
[0x18, 0x3C, 0x24, 0xA4, 0xF8, 0xFC, 0x84, 0x00], # 'q'
[0x44, 0x7C, 0x78, 0x4C, 0x04, 0x1C, 0x18, 0x00], # 'r'
[0x48, 0x5C, 0x54, 0x54, 0x74, 0x24, 0x00, 0x00], # 's'
[0x00, 0x04, 0x3E, 0x7F, 0x44, 0x24, 0x00, 0x00], # 't'
[0x3C, 0x7C, 0x40, 0x40, 0x3C, 0x7C, 0x40, 0x00], # 'u'
[0x1C, 0x3C, 0x60, 0x60, 0x3C, 0x1C, 0x00, 0x00], # 'v'
[0x3C, 0x7C, 0x70, 0x38, 0x70, 0x7C, 0x3C, 0x00], # 'w'
[0x44, 0x6C, 0x38, 0x10, 0x38, 0x6C, 0x44, 0x00], # 'x'
[0x9C, 0xBC, 0xA0, 0xA0, 0xFC, 0x7C, 0x00, 0x00], # 'y'
[0x4C, 0x64, 0x74, 0x5C, 0x4C, 0x64, 0x00, 0x00], # 'z'
[0x08, 0x08, 0x3E, 0x77, 0x41, 0x41, 0x00, 0x00], # '{'
[0x00, 0x00, 0x00, 0x77, 0x77, 0x00, 0x00, 0x00], # '|'
[0x41, 0x41, 0x77, 0x3E, 0x08, 0x08, 0x00, 0x00], # '}'
[0x02, 0x03, 0x01, 0x03, 0x02, 0x03, 0x01, 0x00], # '~'
[0x70, 0x78, 0x4C, 0x46, 0x4C, 0x78, 0x70, 0x00], # 0x7F
[0x0E, 0x9F, 0x91, 0xB1, 0xFB, 0x4A, 0x00, 0x00], # 0x80
[0x3A, 0x7A, 0x40, 0x40, 0x7A, 0x7A, 0x40, 0x00], # 0x81
[0x38, 0x7C, 0x54, 0x55, 0x5D, 0x19, 0x00, 0x00], # 0x82
[0x02, 0x23, 0x75, 0x55, 0x55, 0x7D, 0x7B, 0x42], # 0x83
[0x21, 0x75, 0x54, 0x54, 0x7D, 0x79, 0x40, 0x00], # 0x84
[0x21, 0x75, 0x55, 0x54, 0x7C, 0x78, 0x40, 0x00], # 0x85
[0x20, 0x74, 0x57, 0x57, 0x7C, 0x78, 0x40, 0x00], # 0x86
[0x18, 0x3C, 0xA4, 0xA4, 0xE4, 0x40, 0x00, 0x00], # 0x87
[0x02, 0x3B, 0x7D, 0x55, 0x55, 0x5D, 0x1B, 0x02], # 0x88
[0x39, 0x7D, 0x54, 0x54, 0x5D, 0x19, 0x00, 0x00], # 0x89
[0x39, 0x7D, 0x55, 0x54, 0x5C, 0x18, 0x00, 0x00], # 0x8A
[0x01, 0x45, 0x7C, 0x7C, 0x41, 0x01, 0x00, 0x00], # 0x8B
[0x02, 0x03, 0x45, 0x7D, 0x7D, 0x43, 0x02, 0x00], # 0x8C
[0x01, 0x45, 0x7D, 0x7C, 0x40, 0x00, 0x00, 0x00], # 0x8D
[0x79, 0x7D, 0x16, 0x12, 0x16, 0x7D, 0x79, 0x00], # 0x8E
[0x70, 0x78, 0x2B, 0x2B, 0x78, 0x70, 0x00, 0x00], # 0x8F
[0x44, 0x7C, 0x7C, 0x55, 0x55, 0x45, 0x00, 0x00], # 0x90
[0x20, 0x74, 0x54, 0x54, 0x7C, 0x7C, 0x54, 0x54], # 0x91
[0x7C, 0x7E, 0x0B, 0x09, 0x7F, 0x7F, 0x49, 0x00], # 0x92
[0x32, 0x7B, 0x49, 0x49, 0x7B, 0x32, 0x00, 0x00], # 0x93
[0x32, 0x7A, 0x48, 0x48, 0x7A, 0x32, 0x00, 0x00], # 0x94
[0x32, 0x7A, 0x4A, 0x48, 0x78, 0x30, 0x00, 0x00], # 0x95
[0x3A, 0x7B, 0x41, 0x41, 0x7B, 0x7A, 0x40, 0x00], # 0x96
[0x3A, 0x7A, 0x42, 0x40, 0x78, 0x78, 0x40, 0x00], # 0x97
[0x9A, 0xBA, 0xA0, 0xA0, 0xFA, 0x7A, 0x00, 0x00], # 0x98
[0x01, 0x19, 0x3C, 0x66, 0x66, 0x3C, 0x19, 0x01], # 0x99
[0x3D, 0x7D, 0x40, 0x40, 0x7D, 0x3D, 0x00, 0x00], # 0x9A
[0x18, 0x3C, 0x24, 0xE7, 0xE7, 0x24, 0x24, 0x00], # 0x9B
[0x68, 0x7E, 0x7F, 0x49, 0x43, 0x66, 0x20, 0x00], # 0x9C
[0x2B, 0x2F, 0xFC, 0xFC, 0x2F, 0x2B, 0x00, 0x00], # 0x9D
[0xFF, 0xFF, 0x09, 0x09, 0x2F, 0xF6, 0xF8, 0xA0], # 0x9E
[0x40, 0xC0, 0x88, 0xFE, 0x7F, 0x09, 0x03, 0x02], # 0x9F
[0x20, 0x74, 0x54, 0x55, 0x7D, 0x79, 0x40, 0x00], # 0xA0
[0x00, 0x44, 0x7D, 0x7D, 0x41, 0x00, 0x00, 0x00], # 0xA1
[0x30, 0x78, 0x48, 0x4A, 0x7A, 0x32, 0x00, 0x00], # 0xA2
[0x38, 0x78, 0x40, 0x42, 0x7A, 0x7A, 0x40, 0x00], # 0xA3
[0x7A, 0x7A, 0x0A, 0x0A, 0x7A, 0x70, 0x00, 0x00], # 0xA4
[0x7D, 0x7D, 0x19, 0x31, 0x7D, 0x7D, 0x00, 0x00], # 0xA5
[0x00, 0x26, 0x2F, 0x29, 0x2F, 0x2F, 0x28, 0x00], # 0xA6
[0x00, 0x26, 0x2F, 0x29, 0x2F, 0x26, 0x00, 0x00], # 0xA7
[0x30, 0x78, 0x4D, 0x45, 0x60, 0x20, 0x00, 0x00], # 0xA8
[0x38, 0x38, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00], # 0xA9
[0x08, 0x08, 0x08, 0x08, 0x38, 0x38, 0x00, 0x00], # 0xAA
[0x4F, 0x6F, 0x30, 0x18, 0xCC, 0xEE, 0xBB, 0x91], # 0xAB
[0x4F, 0x6F, 0x30, 0x18, 0x6C, 0x76, 0xFB, 0xF9], # 0xAC
[0x00, 0x00, 0x00, 0x7B, 0x7B, 0x00, 0x00, 0x00], # 0xAD
[0x08, 0x1C, 0x36, 0x22, 0x08, 0x1C, 0x36, 0x22], # 0xAE
[0x22, 0x36, 0x1C, 0x08, 0x22, 0x36, 0x1C, 0x08], # 0xAF
[0xAA, 0x00, 0x55, 0x00, 0xAA, 0x00, 0x55, 0x00], # 0xB0
[0xAA, 0x55, 0xAA, 0x55, 0xAA, 0x55, 0xAA, 0x55], # 0xB1
[0xDD, 0xFF, 0xAA, 0x77, 0xDD, 0xAA, 0xFF, 0x77], # 0xB2
[0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00], # 0xB3
[0x10, 0x10, 0x10, 0xFF, 0xFF, 0x00, 0x00, 0x00], # 0xB4
[0x14, 0x14, 0x14, 0xFF, 0xFF, 0x00, 0x00, 0x00], # 0xB5
[0x10, 0x10, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x00], # 0xB6
[0x10, 0x10, 0xF0, 0xF0, 0x10, 0xF0, 0xF0, 0x00], # 0xB7
[0x14, 0x14, 0x14, 0xFC, 0xFC, 0x00, 0x00, 0x00], # 0xB8
[0x14, 0x14, 0xF7, 0xF7, 0x00, 0xFF, 0xFF, 0x00], # 0xB9
[0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x00], # 0xBA
[0x14, 0x14, 0xF4, 0xF4, 0x04, 0xFC, 0xFC, 0x00], # 0xBB
[0x14, 0x14, 0x17, 0x17, 0x10, 0x1F, 0x1F, 0x00], # 0xBC
[0x10, 0x10, 0x1F, 0x1F, 0x10, 0x1F, 0x1F, 0x00], # 0xBD
[0x14, 0x14, 0x14, 0x1F, 0x1F, 0x00, 0x00, 0x00], # 0xBE
[0x10, 0x10, 0x10, 0xF0, 0xF0, 0x00, 0x00, 0x00], # 0xBF
[0x00, 0x00, 0x00, 0x1F, 0x1F, 0x10, 0x10, 0x10], # 0xC0
[0x10, 0x10, 0x10, 0x1F, 0x1F, 0x10, 0x10, 0x10], # 0xC1
[0x10, 0x10, 0x10, 0xF0, 0xF0, 0x10, 0x10, 0x10], # 0xC2
[0x00, 0x00, 0x00, 0xFF, 0xFF, 0x10, 0x10, 0x10], # 0xC3
[0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10], # 0xC4
[0x10, 0x10, 0x10, 0xFF, 0xFF, 0x10, 0x10, 0x10], # 0xC5
[0x00, 0x00, 0x00, 0xFF, 0xFF, 0x14, 0x14, 0x14], # 0xC6
[0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x10], # 0xC7
[0x00, 0x00, 0x1F, 0x1F, 0x10, 0x17, 0x17, 0x14], # 0xC8
[0x00, 0x00, 0xFC, 0xFC, 0x04, 0xF4, 0xF4, 0x14], # 0xC9
[0x14, 0x14, 0x17, 0x17, 0x10, 0x17, 0x17, 0x14], # 0xCA
[0x14, 0x14, 0xF4, 0xF4, 0x04, 0xF4, 0xF4, 0x14], # 0xCB
[0x00, 0x00, 0xFF, 0xFF, 0x00, 0xF7, 0xF7, 0x14], # 0xCC
[0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14], # 0xCD
[0x14, 0x14, 0xF7, 0xF7, 0x00, 0xF7, 0xF7, 0x14], # 0xCE
[0x14, 0x14, 0x14, 0x17, 0x17, 0x14, 0x14, 0x14], # 0xCF
[0x10, 0x10, 0x1F, 0x1F, 0x10, 0x1F, 0x1F, 0x10], # 0xD0
[0x14, 0x14, 0x14, 0xF4, 0xF4, 0x14, 0x14, 0x14], # 0xD1
[0x10, 0x10, 0xF0, 0xF0, 0x10, 0xF0, 0xF0, 0x10], # 0xD2
[0x00, 0x00, 0x1F, 0x1F, 0x10, 0x1F, 0x1F, 0x10], # 0xD3
[0x00, 0x00, 0x00, 0x1F, 0x1F, 0x14, 0x14, 0x14], # 0xD4
[0x00, 0x00, 0x00, 0xFC, 0xFC, 0x14, 0x14, 0x14], # 0xD5
[0x00, 0x00, 0xF0, 0xF0, 0x10, 0xF0, 0xF0, 0x10], # 0xD6
[0x10, 0x10, 0xFF, 0xFF, 0x10, 0xFF, 0xFF, 0x10], # 0xD7
[0x14, 0x14, 0x14, 0xFF, 0xFF, 0x14, 0x14, 0x14], # 0xD8
[0x10, 0x10, 0x10, 0x1F, 0x1F, 0x00, 0x00, 0x00], # 0xD9
[0x00, 0x00, 0x00, 0xF0, 0xF0, 0x10, 0x10, 0x10], # 0xDA
[0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF], # 0xDB
[0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0], # 0xDC
[0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00], # 0xDD
[0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF], # 0xDE
[0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F], # 0xDF
[0x38, 0x7C, 0x44, 0x6C, 0x38, 0x6C, 0x44, 0x00], # 0xE0
[0xFC, 0xFE, 0x2A, 0x2A, 0x3E, 0x14, 0x00, 0x00], # 0xE1
[0x7E, 0x7E, 0x02, 0x02, 0x06, 0x06, 0x00, 0x00], # 0xE2
[0x02, 0x7E, 0x7E, 0x02, 0x7E, 0x7E, 0x02, 0x00], # 0xE3
[0x63, 0x77, 0x5D, 0x49, 0x63, 0x63, 0x00, 0x00], # 0xE4
[0x38, 0x7C, 0x44, 0x7C, 0x3C, 0x04, 0x04, 0x00], # 0xE5
[0x80, 0xFE, 0x7E, 0x20, 0x20, 0x3E, 0x1E, 0x00], # 0xE6
[0x04, 0x06, 0x02, 0x7E, 0x7C, 0x06, 0x02, 0x00], # 0xE7
[0x99, 0xBD, 0xE7, 0xE7, 0xBD, 0x99, 0x00, 0x00], # 0xE8
[0x1C, 0x3E, 0x6B, 0x49, 0x6B, 0x3E, 0x1C, 0x00], # 0xE9
[0x4C, 0x7E, 0x73, 0x01, 0x73, 0x7E, 0x4C, 0x00], # 0xEA
[0x30, 0x78, 0x4A, 0x4F, 0x7D, 0x39, 0x00, 0x00], # 0xEB
[0x18, 0x3C, 0x24, 0x3C, 0x3C, 0x24, 0x3C, 0x18], # 0xEC
[0x98, 0xFC, 0x64, 0x3C, 0x3E, 0x27, 0x3D, 0x18], # 0xED
[0x1C, 0x3E, 0x6B, 0x49, 0x49, 0x00, 0x00, 0x00], # 0xEE
[0x7E, 0x7F, 0x01, 0x01, 0x7F, 0x7E, 0x00, 0x00], # 0xEF
[0x2A, 0x2A, 0x2A, 0x2A, 0x2A, 0x2A, 0x00, 0x00], # 0xF0
[0x44, 0x44, 0x5F, 0x5F, 0x44, 0x44, 0x00, 0x00], # 0xF1
[0x40, 0x51, 0x5B, 0x4E, 0x44, 0x40, 0x00, 0x00], # 0xF2
[0x40, 0x44, 0x4E, 0x5B, 0x51, 0x40, 0x00, 0x00], # 0xF3
[0x00, 0x00, 0x00, 0xFE, 0xFF, 0x01, 0x07, 0x06], # 0xF4
[0x60, 0xE0, 0x80, 0xFF, 0x7F, 0x00, 0x00, 0x00], # 0xF5
[0x08, 0x08, 0x6B, 0x6B, 0x08, 0x08, 0x00, 0x00], # 0xF6
[0x24, 0x36, 0x12, 0x36, 0x24, 0x36, 0x12, 0x00], # 0xF7
[0x00, 0x06, 0x0F, 0x09, 0x0F, 0x06, 0x00, 0x00], # 0xF8
[0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00], # 0xF9
[0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00], # 0xFA
[0x10, 0x30, 0x70, 0xC0, 0xFF, 0xFF, 0x01, 0x01], # 0xFB
[0x00, 0x1F, 0x1F, 0x01, 0x1F, 0x1E, 0x00, 0x00], # 0xFC
[0x00, 0x19, 0x1D, 0x17, 0x12, 0x00, 0x00, 0x00], # 0xFD
[0x00, 0x00, 0x3C, 0x3C, 0x3C, 0x3C, 0x00, 0x00], # 0xFE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFF
] # end of CP437_FONT
# -----------------------------------------------------------
# Bit patterns for SINCLAIR_FONT
# (based on the character set from the Sinclair ZX Spectrum)
# Source: www.henningkarlsen.com/electronics/r_fonts.php
# Transposed by JLCArchibald
# Note: Only contains characters 0x20 - 0x7E inclusive
# All others will appear as blanks
SINCLAIR_FONT = [
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x00
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x01
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x02
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x03
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x04
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x05
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x06
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x07
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x08
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x09
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x10
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x11
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x12
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x13
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x14
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x15
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x16
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x17
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x18
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x19
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # ' '
[0x00, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00], # '!'
[0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00], # '"'
[0x00, 0x24, 0x7E, 0x24, 0x24, 0x7E, 0x24, 0x00], # '#'
[0x00, 0x2E, 0x2A, 0x7F, 0x2A, 0x3A, 0x00, 0x00], # '$'
[0x00, 0x46, 0x26, 0x10, 0x08, 0x64, 0x62, 0x00], # '%'
[0x00, 0x20, 0x54, 0x4A, 0x54, 0x20, 0x50, 0x00], # '&'
[0x00, 0x00, 0x00, 0x04, 0x02, 0x00, 0x00, 0x00], # '''
[0x00, 0x00, 0x00, 0x3C, 0x42, 0x00, 0x00, 0x00], # '('
[0x00, 0x00, 0x00, 0x42, 0x3C, 0x00, 0x00, 0x00], # ')'
[0x00, 0x10, 0x54, 0x38, 0x54, 0x10, 0x00, 0x00], # '*'
[0x00, 0x10, 0x10, 0x7C, 0x10, 0x10, 0x00, 0x00], # '+'
[0x00, 0x00, 0x00, 0x80, 0x60, 0x00, 0x00, 0x00], # ','
[0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00], # '-'
[0x00, 0x00, 0x00, 0x60, 0x60, 0x00, 0x00, 0x00], # '.'
[0x00, 0x40, 0x20, 0x10, 0x08, 0x04, 0x00, 0x00], # '/'
[0x3C, 0x62, 0x52, 0x4A, 0x46, 0x3C, 0x00, 0x00], # '0'
[0x44, 0x42, 0x7E, 0x40, 0x40, 0x00, 0x00, 0x00], # '1'
[0x64, 0x52, 0x52, 0x52, 0x52, 0x4C, 0x00, 0x00], # '2'
[0x24, 0x42, 0x42, 0x4A, 0x4A, 0x34, 0x00, 0x00], # '3'
[0x30, 0x28, 0x24, 0x7E, 0x20, 0x20, 0x00, 0x00], # '4'
[0x2E, 0x4A, 0x4A, 0x4A, 0x4A, 0x32, 0x00, 0x00], # '5'
[0x3C, 0x4A, 0x4A, 0x4A, 0x4A, 0x30, 0x00, 0x00], # '6'
[0x02, 0x02, 0x62, 0x12, 0x0A, 0x06, 0x00, 0x00], # '7'
[0x34, 0x4A, 0x4A, 0x4A, 0x4A, 0x34, 0x00, 0x00], # '8'
[0x0C, 0x52, 0x52, 0x52, 0x52, 0x3C, 0x00, 0x00], # '9'
[0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00], # ':'
[0x00, 0x00, 0x80, 0x64, 0x00, 0x00, 0x00, 0x00], # ';'
[0x00, 0x00, 0x10, 0x28, 0x44, 0x00, 0x00, 0x00], # '<'
[0x00, 0x28, 0x28, 0x28, 0x28, 0x28, 0x00, 0x00], # '='
[0x00, 0x00, 0x44, 0x28, 0x10, 0x00, 0x00, 0x00], # '>'
[0x00, 0x04, 0x02, 0x02, 0x52, 0x0A, 0x04, 0x00], # '?'
[0x00, 0x3C, 0x42, 0x5A, 0x56, 0x5A, 0x1C, 0x00], # '@'
[0x7C, 0x12, 0x12, 0x12, 0x12, 0x7C, 0x00, 0x00], # 'A'
[0x7E, 0x4A, 0x4A, 0x4A, 0x4A, 0x34, 0x00, 0x00], # 'B'
[0x3C, 0x42, 0x42, 0x42, 0x42, 0x24, 0x00, 0x00], # 'C'
[0x7E, 0x42, 0x42, 0x42, 0x24, 0x18, 0x00, 0x00], # 'D'
[0x7E, 0x4A, 0x4A, 0x4A, 0x4A, 0x42, 0x00, 0x00], # 'E'
[0x7E, 0x0A, 0x0A, 0x0A, 0x0A, 0x02, 0x00, 0x00], # 'F'
[0x3C, 0x42, 0x42, 0x52, 0x52, 0x34, 0x00, 0x00], # 'G'
[0x7E, 0x08, 0x08, 0x08, 0x08, 0x7E, 0x00, 0x00], # 'H'
[0x00, 0x42, 0x42, 0x7E, 0x42, 0x42, 0x00, 0x00], # 'I'
[0x30, 0x40, 0x40, 0x40, 0x40, 0x3E, 0x00, 0x00], # 'J'
[0x7E, 0x08, 0x08, 0x14, 0x22, 0x40, 0x00, 0x00], # 'K'
[0x7E, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00, 0x00], # 'L'
[0x7E, 0x04, 0x08, 0x08, 0x04, 0x7E, 0x00, 0x00], # 'M'
[0x7E, 0x04, 0x08, 0x10, 0x20, 0x7E, 0x00, 0x00], # 'N'
[0x3C, 0x42, 0x42, 0x42, 0x42, 0x3C, 0x00, 0x00], # 'O'
[0x7E, 0x12, 0x12, 0x12, 0x12, 0x0C, 0x00, 0x00], # 'P'
[0x3C, 0x42, 0x52, 0x62, 0x42, 0x3C, 0x00, 0x00], # 'Q'
[0x7E, 0x12, 0x12, 0x12, 0x32, 0x4C, 0x00, 0x00], # 'R'
[0x24, 0x4A, 0x4A, 0x4A, 0x4A, 0x30, 0x00, 0x00], # 'S'
[0x02, 0x02, 0x02, 0x7E, 0x02, 0x02, 0x02, 0x00], # 'T'
[0x3E, 0x40, 0x40, 0x40, 0x40, 0x3E, 0x00, 0x00], # 'U'
[0x1E, 0x20, 0x40, 0x40, 0x20, 0x1E, 0x00, 0x00], # 'V'
[0x3E, 0x40, 0x20, 0x20, 0x40, 0x3E, 0x00, 0x00], # 'W'
[0x42, 0x24, 0x18, 0x18, 0x24, 0x42, 0x00, 0x00], # 'X'
[0x02, 0x04, 0x08, 0x70, 0x08, 0x04, 0x02, 0x00], # 'Y'
[0x42, 0x62, 0x52, 0x4A, 0x46, 0x42, 0x00, 0x00], # 'Z'
[0x00, 0x00, 0x7E, 0x42, 0x42, 0x00, 0x00, 0x00], # '['
[0x00, 0x04, 0x08, 0x10, 0x20, 0x40, 0x00, 0x00], # backslash
[0x00, 0x00, 0x42, 0x42, 0x7E, 0x00, 0x00, 0x00], # ']'
[0x00, 0x08, 0x04, 0x7E, 0x04, 0x08, 0x00, 0x00], # '^'
[0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00], # '_'
[0x3C, 0x42, 0x99, 0xA5, 0xA5, 0x81, 0x42, 0x3C], # '`'
[0x00, 0x20, 0x54, 0x54, 0x54, 0x78, 0x00, 0x00], # 'a'
[0x00, 0x7E, 0x48, 0x48, 0x48, 0x30, 0x00, 0x00], # 'b'
[0x00, 0x00, 0x38, 0x44, 0x44, 0x44, 0x00, 0x00], # 'c'
[0x00, 0x30, 0x48, 0x48, 0x48, 0x7E, 0x00, 0x00], # 'd'
[0x00, 0x38, 0x54, 0x54, 0x54, 0x48, 0x00, 0x00], # 'e'
[0x00, 0x00, 0x00, 0x7C, 0x0A, 0x02, 0x00, 0x00], # 'f'
[0x00, 0x18, 0xA4, 0xA4, 0xA4, 0xA4, 0x7C, 0x00], # 'g'
[0x00, 0x7E, 0x08, 0x08, 0x08, 0x70, 0x00, 0x00], # 'h'
[0x00, 0x00, 0x00, 0x48, 0x7A, 0x40, 0x00, 0x00], # 'i'
[0x00, 0x00, 0x40, 0x80, 0x80, 0x7A, 0x00, 0x00], # 'j'
[0x00, 0x7E, 0x18, 0x24, 0x40, 0x00, 0x00, 0x00], # 'k'
[0x00, 0x00, 0x00, 0x3E, 0x40, 0x40, 0x00, 0x00], # 'l'
[0x00, 0x7C, 0x04, 0x78, 0x04, 0x78, 0x00, 0x00], # 'm'
[0x00, 0x7C, 0x04, 0x04, 0x04, 0x78, 0x00, 0x00], # 'n'
[0x00, 0x38, 0x44, 0x44, 0x44, 0x38, 0x00, 0x00], # 'o'
[0x00, 0xFC, 0x24, 0x24, 0x24, 0x18, 0x00, 0x00], # 'p'
[0x00, 0x18, 0x24, 0x24, 0x24, 0xFC, 0x80, 0x00], # 'q'
[0x00, 0x00, 0x78, 0x04, 0x04, 0x04, 0x00, 0x00], # 'r'
[0x00, 0x48, 0x54, 0x54, 0x54, 0x20, 0x00, 0x00], # 's'
[0x00, 0x00, 0x04, 0x3E, 0x44, 0x40, 0x00, 0x00], # 't'
[0x00, 0x3C, 0x40, 0x40, 0x40, 0x3C, 0x00, 0x00], # 'u'
[0x00, 0x0C, 0x30, 0x40, 0x30, 0x0C, 0x00, 0x00], # 'v'
[0x00, 0x3C, 0x40, 0x38, 0x40, 0x3C, 0x00, 0x00], # 'w'
[0x00, 0x44, 0x28, 0x10, 0x28, 0x44, 0x00, 0x00], # 'x'
[0x00, 0x1C, 0xA0, 0xA0, 0xA0, 0x7C, 0x00, 0x00], # 'y'
[0x00, 0x44, 0x64, 0x54, 0x4C, 0x44, 0x00, 0x00], # 'z'
[0x00, 0x08, 0x08, 0x76, 0x42, 0x42, 0x00, 0x00], # '{'
[0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00], # '|'
[0x00, 0x42, 0x42, 0x76, 0x08, 0x08, 0x00, 0x00], # '}'
[0x00, 0x00, 0x04, 0x02, 0x04, 0x02, 0x00, 0x00], # '~'
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x7F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x80
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x81
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x82
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x83
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x84
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x85
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x86
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x87
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x88
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x89
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x90
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x91
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x92
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x93
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x94
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x95
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x96
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x97
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x98
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x99
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xED
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFF
] # end of SINCLAIR_FONT
# -----------------------------------------------------------
# Bit patterns for LCD_FONT
# Source: www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=69880
# Transposed by JLCArchibald
# Note: Only contains characters 0x20 - 0x7F inclusive
# All others will appear as blanks
LCD_FONT = [
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x00
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x01
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x02
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x03
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x04
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x05
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x06
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x07
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x08
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x09
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x0F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x10
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x11
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x12
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x13
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x14
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x15
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x16
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x17
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x18
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x19
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x1F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # ' '
[0x00, 0x00, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00], # '!'
[0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00], # '"'
[0x14, 0x7f, 0x14, 0x7f, 0x14, 0x00, 0x00, 0x00], # '#'
[0x24, 0x2a, 0x7f, 0x2a, 0x12, 0x00, 0x00, 0x00], # '$'
[0x23, 0x13, 0x08, 0x64, 0x62, 0x00, 0x00, 0x00], # '%'
[0x36, 0x49, 0x55, 0x22, 0x50, 0x00, 0x00, 0x00], # '&'
[0x00, 0x05, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00], # '''
[0x00, 0x1c, 0x22, 0x41, 0x00, 0x00, 0x00, 0x00], # '('
[0x00, 0x41, 0x22, 0x1c, 0x00, 0x00, 0x00, 0x00], # ')'
[0x14, 0x08, 0x3e, 0x08, 0x14, 0x00, 0x00, 0x00], # '*'
[0x08, 0x08, 0x3e, 0x08, 0x08, 0x00, 0x00, 0x00], # '+'
[0x00, 0x50, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00], # ','
[0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00], # '-'
[0x00, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00], # '.'
[0x20, 0x10, 0x08, 0x04, 0x02, 0x00, 0x00, 0x00], # '/'
[0x3e, 0x51, 0x49, 0x45, 0x3e, 0x00, 0x00, 0x00], # '0'
[0x00, 0x42, 0x7f, 0x40, 0x00, 0x00, 0x00, 0x00], # '1'
[0x42, 0x61, 0x51, 0x49, 0x46, 0x00, 0x00, 0x00], # '2'
[0x21, 0x41, 0x45, 0x4b, 0x31, 0x00, 0x00, 0x00], # '3'
[0x18, 0x14, 0x12, 0x7f, 0x10, 0x00, 0x00, 0x00], # '4'
[0x27, 0x45, 0x45, 0x45, 0x39, 0x00, 0x00, 0x00], # '5'
[0x3c, 0x4a, 0x49, 0x49, 0x30, 0x00, 0x00, 0x00], # '6'
[0x01, 0x71, 0x09, 0x05, 0x03, 0x00, 0x00, 0x00], # '7'
[0x36, 0x49, 0x49, 0x49, 0x36, 0x00, 0x00, 0x00], # '8'
[0x06, 0x49, 0x49, 0x29, 0x1e, 0x00, 0x00, 0x00], # '9'
[0x00, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00], # ':'
[0x00, 0x56, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00], # ';'
[0x08, 0x14, 0x22, 0x41, 0x00, 0x00, 0x00, 0x00], # '<'
[0x14, 0x14, 0x14, 0x14, 0x14, 0x00, 0x00, 0x00], # '='
[0x00, 0x41, 0x22, 0x14, 0x08, 0x00, 0x00, 0x00], # '>'
[0x02, 0x01, 0x51, 0x09, 0x06, 0x00, 0x00, 0x00], # '?'
[0x32, 0x49, 0x79, 0x41, 0x3e, 0x00, 0x00, 0x00], # '@'
[0x7e, 0x11, 0x11, 0x11, 0x7e, 0x00, 0x00, 0x00], # 'A'
[0x7f, 0x49, 0x49, 0x49, 0x36, 0x00, 0x00, 0x00], # 'B'
[0x3e, 0x41, 0x41, 0x41, 0x22, 0x00, 0x00, 0x00], # 'C'
[0x7f, 0x41, 0x41, 0x22, 0x1c, 0x00, 0x00, 0x00], # 'D'
[0x7f, 0x49, 0x49, 0x49, 0x41, 0x00, 0x00, 0x00], # 'E'
[0x7f, 0x09, 0x09, 0x09, 0x01, 0x00, 0x00, 0x00], # 'F'
[0x3e, 0x41, 0x49, 0x49, 0x7a, 0x00, 0x00, 0x00], # 'G'
[0x7f, 0x08, 0x08, 0x08, 0x7f, 0x00, 0x00, 0x00], # 'H'
[0x00, 0x41, 0x7f, 0x41, 0x00, 0x00, 0x00, 0x00], # 'I'
[0x20, 0x40, 0x41, 0x3f, 0x01, 0x00, 0x00, 0x00], # 'J'
[0x7f, 0x08, 0x14, 0x22, 0x41, 0x00, 0x00, 0x00], # 'K'
[0x7f, 0x40, 0x40, 0x40, 0x40, 0x00, 0x00, 0x00], # 'L'
[0x7f, 0x02, 0x0c, 0x02, 0x7f, 0x00, 0x00, 0x00], # 'M'
[0x7f, 0x04, 0x08, 0x10, 0x7f, 0x00, 0x00, 0x00], # 'N'
[0x3e, 0x41, 0x41, 0x41, 0x3e, 0x00, 0x00, 0x00], # 'O'
[0x7f, 0x09, 0x09, 0x09, 0x06, 0x00, 0x00, 0x00], # 'P'
[0x3e, 0x41, 0x51, 0x21, 0x5e, 0x00, 0x00, 0x00], # 'Q'
[0x7f, 0x09, 0x19, 0x29, 0x46, 0x00, 0x00, 0x00], # 'R'
[0x46, 0x49, 0x49, 0x49, 0x31, 0x00, 0x00, 0x00], # 'S'
[0x01, 0x01, 0x7f, 0x01, 0x01, 0x00, 0x00, 0x00], # 'T'
[0x3f, 0x40, 0x40, 0x40, 0x3f, 0x00, 0x00, 0x00], # 'U'
[0x1f, 0x20, 0x40, 0x20, 0x1f, 0x00, 0x00, 0x00], # 'V'
[0x3f, 0x40, 0x38, 0x40, 0x3f, 0x00, 0x00, 0x00], # 'W'
[0x63, 0x14, 0x08, 0x14, 0x63, 0x00, 0x00, 0x00], # 'X'
[0x07, 0x08, 0x70, 0x08, 0x07, 0x00, 0x00, 0x00], # 'Y'
[0x61, 0x51, 0x49, 0x45, 0x43, 0x00, 0x00, 0x00], # 'Z'
[0x00, 0x7f, 0x41, 0x41, 0x00, 0x00, 0x00, 0x00], # '['
[0x02, 0x04, 0x08, 0x10, 0x20, 0x00, 0x00, 0x00], # backslash
[0x00, 0x41, 0x41, 0x7f, 0x00, 0x00, 0x00, 0x00], # ']'
[0x04, 0x02, 0x01, 0x02, 0x04, 0x00, 0x00, 0x00], # '^'
[0x40, 0x40, 0x40, 0x40, 0x40, 0x00, 0x00, 0x00], # '_'
[0x00, 0x01, 0x02, 0x04, 0x00, 0x00, 0x00, 0x00], # '`'
[0x20, 0x54, 0x54, 0x54, 0x78, 0x00, 0x00, 0x00], # 'a'
[0x7f, 0x48, 0x44, 0x44, 0x38, 0x00, 0x00, 0x00], # 'b'
[0x38, 0x44, 0x44, 0x44, 0x20, 0x00, 0x00, 0x00], # 'c'
[0x38, 0x44, 0x44, 0x48, 0x7f, 0x00, 0x00, 0x00], # 'd'
[0x38, 0x54, 0x54, 0x54, 0x18, 0x00, 0x00, 0x00], # 'e'
[0x08, 0x7e, 0x09, 0x01, 0x02, 0x00, 0x00, 0x00], # 'f'
[0x0c, 0x52, 0x52, 0x52, 0x3e, 0x00, 0x00, 0x00], # 'g'
[0x7f, 0x08, 0x04, 0x04, 0x78, 0x00, 0x00, 0x00], # 'h'
[0x00, 0x44, 0x7d, 0x40, 0x00, 0x00, 0x00, 0x00], # 'i'
[0x20, 0x40, 0x44, 0x3d, 0x00, 0x00, 0x00, 0x00], # 'j'
[0x7f, 0x10, 0x28, 0x44, 0x00, 0x00, 0x00, 0x00], # 'k'
[0x00, 0x41, 0x7f, 0x40, 0x00, 0x00, 0x00, 0x00], # 'l'
[0x7c, 0x04, 0x18, 0x04, 0x78, 0x00, 0x00, 0x00], # 'm'
[0x7c, 0x08, 0x04, 0x04, 0x78, 0x00, 0x00, 0x00], # 'n'
[0x38, 0x44, 0x44, 0x44, 0x38, 0x00, 0x00, 0x00], # 'o'
[0x7c, 0x14, 0x14, 0x14, 0x08, 0x00, 0x00, 0x00], # 'p'
[0x08, 0x14, 0x14, 0x18, 0x7c, 0x00, 0x00, 0x00], # 'q'
[0x7c, 0x08, 0x04, 0x04, 0x08, 0x00, 0x00, 0x00], # 'r'
[0x48, 0x54, 0x54, 0x54, 0x20, 0x00, 0x00, 0x00], # 's'
[0x04, 0x3f, 0x44, 0x40, 0x20, 0x00, 0x00, 0x00], # 't'
[0x3c, 0x40, 0x40, 0x20, 0x7c, 0x00, 0x00, 0x00], # 'u'
[0x1c, 0x20, 0x40, 0x20, 0x1c, 0x00, 0x00, 0x00], # 'v'
[0x3c, 0x40, 0x30, 0x40, 0x3c, 0x00, 0x00, 0x00], # 'w'
[0x44, 0x28, 0x10, 0x28, 0x44, 0x00, 0x00, 0x00], # 'x'
[0x0c, 0x50, 0x50, 0x50, 0x3c, 0x00, 0x00, 0x00], # 'y'
[0x44, 0x64, 0x54, 0x4c, 0x44, 0x00, 0x00, 0x00], # 'z'
[0x00, 0x08, 0x36, 0x41, 0x00, 0x00, 0x00, 0x00], # '{'
[0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00], # '|'
[0x00, 0x41, 0x36, 0x08, 0x00, 0x00, 0x00, 0x00], # '}'
[0x10, 0x08, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00], # '~'
[0x00, 0x00, 0x02, 0x05, 0x02, 0x00, 0x00, 0x00], # 0x7F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x80
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x81
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x82
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x83
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x84
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x85
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x86
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x87
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x88
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x89
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x8F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x90
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x91
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x92
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x93
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x94
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x95
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x96
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x97
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x98
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x99
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9A
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9B
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9C
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9D
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9E
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x9F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xA9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xAF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xB9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xBF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xC9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xCF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xD9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xDF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xE9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xED
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xEF
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF0
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF1
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF2
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF3
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF4
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF5
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF6
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF7
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF8
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xF9
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFA
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFB
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFC
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFD
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFE
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0xFF
] # end of LCD_FONT
# -----------------------------------------------------------
# Bit patterns for UKR_FONT
# (Cyrillic Ukrainian font)
# Transposed by Taras (@tarasius)
UKR_FONT = [
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x00
[0x7E, 0x81, 0x95, 0xB1, 0xB1, 0x95, 0x81, 0x7E], # 0x01
[0x7E, 0xFF, 0xEB, 0xCF, 0xCF, 0xEB, 0xFF, 0x7E], # 0x02
[0x0E, 0x1F, 0x3F, 0x7E, 0x3F, 0x1F, 0x0E, 0x00], # 0x03
[0x08, 0x1C, 0x3E, 0x7F, 0x3E, 0x1C, 0x08, 0x00], # 0x04
[0x18, 0xBA, 0xFF, 0xFF, 0xFF, 0xBA, 0x18, 0x00], # 0x05
[0x10, 0xB8, 0xFC, 0xFF, 0xFC, 0xB8, 0x10, 0x00], # 0x06
[0x00, 0x00, 0x18, 0x3C, 0x3C, 0x18, 0x00, 0x00], # 0x07
[0xFF, 0xFF, 0xE7, 0xC3, 0xC3, 0xE7, 0xFF, 0xFF], # 0x08
[0x00, 0x3C, 0x66, 0x42, 0x42, 0x66, 0x3C, 0x00], # 0x09
[0xFF, 0xC3, 0x99, 0xBD, 0xBD, 0x99, 0xC3, 0xFF], # 0x0A
[0x70, 0xF8, 0x88, 0x88, 0xFD, 0x7F, 0x07, 0x0F], # 0x0B
[0x00, 0x4E, 0x5F, 0xF1, 0xF1, 0x5F, 0x4E, 0x00], # 0x0C
[0xC0, 0xE0, 0xFF, 0x7F, 0x05, 0x05, 0x07, 0x07], # 0x0D
[0xC0, 0xFF, 0x7F, 0x05, 0x05, 0x65, 0x7F, 0x3F], # 0x0E
[0x99, 0x5A, 0x3C, 0xE7, 0xE7, 0x3C, 0x5A, 0x99], # 0x0F
[0x7F, 0x3E, 0x3E, 0x1C, 0x1C, 0x08, 0x08, 0x00], # 0x10
[0x08, 0x08, 0x1C, 0x1C, 0x3E, 0x3E, 0x7F, 0x00], # 0x11
[0x00, 0x24, 0x66, 0xFF, 0xFF, 0x66, 0x24, 0x00], # 0x12
[0x00, 0x5F, 0x5F, 0x00, 0x00, 0x5F, 0x5F, 0x00], # 0x13
[0x06, 0x0F, 0x09, 0x7F, 0x7F, 0x01, 0x7F, 0x7F], # 0x14
[0x40, 0xDA, 0xBF, 0xA5, 0xFD, 0x59, 0x03, 0x02], # 0x15
[0x00, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x00], # 0x16
[0x80, 0x94, 0xB6, 0xFF, 0xFF, 0xB6, 0x94, 0x80], # 0x17
[0x00, 0x04, 0x06, 0x7F, 0x7F, 0x06, 0x04, 0x00], # 0x18
[0x00, 0x10, 0x30, 0x7F, 0x7F, 0x30, 0x10, 0x00], # 0x19
[0x08, 0x08, 0x08, 0x2A, 0x3E, 0x1C, 0x08, 0x00], # 0x1A
[0x08, 0x1C, 0x3E, 0x2A, 0x08, 0x08, 0x08, 0x00], # 0x1B
[0x3C, 0x3C, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00], # 0x1C
[0x08, 0x1C, 0x3E, 0x08, 0x08, 0x3E, 0x1C, 0x08], # 0x1D
[0x30, 0x38, 0x3C, 0x3E, 0x3E, 0x3C, 0x38, 0x30], # 0x1E
[0x06, 0x0E, 0x1E, 0x3E, 0x3E, 0x1E, 0x0E, 0x06], # 0x1F
[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # 0x20
[0x06, 0x5F, 0x5F, 0x06, 0x00, 0x00, 0x00, 0x00], # 0x21
[0x07, 0x07, 0x00, 0x07, 0x07, 0x00, 0x00, 0x00], # 0x22
[0x12, 0x3F, 0x3F, 0x12, 0x3F, 0x3F, 0x12, 0x00], # 0x23
[0x24, 0x2E, 0x2A, 0x6B, 0x6B, 0x3A, 0x12, 0x00], # 0x24
[0x46, 0x66, 0x30, 0x18, 0x0C, 0x66, 0x62, 0x00], # 0x25
[0x30, 0x7A, 0x4F, 0x55, 0x3F, 0x7A, 0x48, 0x00], # 0x26
[0x00, 0x04, 0x07, 0x03, 0x00, 0x00, 0x00, 0x00], # 0x27
[0x1C, 0x3E, 0x63, 0x41, 0x00, 0x00, 0x00, 0x00], # 0x28
[0x00, 0x41, 0x63, 0x3E, 0x1C, 0x00, 0x00, 0x00], # 0x29
[0x2A, 0x3E, 0x1C, 0x1C, 0x3E, 0x2A, 0x00, 0x00], # 0x2A
[0x08, 0x08, 0x3E, 0x3E, 0x08, 0x08, 0x00, 0x00], # 0x2B
[0x00, 0x80, 0xE0, 0x60, 0x00, 0x00, 0x00, 0x00], # 0x2C
[0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00], # 0x2D
[0x00, 0x00, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00], # 0x2E
[0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x01, 0x00], # 0x2F
[0x3E, 0x7F, 0x71, 0x59, 0x4D, 0x7F, 0x3E, 0x00], # 0x30
[0x40, 0x42, 0x7F, 0x7F, 0x40, 0x40, 0x00, 0x00], # 0x31
[0x62, 0x73, 0x59, 0x49, 0x6F, 0x66, 0x00, 0x00], # 0x32
[0x22, 0x63, 0x49, 0x49, 0x7F, 0x36, 0x00, 0x00], # 0x33
[0x18, 0x1C, 0x16, 0x53, 0x7F, 0x7F, 0x50, 0x00], # 0x34
[0x27, 0x67, 0x45, 0x45, 0x7D, 0x39, 0x00, 0x00], # 0x35
[0x3C, 0x7E, 0x4B, 0x49, 0x79, 0x30, 0x00, 0x00], # 0x36
[0x03, 0x03, 0x71, 0x79, 0x0F, 0x07, 0x00, 0x00], # 0x37
[0x36, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00, 0x00], # 0x38
[0x06, 0x4F, 0x49, 0x69, 0x3F, 0x1E, 0x00, 0x00], # 0x39
[0x00, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00], # 0x3A
[0x00, 0x80, 0xE6, 0x66, 0x00, 0x00, 0x00, 0x00], # 0x3B
[0x08, 0x1C, 0x36, 0x63, 0x41, 0x00, 0x00, 0x00], # 0x3C
[0x24, 0x24, 0x24, 0x24, 0x24, 0x24, 0x00, 0x00], # 0x3D
[0x00, 0x41, 0x63, 0x36, 0x1C, 0x08, 0x00, 0x00], # 0x3E
[0x02, 0x03, 0x51, 0x59, 0x0F, 0x06, 0x00, 0x00], # 0x3F
[0x3E, 0x7F, 0x41, 0x5D, 0x5D, 0x1F, 0x1E, 0x00], # 0x40
[0x7C, 0x7E, 0x13, 0x13, 0x7E, 0x7C, 0x00, 0x00], # 0x41
[0x41, 0x7F, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00], # 0x42
[0x1C, 0x3E, 0x63, 0x41, 0x41, 0x63, 0x22, 0x00], # 0x43
[0x41, 0x7F, 0x7F, 0x41, 0x63, 0x3E, 0x1C, 0x00], # 0x44
[0x41, 0x7F, 0x7F, 0x49, 0x5D, 0x41, 0x63, 0x00], # 0x45
[0x41, 0x7F, 0x7F, 0x49, 0x1D, 0x01, 0x03, 0x00], # 0x46
[0x1C, 0x3E, 0x63, 0x41, 0x51, 0x73, 0x72, 0x00], # 0x47
[0x7F, 0x7F, 0x08, 0x08, 0x7F, 0x7F, 0x00, 0x00], # 0x48
[0x00, 0x41, 0x7F, 0x7F, 0x41, 0x00, 0x00, 0x00], # 0x49
[0x30, 0x70, 0x40, 0x41, 0x7F, 0x3F, 0x01, 0x00], # 0x4A
[0x41, 0x7F, 0x7F, 0x08, 0x1C, 0x77, 0x63, 0x00], # 0x4B
[0x41, 0x7F, 0x7F, 0x41, 0x40, 0x60, 0x70, 0x00], # 0x4C
[0x7F, 0x7F, 0x0E, 0x1C, 0x0E, 0x7F, 0x7F, 0x00], # 0x4D
[0x7F, 0x7F, 0x06, 0x0C, 0x18, 0x7F, 0x7F, 0x00], # 0x4E
[0x1C, 0x3E, 0x63, 0x41, 0x63, 0x3E, 0x1C, 0x00], # 0x4F
[0x41, 0x7F, 0x7F, 0x49, 0x09, 0x0F, 0x06, 0x00], # 0x50
[0x1E, 0x3F, 0x21, 0x71, 0x7F, 0x5E, 0x00, 0x00], # 0x51
[0x41, 0x7F, 0x7F, 0x09, 0x19, 0x7F, 0x66, 0x00], # 0x52
[0x26, 0x6F, 0x4D, 0x59, 0x73, 0x32, 0x00, 0x00], # 0x53
[0x03, 0x41, 0x7F, 0x7F, 0x41, 0x03, 0x00, 0x00], # 0x54
[0x7F, 0x7F, 0x40, 0x40, 0x7F, 0x7F, 0x00, 0x00], # 0x55
[0x1F, 0x3F, 0x60, 0x60, 0x3F, 0x1F, 0x00, 0x00], # 0x56
[0x7F, 0x7F, 0x30, 0x18, 0x30, 0x7F, 0x7F, 0x00], # 0x57
[0x43, 0x67, 0x3C, 0x18, 0x3C, 0x67, 0x43, 0x00], # 0x58
[0x07, 0x4F, 0x78, 0x78, 0x4F, 0x07, 0x00, 0x00], # 0x59
[0x47, 0x63, 0x71, 0x59, 0x4D, 0x67, 0x73, 0x00], # 0x5A
[0x00, 0x7F, 0x7F, 0x41, 0x41, 0x00, 0x00, 0x00], # 0x5B
[0x01, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00], # 0x5C
[0x00, 0x41, 0x41, 0x7F, 0x7F, 0x00, 0x00, 0x00], # 0x5D
[0x08, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x08, 0x00], # 0x5E
[0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80], # 0x5F
[0x00, 0x00, 0x03, 0x07, 0x04, 0x00, 0x00, 0x00], # 0x60
[0x20, 0x74, 0x54, 0x54, 0x3C, 0x78, 0x40, 0x00], # 0x61
[0x41, 0x7F, 0x3F, 0x48, 0x48, 0x78, 0x30, 0x00], # 0x62
[0x38, 0x7C, 0x44, 0x44, 0x6C, 0x28, 0x00, 0x00], # 0x63
[0x30, 0x78, 0x48, 0x49, 0x3F, 0x7F, 0x40, 0x00], # 0x64
[0x38, 0x7C, 0x54, 0x54, 0x5C, 0x18, 0x00, 0x00], # 0x65
[0x48, 0x7E, 0x7F, 0x49, 0x03, 0x02, 0x00, 0x00], # 0x66
[0x98, 0xBC, 0xA4, 0xA4, 0xF8, 0x7C, 0x04, 0x00], # 0x67
[0x41, 0x7F, 0x7F, 0x08, 0x04, 0x7C, 0x78, 0x00], # 0x68
[0x00, 0x44, 0x7D, 0x7D, 0x40, 0x00, 0x00, 0x00], # 0x69
[0x60, 0xE0, 0x80, 0x80, 0xFD, 0x7D, 0x00, 0x00], # 0x6A
[0x41, 0x7F, 0x7F, 0x10, 0x38, 0x6C, 0x44, 0x00], # 0x6B
[0x00, 0x41, 0x7F, 0x7F, 0x40, 0x00, 0x00, 0x00], # 0x6C
[0x7C, 0x7C, 0x18, 0x38, 0x1C, 0x7C, 0x78, 0x00], # 0x6D
[0x7C, 0x7C, 0x04, 0x04, 0x7C, 0x78, 0x00, 0x00], # 0x6E
[0x38, 0x7C, 0x44, 0x44, 0x7C, 0x38, 0x00, 0x00], # 0x6F
[0x84, 0xFC, 0xF8, 0xA4, 0x24, 0x3C, 0x18, 0x00], # 0x70
[0x18, 0x3C, 0x24, 0xA4, 0xF8, 0xFC, 0x84, 0x00], # 0x71
[0x44, 0x7C, 0x78, 0x4C, 0x04, 0x1C, 0x18, 0x00], # 0x72
[0x48, 0x5C, 0x54, 0x54, 0x74, 0x24, 0x00, 0x00], # 0x73
[0x00, 0x04, 0x3E, 0x7F, 0x44, 0x24, 0x00, 0x00], # 0x74
[0x3C, 0x7C, 0x40, 0x40, 0x3C, 0x7C, 0x40, 0x00], # 0x75
[0x1C, 0x3C, 0x60, 0x60, 0x3C, 0x1C, 0x00, 0x00], # 0x76
[0x3C, 0x7C, 0x70, 0x38, 0x70, 0x7C, 0x3C, 0x00], # 0x77
[0x44, 0x6C, 0x38, 0x10, 0x38, 0x6C, 0x44, 0x00], # 0x78
[0x9C, 0xBC, 0xA0, 0xA0, 0xFC, 0x7C, 0x00, 0x00], # 0x79
[0x4C, 0x64, 0x74, 0x5C, 0x4C, 0x64, 0x00, 0x00], # 0x7A
[0x08, 0x08, 0x3E, 0x77, 0x41, 0x41, 0x00, 0x00], # 0x7B
[0x00, 0x00, 0x00, 0x77, 0x77, 0x00, 0x00, 0x00], # 0x7C
[0x41, 0x41, 0x77, 0x3E, 0x08, 0x08, 0x00, 0x00], # 0x7D
[0x02, 0x03, 0x01, 0x03, 0x02, 0x03, 0x01, 0x00], # 0x7E
[0x70, 0x78, 0x4C, 0x46, 0x4C, 0x78, 0x70, 0x00], # 0x7F
[0x00, 0x7C, 0x7E, 0x13, 0x11, 0x7F, 0x7F, 0x00], # 0x80
[0x00, 0x7F, 0x7F, 0x49, 0x49, 0x79, 0x30, 0x00], # 0x81
[0x00, 0x7F, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00], # 0x82
[0x00, 0x7F, 0x7F, 0x01, 0x01, 0x01, 0x01, 0x00], # 0x83
[0xC0, 0xFE, 0x7F, 0x41, 0x7F, 0xFE, 0xC0, 0x00], # 0x84
[0x00, 0x7F, 0x7F, 0x49, 0x49, 0x49, 0x41, 0x00], # 0x85
[0x63, 0x77, 0x1C, 0x7F, 0x7F, 0x1C, 0x77, 0x63], # 0x86
[0x49, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49, 0x00], # 0x87
[0x00, 0x7F, 0x7F, 0x18, 0x0C, 0x7F, 0x7F, 0x00], # 0x88
[0x00, 0x7E, 0x7F, 0x19, 0x0D, 0x7F, 0x7E, 0x00], # 0x89
[0x00, 0x7F, 0x7F, 0x1C, 0x36, 0x63, 0x41, 0x00], # 0x8A
[0x00, 0x7C, 0x7E, 0x03, 0x01, 0x7F, 0x7F, 0x00], # 0x8B
[0x7F, 0x7F, 0x0E, 0x1C, 0x0E, 0x7F, 0x7F, 0x00], # 0x8C
[0x00, 0x7F, 0x7F, 0x08, 0x08, 0x7F, 0x7F, 0x00], # 0x8D
[0x00, 0x3E, 0x7F, 0x41, 0x41, 0x7F, 0x3E, 0x00], # 0x8E
[0x00, 0x7F, 0x7F, 0x01, 0x01, 0x7F, 0x7F, 0x00], # 0x8F
[0x7E, 0x81, 0x95, 0xA1, 0xA1, 0x95, 0x81, 0x7E], # 0x90
[0x00, 0x3E, 0x7F, 0x41, 0x41, 0x63, 0x22, 0x00], # 0x91
[0x00, 0x01, 0x01, 0x7F, 0x7F, 0x01, 0x01, 0x00], # 0x92
[0x00, 0x27, 0x6F, 0x48, 0x48, 0x7F, 0x3F, 0x00], # 0x93
[0x0E, 0x1F, 0x11, 0x7F, 0x7F, 0x11, 0x1F, 0x0E], # 0x94
[0x00, 0x1C, 0x3E, 0x3E, 0x3E, 0x1C, 0x00, 0x00], # 0x95
[0x00, 0x7F, 0x7F, 0x40, 0x40, 0x7F, 0xFF, 0xC0], # 0x96
[0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08], # 0x97
[0x7F, 0x7F, 0x40, 0x7F, 0x7F, 0x40, 0x7F, 0x7F], # 0x98
[0x7F, 0x7F, 0x40, 0x7F, 0x7F, 0x40, 0xFF, 0xFF], # 0x99
[0x01, 0x7F, 0x7F, 0x48, 0x48, 0x78, 0x30, 0x00], # 0x9A
[0x7F, 0x7F, 0x48, 0x78, 0x30, 0x7F, 0x7F, 0x00], # 0x9B
[0x00, 0x7F, 0x7F, 0x48, 0x48, 0x78, 0x30, 0x00], # 0x9C
[0x22, 0x41, 0x49, 0x49, 0x6B, 0x3E, 0x1C, 0x00], # 0x9D
[0x7F, 0x7F, 0x08, 0x3E, 0x7F, 0x41, 0x7F, 0x3E], # 0x9E
[0x00, 0x4E, 0x7F, 0x31, 0x11, 0x7F, 0x7F, 0x00], # 0x9F
[0x20, 0x74, 0x54, 0x54, 0x3C, 0x78, 0x40, 0x00], # 0xA0
[0x00, 0x34, 0x7E, 0x4A, 0x4A, 0x7A, 0x30, 0x00], # 0xA1
[0x00, 0x7C, 0x7C, 0x54, 0x54, 0x7C, 0x28, 0x00], # 0xA2
[0x00, 0x7C, 0x7C, 0x04, 0x04, 0x04, 0x04, 0x00], # 0xA3
[0xC0, 0xF8, 0x7C, 0x44, 0x7C, 0xFC, 0xC0, 0x00], # 0xA4
[0x00, 0x38, 0x7C, 0x54, 0x54, 0x5C, 0x18, 0x00], # 0xA5
[0x44, 0x6C, 0x38, 0x7C, 0x7C, 0x38, 0x6C, 0x44], # 0xA6
[0x00, 0x28, 0x6C, 0x44, 0x54, 0x7C, 0x28, 0x00], # 0xA7
[0x00, 0x7F, 0x7F, 0x49, 0x49, 0x49, 0x49, 0x00], # 0xA8
[0x00, 0x7C, 0x7C, 0x32, 0x1A, 0x7C, 0x7C, 0x00], # 0xA9
[0x00, 0x7C, 0x7C, 0x10, 0x38, 0x6C, 0x44, 0x00], # 0xAA
[0x00, 0x70, 0x78, 0x0C, 0x04, 0x7C, 0x7C, 0x00], # 0xAB
[0x7C, 0x7C, 0x18, 0x38, 0x18, 0x7C, 0x7C, 0x00], # 0xAC
[0x00, 0x7C, 0x7C, 0x10, 0x10, 0x7C, 0x7C, 0x00], # 0xAD
[0x00, 0x38, 0x7C, 0x44, 0x44, 0x7C, 0x38, 0x00], # 0xAE
[0x00, 0x7C, 0x7C, 0x04, 0x04, 0x7C, 0x7C, 0x00], # 0xAF
[0x00, 0xAA, 0x00, 0x55, 0x00, 0xAA, 0x00, 0x55], # 0xB0
[0xAA, 0x55, 0xAA, 0x55, 0xAA, 0x55, 0xAA, 0x55], # 0xB1
[0x55, 0xFF, 0xAA, 0xFF, 0x55, 0xFF, 0xAA, 0xFF], # 0xB2
[0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00], # 0xB3
[0x08, 0x08, 0x08, 0xFF, 0xFF, 0x00, 0x00, 0x00], # 0xB4
[0x0A, 0x0A, 0x0A, 0xFF, 0xFF, 0x00, 0x00, 0x00], # 0xB5
[0x08, 0x08, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x00], # 0xB6
[0x08, 0x08, 0xF8, 0xF8, 0x08, 0xF8, 0xF8, 0x00], # 0xB7
[0x00, 0x38, 0x7D, 0x54, 0x54, 0x5D, 0x18, 0x00], # 0xB8
[0x7F, 0x04, 0x08, 0x10, 0x7F, 0x01, 0x01, 0x00], # 0xB9
[0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x00], # 0xBA
[0x0A, 0x0A, 0xFA, 0xFA, 0x02, 0xFE, 0xFE, 0x00], # 0xBB
[0x0A, 0x0A, 0x0B, 0x0B, 0x08, 0x0F, 0x0F, 0x00], # 0xBC
[0x08, 0x08, 0x0F, 0x0F, 0x08, 0x0F, 0x0F, 0x00], # 0xBD
[0x0A, 0x0A, 0x0A, 0x0F, 0x0F, 0x00, 0x00, 0x00], # 0xBE
[0x08, 0x08, 0x08, 0xF8, 0xF8, 0x00, 0x00, 0x00], # 0xBF
[0x00, 0x7C, 0x7E, 0x13, 0x11, 0x7F, 0x7F, 0x00], # 0xC0
[0x00, 0x7F, 0x7F, 0x49, 0x49, 0x79, 0x30, 0x00], # 0xC1
[0x00, 0x7F, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00], # 0xC2
[0x00, 0x7F, 0x7F, 0x01, 0x01, 0x01, 0x01, 0x00], # 0xC3
[0xC0, 0xFE, 0x7F, 0x41, 0x7F, 0xFE, 0xC0, 0x00], # 0xC4
[0x00, 0x7F, 0x7F, 0x49, 0x49, 0x49, 0x41, 0x00], # 0xC5
[0x63, 0x77, 0x1C, 0x7F, 0x7F, 0x1C, 0x77, 0x63], # 0xC6
[0x00, 0x22, 0x63, 0x49, 0x49, 0x7F, 0x36, 0x00], # 0xC7
[0x00, 0x7F, 0x7F, 0x18, 0x0C, 0x7F, 0x7F, 0x00], # 0xC8
[0x00, 0x7E, 0x7F, 0x19, 0x0D, 0x7F, 0x7E, 0x00], # 0xC9
[0x00, 0x7F, 0x7F, 0x1C, 0x36, 0x63, 0x41, 0x00], # 0xCA
[0x00, 0x7C, 0x7E, 0x03, 0x01, 0x7F, 0x7F, 0x00], # 0xCB
[0x7F, 0x7F, 0x0E, 0x1C, 0x0E, 0x7F, 0x7F, 0x00], # 0xCC
[0x00, 0x7F, 0x7F, 0x08, 0x08, 0x7F, 0x7F, 0x00], # 0xCD
[0x00, 0x3E, 0x7F, 0x41, 0x41, 0x7F, 0x3E, 0x00], # 0xCE
[0x00, 0x7F, 0x7F, 0x01, 0x01, 0x7F, 0x7F, 0x00], # 0xCF
[0x00, 0x7F, 0x7F, 0x11, 0x11, 0x1F, 0x0E, 0x00], # 0xD0
[0x00, 0x3E, 0x7F, 0x41, 0x41, 0x63, 0x22, 0x00], # 0xD1
[0x00, 0x01, 0x01, 0x7F, 0x7F, 0x01, 0x01, 0x00], # 0xD2
[0x00, 0x27, 0x6F, 0x48, 0x48, 0x7F, 0x3F, 0x00], # 0xD3
[0x0E, 0x1F, 0x11, 0x7F, 0x7F, 0x11, 0x1F, 0x0E], # 0xD4
[0x00, 0x63, 0x77, 0x1C, 0x1C, 0x77, 0x63, 0x00], # 0xD5
[0x00, 0x7F, 0x7F, 0x40, 0x40, 0x7F, 0xFF, 0xC0], # 0xD6
[0x00, 0x07, 0x0F, 0x08, 0x08, 0x7F, 0x7F, 0x00], # 0xD7
[0x7F, 0x7F, 0x40, 0x7F, 0x7F, 0x40, 0x7F, 0x7F], # 0xD8
[0x7F, 0x7F, 0x40, 0x7F, 0x7F, 0x40, 0xFF, 0xFF], # 0xD9
[0x01, 0x7F, 0x7F, 0x48, 0x48, 0x78, 0x30, 0x00], # 0xDA
[0x7F, 0x7F, 0x48, 0x78, 0x30, 0x7F, 0x7F, 0x00], # 0xDB
[0x00, 0x7F, 0x7F, 0x48, 0x48, 0x78, 0x30, 0x00], # 0xDC
[0x22, 0x41, 0x49, 0x49, 0x6B, 0x3E, 0x1C, 0x00], # 0xDD
[0x7F, 0x7F, 0x08, 0x3E, 0x7F, 0x41, 0x7F, 0x3E], # 0xDE
[0x00, 0x4E, 0x7F, 0x31, 0x11, 0x7F, 0x7F, 0x00], # 0xDF
[0x20, 0x74, 0x54, 0x54, 0x3C, 0x78, 0x40, 0x00], # 0xE0
[0x00, 0x34, 0x7E, 0x4A, 0x4A, 0x7A, 0x30, 0x00], # 0xE1
[0x00, 0x7C, 0x7C, 0x54, 0x54, 0x7C, 0x28, 0x00], # 0xE2
[0x00, 0x7C, 0x7C, 0x04, 0x04, 0x04, 0x04, 0x00], # 0xE3
[0xC0, 0xF8, 0x7C, 0x44, 0x7C, 0xFC, 0xC0, 0x00], # 0xE4
[0x00, 0x38, 0x7C, 0x54, 0x54, 0x5C, 0x18, 0x00], # 0xE5
[0x44, 0x6C, 0x38, 0x7C, 0x7C, 0x38, 0x6C, 0x44], # 0xE6
[0x00, 0x28, 0x6C, 0x44, 0x54, 0x7C, 0x28, 0x00], # 0xE7
[0x00, 0x7C, 0x7C, 0x30, 0x18, 0x7C, 0x7C, 0x00], # 0xE8
[0x00, 0x7C, 0x7C, 0x32, 0x1A, 0x7C, 0x7C, 0x00], # 0xE9
[0x00, 0x7C, 0x7C, 0x10, 0x38, 0x6C, 0x44, 0x00], # 0xEA
[0x00, 0x70, 0x78, 0x0C, 0x04, 0x7C, 0x7C, 0x00], # 0xEB
[0x7C, 0x7C, 0x18, 0x38, 0x18, 0x7C, 0x7C, 0x00], # 0xEC
[0x00, 0x7C, 0x7C, 0x10, 0x10, 0x7C, 0x7C, 0x00], # 0xED
[0x00, 0x38, 0x7C, 0x44, 0x44, 0x7C, 0x38, 0x00], # 0xEE
[0x00, 0x7C, 0x7C, 0x04, 0x04, 0x7C, 0x7C, 0x00], # 0xEF
[0x00, 0x7C, 0x7C, 0x24, 0x24, 0x3C, 0x18, 0x00], # 0xF0
[0x00, 0x38, 0x7C, 0x44, 0x44, 0x6C, 0x28, 0x00], # 0xF1
[0x00, 0x04, 0x04, 0x7C, 0x7C, 0x04, 0x04, 0x00], # 0xF2
[0x00, 0x0C, 0x5C, 0x50, 0x50, 0x7C, 0x3C, 0x00], # 0xF3
[0x18, 0x3C, 0x24, 0x7C, 0x7C, 0x24, 0x3C, 0x18], # 0xF4
[0x00, 0x44, 0x6C, 0x38, 0x38, 0x6C, 0x44, 0x00], # 0xF5
[0x00, 0x7C, 0x7C, 0x40, 0x40, 0x7C, 0xFC, 0xC0], # 0xF6
[0x00, 0x0C, 0x1C, 0x10, 0x10, 0x7C, 0x7C, 0x00], # 0xF7
[0x7C, 0x7C, 0x40, 0x7C, 0x7C, 0x40, 0x7C, 0x7C], # 0xF8
[0x7C, 0x7C, 0x40, 0x7C, 0x7C, 0x40, 0xFC, 0xFC], # 0xF9
[0x04, 0x7C, 0x7C, 0x50, 0x50, 0x70, 0x20, 0x00], # 0xFA
[0x7C, 0x7C, 0x50, 0x70, 0x20, 0x7C, 0x7C, 0x00], # 0xFB
[0x00, 0x7C, 0x7C, 0x50, 0x50, 0x70, 0x20, 0x00], # 0xFC
[0x00, 0x44, 0x54, 0x54, 0x54, 0x7C, 0x38, 0x00], # 0xFD
[0x7C, 0x7C, 0x10, 0x38, 0x7C, 0x44, 0x7C, 0x38], # 0xFE
[0x00, 0x48, 0x7C, 0x34, 0x14, 0x7C, 0x7C, 0x00], # 0xFF
] # end of UKR_FONT
# -----------------------------------------------------------
# Bit patterns for TINY_FONT
# Source: http://www.dafont.com/tiny.font
# Transposed by <NAME>
# Note: Only contains characters 0x20 - 0x7F inclusive
# All others will appear as blanks
TINY_FONT = [
[0x00, 0x00, 0x00, 0x00], # 0x00
[0x00, 0x00, 0x00, 0x00], # 0x01
[0x00, 0x00, 0x00, 0x00], # 0x02
[0x00, 0x00, 0x00, 0x00], # 0x03
[0x00, 0x00, 0x00, 0x00], # 0x04
[0x00, 0x00, 0x00, 0x00], # 0x05
[0x00, 0x00, 0x00, 0x00], # 0x06
[0x00, 0x00, 0x00, 0x00], # 0x07
[0x00, 0x00, 0x00, 0x00], # 0x08
[0x00, 0x00, 0x00, 0x00], # 0x09
[0x00, 0x00, 0x00, 0x00], # 0x0A
[0x00, 0x00, 0x00, 0x00], # 0x0B
[0x00, 0x00, 0x00, 0x00], # 0x0C
[0x00, 0x00, 0x00, 0x00], # 0x0D
[0x00, 0x00, 0x00, 0x00], # 0x0E
[0x00, 0x00, 0x00, 0x00], # 0x0F
[0x00, 0x00, 0x00, 0x00], # 0x10
[0x00, 0x00, 0x00, 0x00], # 0x11
[0x00, 0x00, 0x00, 0x00], # 0x12
[0x00, 0x00, 0x00, 0x00], # 0x13
[0x00, 0x00, 0x00, 0x00], # 0x14
[0x00, 0x00, 0x00, 0x00], # 0x15
[0x00, 0x00, 0x00, 0x00], # 0x16
[0x00, 0x00, 0x00, 0x00], # 0x17
[0x00, 0x00, 0x00, 0x00], # 0x18
[0x00, 0x00, 0x00, 0x00], # 0x19
[0x00, 0x00, 0x00, 0x00], # 0x1A
[0x00, 0x00, 0x00, 0x00], # 0x1B
[0x00, 0x00, 0x00, 0x00], # 0x1C
[0x00, 0x00, 0x00, 0x00], # 0x1D
[0x00, 0x00, 0x00, 0x00], # 0x1E
[0x00, 0x00, 0x00, 0x00], # 0x1F
[0x00, 0x00, 0x00, 0x00], # ' '
[0x00, 0x2E, 0x00, 0x00], # '!'
[0x06, 0x00, 0x06, 0x00], # '"'
[0x3E, 0x14, 0x3E, 0x00], # '#'
[0x14, 0x3E, 0x14, 0x00], # '$'
[0x34, 0x08, 0x16, 0x00], # '%'
[0x34, 0x2A, 0x3A, 0x00], # '&'
[0x00, 0x06, 0x00, 0x00], # '''
[0x1C, 0x22, 0x00, 0x00], # '('
[0x00, 0x22, 0x1C, 0x00], # ')'
[0x14, 0x08, 0x14, 0x00], # '*'
[0x08, 0x1C, 0x08, 0x00], # '+'
[0x00, 0x30, 0x00, 0x00], # ','
[0x08, 0x08, 0x08, 0x00], # '-'
[0x00, 0x20, 0x00, 0x00], # '.'
[0x30, 0x08, 0x06, 0x00], # '/'
[0x1C, 0x22, 0x1C, 0x00], # '0'
[0x24, 0x3E, 0x20, 0x00], # '1'
[0x32, 0x2A, 0x24, 0x00], # '2'
[0x22, 0x2A, 0x14, 0x00], # '3'
[0x0E, 0x08, 0x3E, 0x00], # '4'
[0x2E, 0x2A, 0x12, 0x00], # '5'
[0x3E, 0x2A, 0x3A, 0x00], # '6'
[0x02, 0x3A, 0x06, 0x00], # '7'
[0x3E, 0x2A, 0x3E, 0x00], # '8'
[0x2E, 0x2A, 0x3E, 0x00], # '9'
[0x00, 0x14, 0x00, 0x00], # ':'
[0x00, 0x34, 0x00, 0x00], # ';'
[0x08, 0x14, 0x22, 0x00], # '<'
[0x14, 0x14, 0x14, 0x00], # '='
[0x22, 0x14, 0x08, 0x00], # '>'
[0x02, 0x2A, 0x06, 0x00], # '?'
[0x1C, 0x2A, 0x1C, 0x00], # '@'
[0x3C, 0x0A, 0x3C, 0x00], # 'A'
[0x3E, 0x2A, 0x14, 0x00], # 'B'
[0x1C, 0x22, 0x22, 0x00], # 'C'
[0x3E, 0x22, 0x1C, 0x00], # 'D'
[0x3E, 0x2A, 0x22, 0x00], # 'E'
[0x3E, 0x0A, 0x02, 0x00], # 'F'
[0x1C, 0x22, 0x3A, 0x00], # 'G'
[0x3E, 0x08, 0x3E, 0x00], # 'H'
[0x22, 0x3E, 0x22, 0x00], # 'I'
[0x32, 0x22, 0x3E, 0x00], # 'J'
[0x3E, 0x08, 0x36, 0x00], # 'K'
[0x3E, 0x20, 0x20, 0x00], # 'L'
[0x3E, 0x0C, 0x3E, 0x00], # 'M'
[0x3C, 0x02, 0x3E, 0x00], # 'N'
[0x3E, 0x22, 0x3E, 0x00], # 'O'
[0x3E, 0x0A, 0x0E, 0x00], # 'P'
[0x1E, 0x12, 0x3E, 0x00], # 'Q'
[0x3E, 0x0A, 0x36, 0x00], # 'R'
[0x2E, 0x2A, 0x3A, 0x00], # 'S'
[0x02, 0x3E, 0x02, 0x00], # 'T'
[0x3E, 0x20, 0x3E, 0x00], # 'U'
[0x1E, 0x20, 0x1E, 0x00], # 'V'
[0x3E, 0x18, 0x3E, 0x00], # 'W'
[0x36, 0x08, 0x36, 0x00], # 'X'
[0x0E, 0x38, 0x0E, 0x00], # 'Y'
[0x32, 0x2A, 0x26, 0x00], # 'Z'
[0x3E, 0x22, 0x00, 0x00], # '['
[0x06, 0x08, 0x30, 0x00], # '\'
[0x00, 0x22, 0x3E, 0x00], # ']'
[0x04, 0x02, 0x04, 0x00], # '^'
[0x20, 0x20, 0x20, 0x00], # '_'
[0x00, 0x02, 0x04, 0x00], # '`'
[0x10, 0x2A, 0x3C, 0x00], # 'a'
[0x3E, 0x28, 0x10, 0x00], # 'b'
[0x18, 0x24, 0x24, 0x00], # 'c'
[0x10, 0x28, 0x3E, 0x00], # 'd'
[0x1C, 0x2A, 0x2C, 0x00], # 'e'
[0x00, 0x3C, 0x0A, 0x00], # 'f'
[0x04, 0x2A, 0x3E, 0x00], # 'g'
[0x3E, 0x08, 0x38, 0x00], # 'h'
[0x00, 0x3A, 0x00, 0x00], # 'i'
[0x20, 0x3A, 0x00, 0x00], # 'j'
[0x3C, 0x10, 0x28, 0x00], # 'k'
[0x00, 0x3C, 0x00, 0x00], # 'l'
[0x3C, 0x08, 0x3C, 0x00], # 'm'
[0x38, 0x04, 0x38, 0x00], # 'n'
[0x18, 0x24, 0x18, 0x00], # 'o'
[0x3C, 0x14, 0x08, 0x00], # 'p'
[0x08, 0x14, 0x3C, 0x00], # 'q'
[0x3C, 0x08, 0x04, 0x00], # 'r'
[0x28, 0x3C, 0x14, 0x00], # 's'
[0x08, 0x3C, 0x28, 0x00], # 't'
[0x3C, 0x20, 0x3C, 0x00], # 'u'
[0x1C, 0x20, 0x1C, 0x00], # 'v'
[0x3C, 0x10, 0x3C, 0x00], # 'w'
[0x24, 0x18, 0x24, 0x00], # 'x'
[0x0C, 0x28, 0x3C, 0x00], # 'y'
[0x24, 0x34, 0x2C, 0x00], # 'z'
[0x14, 0x2A, 0x00, 0x00], # '{'
[0x00, 0x3E, 0x00, 0x00], # '|'
[0x00, 0x2A, 0x14, 0x00], # '}'
[0x04, 0x04, 0x0C, 0x00], # '~'
[0x00, 0x00, 0x00, 0x00], # 0x7F
[0x00, 0x00, 0x00, 0x00], # 0x80
[0x00, 0x00, 0x00, 0x00], # 0x81
[0x00, 0x00, 0x00, 0x00], # 0x82
[0x00, 0x00, 0x00, 0x00], # 0x83
[0x00, 0x00, 0x00, 0x00], # 0x84
[0x00, 0x00, 0x00, 0x00], # 0x85
[0x00, 0x00, 0x00, 0x00], # 0x86
[0x00, 0x00, 0x00, 0x00], # 0x87
[0x00, 0x00, 0x00, 0x00], # 0x88
[0x00, 0x00, 0x00, 0x00], # 0x89
[0x00, 0x00, 0x00, 0x00], # 0x8A
[0x00, 0x00, 0x00, 0x00], # 0x8B
[0x00, 0x00, 0x00, 0x00], # 0x8C
[0x00, 0x00, 0x00, 0x00], # 0x8D
[0x00, 0x00, 0x00, 0x00], # 0x8E
[0x00, 0x00, 0x00, 0x00], # 0x8F
[0x00, 0x00, 0x00, 0x00], # 0x90
[0x00, 0x00, 0x00, 0x00], # 0x91
[0x00, 0x00, 0x00, 0x00], # 0x92
[0x00, 0x00, 0x00, 0x00], # 0x93
[0x00, 0x00, 0x00, 0x00], # 0x94
[0x00, 0x00, 0x00, 0x00], # 0x95
[0x00, 0x00, 0x00, 0x00], # 0x96
[0x00, 0x00, 0x00, 0x00], # 0x97
[0x00, 0x00, 0x00, 0x00], # 0x98
[0x00, 0x00, 0x00, 0x00], # 0x99
[0x00, 0x00, 0x00, 0x00], # 0x9A
[0x00, 0x00, 0x00, 0x00], # 0x9B
[0x00, 0x00, 0x00, 0x00], # 0x9C
[0x00, 0x00, 0x00, 0x00], # 0x9D
[0x00, 0x00, 0x00, 0x00], # 0x9E
[0x00, 0x00, 0x00, 0x00], # 0x9F
[0x00, 0x00, 0x00, 0x00], # 0xA0
[0x00, 0x00, 0x00, 0x00], # 0xA1
[0x00, 0x00, 0x00, 0x00], # 0xA2
[0x00, 0x00, 0x00, 0x00], # 0xA3
[0x00, 0x00, 0x00, 0x00], # 0xA4
[0x00, 0x00, 0x00, 0x00], # 0xA5
[0x00, 0x00, 0x00, 0x00], # 0xA6
[0x00, 0x00, 0x00, 0x00], # 0xA7
[0x00, 0x00, 0x00, 0x00], # 0xA8
[0x00, 0x00, 0x00, 0x00], # 0xA9
[0x00, 0x00, 0x00, 0x00], # 0xAA
[0x00, 0x00, 0x00, 0x00], # 0xAB
[0x00, 0x00, 0x00, 0x00], # 0xAC
[0x00, 0x00, 0x00, 0x00], # 0xAD
[0x00, 0x00, 0x00, 0x00], # 0xAE
[0x00, 0x00, 0x00, 0x00], # 0xAF
[0x00, 0x00, 0x00, 0x00], # 0xB0
[0x00, 0x00, 0x00, 0x00], # 0xB1
[0x00, 0x00, 0x00, 0x00], # 0xB2
[0x00, 0x00, 0x00, 0x00], # 0xB3
[0x00, 0x00, 0x00, 0x00], # 0xB4
[0x00, 0x00, 0x00, 0x00], # 0xB5
[0x00, 0x00, 0x00, 0x00], # 0xB6
[0x00, 0x00, 0x00, 0x00], # 0xB7
[0x00, 0x00, 0x00, 0x00], # 0xB8
[0x00, 0x00, 0x00, 0x00], # 0xB9
[0x00, 0x00, 0x00, 0x00], # 0xBA
[0x00, 0x00, 0x00, 0x00], # 0xBB
[0x00, 0x00, 0x00, 0x00], # 0xBC
[0x00, 0x00, 0x00, 0x00], # 0xBD
[0x00, 0x00, 0x00, 0x00], # 0xBE
[0x00, 0x00, 0x00, 0x00], # 0xBF
[0x00, 0x00, 0x00, 0x00], # 0xC0
[0x00, 0x00, 0x00, 0x00], # 0xC1
[0x00, 0x00, 0x00, 0x00], # 0xC2
[0x00, 0x00, 0x00, 0x00], # 0xC3
[0x00, 0x00, 0x00, 0x00], # 0xC4
[0x00, 0x00, 0x00, 0x00], # 0xC5
[0x00, 0x00, 0x00, 0x00], # 0xC6
[0x00, 0x00, 0x00, 0x00], # 0xC7
[0x00, 0x00, 0x00, 0x00], # 0xC8
[0x00, 0x00, 0x00, 0x00], # 0xC9
[0x00, 0x00, 0x00, 0x00], # 0xCA
[0x00, 0x00, 0x00, 0x00], # 0xCB
[0x00, 0x00, 0x00, 0x00], # 0xCC
[0x00, 0x00, 0x00, 0x00], # 0xCD
[0x00, 0x00, 0x00, 0x00], # 0xCE
[0x00, 0x00, 0x00, 0x00], # 0xCF
[0x00, 0x00, 0x00, 0x00], # 0xD0
[0x00, 0x00, 0x00, 0x00], # 0xD1
[0x00, 0x00, 0x00, 0x00], # 0xD2
[0x00, 0x00, 0x00, 0x00], # 0xD3
[0x00, 0x00, 0x00, 0x00], # 0xD4
[0x00, 0x00, 0x00, 0x00], # 0xD5
[0x00, 0x00, 0x00, 0x00], # 0xD6
[0x00, 0x00, 0x00, 0x00], # 0xD7
[0x00, 0x00, 0x00, 0x00], # 0xD8
[0x00, 0x00, 0x00, 0x00], # 0xD9
[0x00, 0x00, 0x00, 0x00], # 0xDA
[0x00, 0x00, 0x00, 0x00], # 0xDB
[0x00, 0x00, 0x00, 0x00], # 0xDC
[0x00, 0x00, 0x00, 0x00], # 0xDD
[0x00, 0x00, 0x00, 0x00], # 0xDE
[0x00, 0x00, 0x00, 0x00], # 0xDF
[0x00, 0x00, 0x00, 0x00], # 0xE0
[0x00, 0x00, 0x00, 0x00], # 0xE1
[0x00, 0x00, 0x00, 0x00], # 0xE2
[0x00, 0x00, 0x00, 0x00], # 0xE3
[0x00, 0x00, 0x00, 0x00], # 0xE4
[0x00, 0x00, 0x00, 0x00], # 0xE5
[0x00, 0x00, 0x00, 0x00], # 0xE6
[0x00, 0x00, 0x00, 0x00], # 0xE7
[0x00, 0x00, 0x00, 0x00], # 0xE8
[0x00, 0x00, 0x00, 0x00], # 0xE9
[0x00, 0x00, 0x00, 0x00], # 0xEA
[0x00, 0x00, 0x00, 0x00], # 0xEB
[0x00, 0x00, 0x00, 0x00], # 0xEC
[0x00, 0x00, 0x00, 0x00], # 0xED
[0x00, 0x00, 0x00, 0x00], # 0xEE
[0x00, 0x00, 0x00, 0x00], # 0xEF
[0x00, 0x00, 0x00, 0x00], # 0xF0
[0x00, 0x00, 0x00, 0x00], # 0xF1
[0x00, 0x00, 0x00, 0x00], # 0xF2
[0x00, 0x00, 0x00, 0x00], # 0xF3
[0x00, 0x00, 0x00, 0x00], # 0xF4
[0x00, 0x00, 0x00, 0x00], # 0xF5
[0x00, 0x00, 0x00, 0x00], # 0xF6
[0x00, 0x00, 0x00, 0x00], # 0xF7
[0x00, 0x00, 0x00, 0x00], # 0xF8
[0x00, 0x00, 0x00, 0x00], # 0xF9
[0x00, 0x00, 0x00, 0x00], # 0xFA
[0x00, 0x00, 0x00, 0x00], # 0xFB
[0x00, 0x00, 0x00, 0x00], # 0xFC
[0x00, 0x00, 0x00, 0x00], # 0xFD
[0x00, 0x00, 0x00, 0x00], # 0xFE
[0x00, 0x00, 0x00, 0x00], # 0xFF
] # end of TINY_FONT
DEFAULT_FONT = CP437_FONT
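# --- Illustrative usage sketch (added note; not part of the original font data) ---
# Each table above is column-major: a character code indexes a list of column
# bytes, and each bit of a byte is one pixel in that column.  The helper below
# assumes bit 0 is the top pixel row (the usual MAX7219-style convention); if a
# display is oriented differently, the shift direction would need to change.
def render_glyph(char, font=DEFAULT_FONT):
    """Return the glyph for `char` as 8 strings of '#' (lit) and '.' (unlit)."""
    columns = font[ord(char) & 0xFF]   # column bytes for this character
    rows = []
    for row in range(8):               # 8 pixel rows, top to bottom
        rows.append(''.join('#' if (col >> row) & 1 else '.' for col in columns))
    return rows

# Example (commented out): render an uppercase 'A' from the default CP437 font.
# for line in render_glyph('A'):
#     print(line)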
``` |
{
"source": "jppdpf/YK-BiT-SDK-Python-1",
"score": 3
} |
#### File: YK-BiT-SDK-Python-1/sample/run_bit_sample.py
```python
import base64
from os import getenv
from yk_bit import BaseUrl, Key, capture, verify_images, verify, status, setup, BiTStatus
"""
First time running:
- Run the BiT App
- Run this SDK App
- If the setup was successful:
- Comment out the "setup" block
"""
# BiometricInThings API Environment Variables
EV_BASE_URL = getenv('YK_BIT_BASE_URL')
EV_API_KEY = getenv('YK_BIT_X_API_KEY')
BaseUrl.set(EV_BASE_URL)
Key.set(EV_API_KEY)
def base64_to_file(filename: str, data: str):
with open(f"{filename}.png", "wb") as fh:
fh.write(base64.decodebytes(data.encode('utf-8')))
if __name__ == "__main__":
# Setup
try:
print("Setting up BiT")
setup()
print(f"BiT Setup Successful. \n")
except Exception as ex:
print(f"BiT Setup unsuccessful. \n")
print(ex)
exit()
# Status
bit_availability = status()
print(f"BiT Availability: {bit_availability} \n")
if bit_availability == BiTStatus.Available:
# Capture
captured = capture(capture_timeout=10)
print(f"Capture: \n"
f"\t Status: {captured.capture_status} \n"
# f"\t Image: {captured.image} \n"
)
if captured.image is not None:
base64_to_file("captured", captured.image)
# Verify
verified = verify(reference_image=captured.image, capture_time_out=10, anti_spoofing=True)
print(f"Verify: \n"
f"\t Matching Score: {verified.matching_score} \n"
f"\t Status: {verified.verify_status} \n"
# f"\t Verified Image: {verified.verified_image} \n"
)
base64_to_file("verified", verified.verified_image)
# Verify Images
verified_images = verify_images(probe_image=captured.image, reference_image=verified.verified_image)
print(f"Verify Images: \n"
f"\t Matching Score: {verified_images.matching_score} \n"
f"\t Status: {verified_images.verify_images_status} \n")
```
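The sample above reads its endpoint and key from the `YK_BIT_BASE_URL` and `YK_BIT_X_API_KEY` environment variables. A minimal pre-flight sketch is shown below; the URL and key values are placeholders, not real credentials.

```python
import os

# Placeholder values -- substitute your own BiT endpoint and API key.
os.environ.setdefault('YK_BIT_BASE_URL', 'https://your-bit-device.example')
os.environ.setdefault('YK_BIT_X_API_KEY', 'your-api-key')

missing = [name for name in ('YK_BIT_BASE_URL', 'YK_BIT_X_API_KEY') if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
```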
#### File: yk_bit/models/capture_request.py
```python
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from yk_utils.models import Model
from yk_utils.models import deserialization
class CaptureRequest(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, capture_time_out: float=10.0, anti_spoofing: bool=True, live_quality_analysis: bool=False): # noqa: E501
"""CaptureRequest - a model defined in Swagger
:param capture_time_out: The capture_time_out of this CaptureRequest. # noqa: E501
:type capture_time_out: float
:param anti_spoofing: The anti_spoofing of this CaptureRequest. # noqa: E501
:type anti_spoofing: bool
:param live_quality_analysis: The live_quality_analysis of this CaptureRequest. # noqa: E501
:type live_quality_analysis: bool
"""
self.swagger_types = {
'capture_time_out': float,
'anti_spoofing': bool,
'live_quality_analysis': bool
}
self.attribute_map = {
'capture_time_out': 'capture_time_out',
'anti_spoofing': 'anti_spoofing',
'live_quality_analysis': 'live_quality_analysis'
}
self._capture_time_out = capture_time_out
self._anti_spoofing = anti_spoofing
self._live_quality_analysis = live_quality_analysis
@classmethod
def from_dict(cls, dikt) -> 'CaptureRequest':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The capture_request of this CaptureRequest. # noqa: E501
:rtype: CaptureRequest
"""
return deserialization.deserialize_model(dikt, cls)
@property
def capture_time_out(self) -> float:
"""Gets the capture_time_out of this CaptureRequest.
Capture timeout in seconds. # noqa: E501
:return: The capture_time_out of this CaptureRequest.
:rtype: float
"""
return self._capture_time_out
@capture_time_out.setter
def capture_time_out(self, capture_time_out: float):
"""Sets the capture_time_out of this CaptureRequest.
Capture timeout in seconds. # noqa: E501
:param capture_time_out: The capture_time_out of this CaptureRequest.
:type capture_time_out: float
"""
if capture_time_out is not None and capture_time_out < 0: # noqa: E501
raise ValueError("Invalid value for `capture_time_out`, must be a value greater than or equal to `0`") # noqa: E501
self._capture_time_out = capture_time_out
@property
def anti_spoofing(self) -> bool:
"""Gets the anti_spoofing of this CaptureRequest.
Activate anti-spoofing detection. # noqa: E501
:return: The anti_spoofing of this CaptureRequest.
:rtype: bool
"""
return self._anti_spoofing
@anti_spoofing.setter
def anti_spoofing(self, anti_spoofing: bool):
"""Sets the anti_spoofing of this CaptureRequest.
Activate anti-spoofing detection. # noqa: E501
:param anti_spoofing: The anti_spoofing of this CaptureRequest.
:type anti_spoofing: bool
"""
self._anti_spoofing = anti_spoofing
@property
def live_quality_analysis(self) -> bool:
"""Gets the live_quality_analysis of this CaptureRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images. # noqa: E501
:return: The live_quality_analysis of this CaptureRequest.
:rtype: bool
"""
return self._live_quality_analysis
@live_quality_analysis.setter
def live_quality_analysis(self, live_quality_analysis: bool):
"""Sets the live_quality_analysis of this CaptureRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images. # noqa: E501
:param live_quality_analysis: The live_quality_analysis of this CaptureRequest.
:type live_quality_analysis: bool
"""
self._live_quality_analysis = live_quality_analysis
```
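A minimal usage sketch for the generated model above; the import path is assumed to mirror the file layout (`yk_bit/models/capture_request.py`).

```python
from yk_bit.models.capture_request import CaptureRequest

# Build a request explicitly; the keyword defaults mirror the constructor signature.
req = CaptureRequest(capture_time_out=10.0, anti_spoofing=True, live_quality_analysis=False)
print(req.capture_time_out, req.anti_spoofing, req.live_quality_analysis)

# The capture_time_out setter enforces the documented constraint (value >= 0).
try:
    req.capture_time_out = -1
except ValueError as err:
    print(err)

# A model can also be built from a plain dict keyed by the attribute_map names.
req2 = CaptureRequest.from_dict({'capture_time_out': 5.0, 'anti_spoofing': False})
```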
#### File: YK-BiT-SDK-Python-1/yk_bit/utils.py
```python
import yk_utils.apis
class Key:
"""Manage YooniK BiometricInThings API Subscription Key."""
@classmethod
def set(cls, key: str):
"""Set the Subscription Key.
:param key:
:return:
"""
yk_utils.apis.Key.set(key)
class BaseUrl:
"""Manage YooniK BiometricInThings API Base URL."""
@classmethod
def set(cls, base_url: str):
yk_utils.apis.BaseUrl.set(base_url)
``` |
{
"source": "jppgibbs/aegis",
"score": 3
} |
#### File: jppgibbs/aegis/aegisui.py
```python
import subprocess
from subprocess import Popen, PIPE
import tkinter as tk
from tkinter import ttk, scrolledtext, Entry
from tkinter.ttk import Combobox
import requests
import logging
from time import sleep
win = tk.Tk()
win.title("Aegis")
win.minsize(width=1800, height=900)
win.maxsize(width=1800, height=900)
# Remote Extraction section labels
ttk.Label(win, text="Remote Extraction").place(x=80, y=1)
ttk.Label(win, text="Username:").place(x=5, y=30)
# Remote username entry
rUser = tk.StringVar()
remoteUser = ttk.Entry(win, width=30, textvariable=rUser, )
remoteUser.place(x=5, y=50)
# Password label
ttk.Label(win, text="Password:").place(x=5, y=75)
# Textbox Entry
rPass = tk.StringVar()
remotePass = ttk.Entry(win, width=30, textvariable=rPass, )
remotePass.place(x=5, y=95)
# Target IP label
ttk.Label(win, text="Target IP:").place(x=5, y=120)
# Textbox Entry
rIP = tk.StringVar()
remoteIP = ttk.Entry(win, width=30, textvariable=rIP, )
remoteIP.place(x=5, y=140)
remoteExtract = ttk.Button(win, width=29, text="Extract", command=lambda :rExtractToScroll())
remoteExtract.place(x=5, y=170)
def rExtractToScroll():
ClearText()
userEntered = remoteUser.get()
passEntered = remotePass.get()
ipEntered = remoteIP.get()
    rExtractcmd = 'python aegis.py --username '+userEntered+' --password '+passEntered+' --target '+ipEntered
stdout = Popen(rExtractcmd, shell=True, stdout=PIPE).stdout
rExtractOut = stdout.read()
scr.insert(tk.INSERT, rExtractOut)
# Local Extraction Label
ttk.Label(win, text="Local Extraction:").place(x=70, y=210)
localExplanation = "1. Open a command prompt as admin on the DC\n" \
"2. Run ntdsutil'ac i ntds''ifm''create full c:/temp' q q\n" \
"3. Extract c:/temp/Active Directory/ntds.dit and c:/temp/registry/SYSTEM to your computer running Aegis"
localMessage = tk.Message(win, width=220, text=localExplanation).place(x=5, y=235)
# SYSTEM file path Label
ttk.Label(win, text="SYSTEM file path:").place(x=5, y=382)
# Textbox Entry
sysInput = tk.StringVar()
systemInput = ttk.Entry(win, width=30, textvariable=sysInput, )
systemInput.place(x=5, y=402)
# ntds.dit file path Label
ttk.Label(win, text="ntds.dit file path:").place(x=5, y=427)
# Textbox Entry
ntdsInput = tk.StringVar()
ntdsutilInput = ttk.Entry(win, width=30, textvariable=ntdsInput, )
ntdsutilInput.place(x=5, y=447)
localExtract = ttk.Button(win, width=29, text="Extract", command=lambda : lExtractToScroll())
localExtract.place(x=5, y=477)
def lExtractToScroll():
ClearText()
sysEntered = systemInput.get()
ntdsEntered = ntdsutilInput.get()
lExtractcmd = 'python aegis.py --system '+sysEntered+' --ntds '+ntdsEntered+'.dit'
stdout = Popen(lExtractcmd, shell=True, stdout=PIPE).stdout
lExtractOut = stdout.read()
scr.insert(tk.INSERT, lExtractOut)
ttk.Label(win, text="Active Directory Evaluation").place(x=40, y=522)
# Pot filename Label
ttk.Label(win, text="Pot filename:").place(x=5, y=547)
# Textbox Entry
potInput = tk.StringVar()
potFileInput = ttk.Entry(win, width=30, textvariable=potInput )
potFileInput.place(x=5, y=567)
# Domain name Label
ttk.Label(win, text="Domain name:").place(x=5, y=592)
# Textbox Entry
domInput = tk.StringVar()
domainInput = ttk.Entry(win, width=30, textvariable=domInput )
domainInput.place(x=5, y=612)
# domainInput.focus()
# Using a scrolled Text control
scrolW = 168
scrolH = 52
scr = scrolledtext.ScrolledText(win, width=(scrolW), height=(scrolH), font=('Calibri', 10, 'bold'), wrap=tk.WORD, background="#f0f0f0")
scr.place(x=260, y=1)
def reportToScroll():
ClearText()
potEntered = potFileInput.get()
domainEntered = domainInput.get()
reportcmd = 'python aegis.py --pot ' + potEntered + '.pot --domain ' + domainEntered
stdout = Popen(reportcmd, shell=True, stdout=PIPE).stdout
reportOut = stdout.read()
scr.insert(tk.INSERT, reportOut)
def toPassCloud():
potEntered = potFileInput.get()
domainEntered = domainInput.get()
reportcmd = 'python aegis.py --pot ' + potEntered + '.pot --domain ' + domainEntered + ' --output password_cloud'
stdout = Popen(reportcmd, shell=True, stdout=PIPE).stdout
reportOut = stdout.read()
scr.insert(tk.INSERT, reportOut)
def ClearText():
scr.delete(1.0, tk.END)
# Report generation buttons
outputExtract = ttk.Button(win, width=29, text="Generate Report", command=lambda: reportToScroll())
outputExtract.place(x=5, y=647)
outputExtract = ttk.Button(win, width=29, text="Generate Password Cloud", command=lambda: toPassCloud())
outputExtract.place(x=5, y=682)
def checkEmail():
ClearText()
sleep(3)
userEmail= emailInput.get()
check = requests.get('https://haveibeenpwned.com/api/v2/breachedaccount/'+userEmail)
if check.status_code == 200:
# Account was breached
breachOut = (userEmail + ' has been involved in a breach\n')
scr.insert(tk.INSERT, breachOut)
elif check.status_code == 404:
# Not breached account
noBreachOut = (userEmail+' has not been involved in a breach\n')
scr.insert(tk.INSERT, noBreachOut)
elif check.status_code == 429:
#Has been throttled
rateExceedOut = 'Rate limit has been exceeded please try again shortly\n'
scr.insert(tk.INSERT, rateExceedOut)
sleep(3)
checkEmail()
else:
# Now this is strange
breachIssueOut = ('An issue has occurred while checking '+userEmail+'\n')
scr.insert(tk.INSERT, breachIssueOut)
sleep(3)
ttk.Label(win, text="Email Compromise Check").place(x=40, y=722)
# Email Label
ttk.Label(win, text="Email:").place(x=5, y=757)
# Textbox Entry
emInput = tk.StringVar()
emailInput = ttk.Entry(win, width=30, textvariable=emInput)
emailInput.place(x=5, y=777)
# emailInput.focus()
outputEmail = ttk.Button(win, width=29, text="Check Email", command=lambda: checkEmail())
outputEmail.place(x=5, y=812)
# Clear Console Button
clearButton = ttk.Button(win, width=30, text="Clear Console ", command=lambda: ClearText())
clearButton.place(x=5, y=862)
# Place cursor into name Entry
remotePass.focus()
# Start GUI
win.mainloop()
```
#### File: jppgibbs/aegis/database.py
```python
import itertools, string
from datetime import datetime
from random import shuffle
import zxcvbn
from tinydb import TinyDB, Query
from tinydb.middlewares import CachingMiddleware
from tinydb_serialization import Serializer, SerializationMiddleware
class DateTimeSerializer(Serializer):
OBJ_CLASS = datetime
def encode(self, obj):
return obj.strftime("%Y-%m-%dT%H:%M:%S")
def decode(self, s):
return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
class DomainDoesntExist(ValueError):
def __init__(self, message, tables):
self.message = message
self.tables = tables
class HashDatabase:
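    # NTLM hash of an empty string password; entries with this hash have blank passwords.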
BLANK_NTLMHASH = "31d6cfe0d16ae931b73c59d7e0c089c0"
def __init__(self, db_name, domain, raise_if_table_doesnt_exist=True, only_enabled=False, only_users=False):
self.db = None
self.table = None
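        # Reusable TinyDB query filters: when the corresponding flag is False the
        # expression reduces to "ntlmhash exists", which matches every record and
        # therefore acts as a no-op filter.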
self.only_enabled = (Query().enabled.exists() if only_enabled else Query().ntlmhash.exists()) & ( Query().enabled == True if only_enabled else Query().ntlmhash.exists())
self.only_users = (Query().username.exists() if only_users else Query().ntlmhash.exists()) & (Query().username.test(lambda v: not v.endswith("$")) if only_users else Query().ntlmhash.exists())
serialization = SerializationMiddleware()
serialization.register_serializer(DateTimeSerializer(), "datetime")
self.db = TinyDB(db_name, storage=CachingMiddleware(serialization))
tables = list(self.db.tables())
if raise_if_table_doesnt_exist and domain not in tables:
raise DomainDoesntExist("Hashes for domain '{}' do not exist in database.".format(domain), tables)
self.table = self.db.table(domain)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.db.close()
@property
def counts(self):
total = self.table.count(self.only_enabled & self.only_users)
local_users = self.table.count((~ Query().historic.exists()) & (Query().username.test(lambda v: "\\" not in v and not v.endswith("$"))) & self.only_users)
domain_users = self.table.count((~ Query().historic.exists()) & (Query().username.test(lambda v: "\\" in v and not v.endswith("$"))) & self.only_users)
computers = self.table.count(Query().username.test(lambda v: v.endswith("$")))
return total, local_users, domain_users, computers
@property
def user_counts(self):
enabled_users = self.table.search((Query().enabled == True) & (Query().username.test(lambda v: not v.endswith("$"))))
disabled_users = self.table.search((Query().enabled == False) & (Query().username.test(lambda v: not v.endswith("$"))))
return len(enabled_users), len(disabled_users)
@property
def password_stats(self):
cracked = self.table.count((Query().password.exists()) & (Query().password != "") & self.only_users & self.only_enabled)
blank = self.table.count(Query().ntlmhash == HashDatabase.BLANK_NTLMHASH)
historic = self.table.count((Query().historic.exists()) & self.only_enabled & self.only_users)
return cracked, blank, historic
@property
def all_passwords(self):
results = self.table.search((Query().password.exists()) & (Query().password != "") & self.only_users & self.only_enabled)
return [(result["password"], z<PASSWORD>.password_strength(result["password"])["score"]) for result in results]
@property
def password_composition_stats(self):
alphanum = string.ascii_letters + string.digits
only_alpha = self.table.count(Query().password.test(lambda p: p != "" and all(c in alphanum for c in p)))
with_special = self.table.count(Query().password.test(lambda p: p != "" and any(c not in alphanum for c in p)))
only_digits = self.table.count(Query().password.test(lambda p: p != "" and all(c in string.digits for c in p)))
return only_alpha, with_special, only_digits
def get_passwords(self, sortby, reverse=True, limit=10):
results = sorted(self.table.search((Query().password.exists()) & self.only_users & self.only_enabled), key=lambda r: r["password"])
passwords = ((password, len(list(count))) for password, count in itertools.groupby(results, lambda r: r["password"]))
return sorted(list(
(password, count, zxcvbn.password_strength(password)["score"], self.__get_users_with_password(password))
for password, count in passwords), key=sortby, reverse=reverse)[:limit]
def get_passwords_where(self, where):
return self.table.search((Query().password.exists()) & (Query().password.test(where)) & self.only_users & self.only_enabled)
def update_hash_password(self, hash, password):
self.table.update({"ntlmhash": hash, "password": password, "updated": datetime.now()}, Query().ntlmhash == hash)
def insert(self, record):
record["created"] = datetime.now()
self.table.insert(record)
def __get_users_with_password(self, password):
users = self.table.search(
(Query().password.exists()) & (Query().username.exists()) & (Query().password == password)
& self.only_users & self.only_enabled
)
shuffle(users)
return users
def __get_passwords_for_user(self, user):
passwords = sorted(self.table.search((Query().password.exists()) & (Query().password != "") & (Query().username.exists()) & (Query().username == user) & self.only_enabled), key=lambda r: r["password"])
grouped_passwords = ((password, users) for password, users in itertools.groupby(passwords, lambda r: r["password"]))
return list(grouped_passwords)
```
#### File: ldap3/strategy/asyncStream.py
```python
try:
from queue import Queue
except ImportError: # Python 2
# noinspection PyUnresolvedReferences
from Queue import Queue
from io import StringIO
from os import linesep
from ..protocol.rfc2849 import decode_persistent_search_control
from ..strategy.async import AsyncStrategy
from ..core.exceptions import LDAPLDIFError
from ..utils.conv import prepare_for_stream
from ..protocol.rfc2849 import persistent_search_response_to_ldif, add_ldif_header
# noinspection PyProtectedMember
class AsyncStreamStrategy(AsyncStrategy):
"""
This strategy is asynchronous. It streams responses in a generator as they appear in the self._responses container
"""
def __init__(self, ldap_connection):
AsyncStrategy.__init__(self, ldap_connection)
self.can_stream = True
self.line_separator = linesep
self.all_base64 = False
self.stream = None
self.order = dict()
self._header_added = False
self.persistent_search_message_id = None
self.streaming = False
self.callback = None
self.events = Queue()
del self._requests # remove _requests dict from Async Strategy
def _start_listen(self):
AsyncStrategy._start_listen(self)
if self.streaming:
if not self.stream or (isinstance(self.stream, StringIO) and self.stream.closed):
self.set_stream(StringIO())
def _stop_listen(self):
AsyncStrategy._stop_listen(self)
if self.streaming:
self.stream.close()
def accumulate_stream(self, message_id, change):
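        # Persistent-search results are not kept in _responses: they are either written
        # to the LDIF stream (when streaming) or queued / passed to the callback as events.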
if message_id == self.persistent_search_message_id:
with self.lock:
self._responses[message_id] = []
if self.streaming:
if not self._header_added and self.stream.tell() == 0:
header = add_ldif_header(['-'])[0]
self.stream.write(prepare_for_stream(header + self.line_separator + self.line_separator))
ldif_lines = persistent_search_response_to_ldif(change)
if self.stream and ldif_lines and not self.connection.closed:
fragment = self.line_separator.join(ldif_lines)
if not self._header_added and self.stream.tell() == 0:
self._header_added = True
header = add_ldif_header(['-'])[0]
self.stream.write(prepare_for_stream(header + self.line_separator + self.line_separator))
self.stream.write(prepare_for_stream(fragment + self.line_separator + self.line_separator))
else: # strategy is not streaming, events are added to a queue
notification = decode_persistent_search_control(change)
if notification:
change.update(notification)
del change['controls']['2.16.840.1.113730.3.4.7']
if not self.callback:
self.events.put(change)
else:
self.callback(change)
def get_stream(self):
if self.streaming:
return self.stream
return None
def set_stream(self, value):
error = False
try:
if not value.writable():
error = True
except (ValueError, AttributeError):
error = True
if error:
raise LDAPLDIFError('stream must be writable')
self.stream = value
self.streaming = True
```
#### File: ldap3/utils/hashed.py
```python
from .. import HASHED_NONE, HASHED_MD5, HASHED_SALTED_MD5, HASHED_SALTED_SHA, HASHED_SALTED_SHA256, \
HASHED_SALTED_SHA384, HASHED_SALTED_SHA512, HASHED_SHA, HASHED_SHA256, HASHED_SHA384, HASHED_SHA512
import hashlib
from os import urandom
from base64 import b64encode
from ..core.exceptions import LDAPInvalidHashAlgorithmError
# each tuple: (the string to include between braces in the digest, the name of the algorithm to invoke with the new() function)
algorithms_table = {
HASHED_MD5: ('md5', 'MD5'),
HASHED_SHA: ('sha', 'SHA1'),
HASHED_SHA256: ('sha256', 'SHA256'),
HASHED_SHA384: ('sha384', 'SHA384'),
HASHED_SHA512: ('sha512', 'SHA512')
}
salted_table = {
HASHED_SALTED_MD5: ('smd5', HASHED_MD5),
HASHED_SALTED_SHA: ('ssha', HASHED_SHA),
HASHED_SALTED_SHA256: ('ssha256', HASHED_SHA256),
HASHED_SALTED_SHA384: ('ssha384', HASHED_SHA384),
HASHED_SALTED_SHA512: ('ssha512', HASHED_SHA512)
}
def hashed(algorithm, value, salt=None, raw=False, encoding='utf-8'):
if str is not bytes and not isinstance(value, bytes): # Python 3
value = value.encode(encoding)
if algorithm is None or algorithm == HASHED_NONE:
return value
# algorithm name can be already coded in the ldap3 constants or can be any value passed in the 'algorithm' parameter
if algorithm in algorithms_table:
try:
digest = hashlib.new(algorithms_table[algorithm][1], value).digest()
except ValueError:
raise LDAPInvalidHashAlgorithmError('Hash algorithm ' + str(algorithm) + ' not available')
if raw:
return digest
return ('{%s}' % algorithms_table[algorithm][0]) + b64encode(digest).decode('ascii')
elif algorithm in salted_table:
if not salt:
salt = urandom(8)
digest = hashed(salted_table[algorithm][1], value + salt, raw=True) + salt
if raw:
return digest
return ('{%s}' % salted_table[algorithm][0]) + b64encode(digest).decode('ascii')
else:
        # if an algorithm unknown to the library is requested, its name is passed as the string in braces and as the algorithm name
        # if a salt is present it is used to salt the digest
try:
if not salt:
digest = hashlib.new(algorithm, value).digest()
else:
digest = hashlib.new(algorithm, value + salt).digest() + salt
except ValueError:
raise LDAPInvalidHashAlgorithmError('Hash algorithm ' + str(algorithm) + ' not available')
if raw:
return digest
return ('{%s}' % algorithm) + b64encode(digest).decode('ascii')
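# Example usage (illustrative): hashed(HASHED_SALTED_SHA, 'secret') returns
# '{ssha}' + base64(SHA1(value + salt) + salt), a form accepted by LDAP userPassword attributes.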
``` |
{
"source": "jppgibbs/bleAnalyser",
"score": 3
} |
#### File: jppgibbs/bleAnalyser/bleAnalyser.py
```python
from bluepy.btle import Scanner
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from datetime import datetime
import time
import Tkinter
from collections import defaultdict
from Tkinter import Label, Button, Entry
import json
import unicodedata
import yaml
import numpy as np
import pandas as pd
stop = 1
scans = []
addresses = []
dataVar = []
scanner = Scanner()
# Save Log Function
def saveData():
# Get current date/time for log title
now = datetime.now()
dt_string = now.strftime("%d_%m_%Y %H_%M_%S.log")
# Save log to disk in json format
f = open(dt_string, "a")
f.write(json.dumps(dataVar))
f.close()
# Scan loop
def scan(iterations):
# Repeat for the number of times entered in the text box
for i in range(iterations):
print('Scanning, iteration = ' + str(i+1))
devices = scanner.scan(1.0)
scans.append(devices)
deviceList = []
for device in devices:
deviceList.append({"Address": device.addr, "AddressType": device.addrType, "RSSI": device.rssi})
# When a new device is discovered, add a new entry
if device.addr not in addresses:
addresses.append(device.addr)
else:
print "Device Rediscovered"
print "Device %s (%s), RSSI=%d dB" % (device.addr, device.addrType, device.rssi)
for (adtype, desc, value) in device.getScanData():
print " %s = %s" % (desc, value)
dataVar.append(deviceList)
# Save data once scan is complete
saveData()
return True
# Stop scan button function
def stopScan():
stop = 1
saveData()
print('stop = ' + str(stop))
# Start scan button function
def startScan():
stop = 0
scan(int(e.get()))
def graph(fileName):
file = open(fileName, mode='r')
loadedData = file.read()
file.close()
addresses = []
loadedData = yaml.safe_load(loadedData)
plotData = []
polls = 1
for scan in loadedData:
polls = polls+1
for device in scan:
print(device)
if device['Address'] not in addresses:
addresses.append(device['Address'])
print(addresses)
for address in addresses:
plotData.append([])
i=0
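    # Build one RSSI time series per address; pad with -90 dBm when a device is absent from a scan.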
for scan in loadedData:
i=i+1
for device in scan:
index = addresses.index(device['Address'])
plotData[index].append(device['RSSI'])
for j in range(len(addresses)):
if len(plotData[j]) < i:
plotData[j].append(-90)
print(plotData)
dataFramePrep = { 'x': range(1,polls)}
for i in range(len(addresses)):
dataFramePrep[addresses[i]] = plotData[i]
print(dataFramePrep)
df=pd.DataFrame(dataFramePrep)
for i in range(len(addresses)):
plt.subplot(len(addresses), 1, i+1)
plt.plot('x', addresses[i], data=df, marker='x', markersize=0, linewidth='2')
plt.legend(loc='upper left')
plt.show()
# -- Build UI --
# Create window
window=Tkinter.Tk()
window.minsize(200,250)
window.title("BLE Scanner")
# Create time label
timeLabel = Label(text='Enter scan duration:')
timeLabel.pack()
# Create time entry box
e = Entry(window)
e.pack()
e.focus_set()
# Create start button
startButton = Button(window, text='Start Scanning', command=lambda : startScan())
startButton.config(height = 2, width = 20)
startButton.pack()
# Create stop button
stopButton = Button(window, text='Stop Scanning', command=lambda : stopScan())
stopButton.config(height = 2, width = 20)
stopButton.pack()
# Create graph label
graphLabel = Label(text='Enter log file to graph:')
graphLabel.pack()
# Create log file entry box
eGraph = Entry(window)
eGraph.pack()
# Create generate graph button
graphButton = Button(window, text='Generate Graph', command=lambda : graph(str(eGraph.get())))
graphButton.config(height = 2, width = 20)
graphButton.pack()
# Run window
window.mainloop()
``` |
{
"source": "jppgks/kfp-tekton",
"score": 2
} |
#### File: resources/definition/training_pipeline.py
```python
import kfp
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret
sagemaker_train_op = components.load_component_from_file("../../train/component.yaml")
@dsl.pipeline(name="SageMaker Training", description="SageMaker training job test")
def training_pipeline(
region="",
endpoint_url="",
image="",
training_input_mode="",
hyperparameters="",
channels="",
instance_type="",
instance_count="",
volume_size="",
max_run_time="",
model_artifact_path="",
output_encryption_key="",
network_isolation="",
traffic_encryption="",
spot_instance="",
max_wait_time="",
checkpoint_config="{}",
vpc_security_group_ids="",
vpc_subnets="",
role="",
):
sagemaker_train_op(
region=region,
endpoint_url=endpoint_url,
image=image,
training_input_mode=training_input_mode,
hyperparameters=hyperparameters,
channels=channels,
instance_type=instance_type,
instance_count=instance_count,
volume_size=volume_size,
max_run_time=max_run_time,
model_artifact_path=model_artifact_path,
output_encryption_key=output_encryption_key,
network_isolation=network_isolation,
traffic_encryption=traffic_encryption,
spot_instance=spot_instance,
max_wait_time=max_wait_time,
checkpoint_config=checkpoint_config,
vpc_security_group_ids=vpc_security_group_ids,
vpc_subnets=vpc_subnets,
role=role,
)
if __name__ == "__main__":
kfp.compiler.Compiler().compile(
training_pipeline, "SageMaker_training_pipeline" + ".yaml"
)
```
#### File: samples/e2e-mnist/e2e-mnist.py
```python
import json
from string import Template
import kfp
from kfp import components
from kfp.components import func_to_container_op
import kfp.dsl as dsl
# ibmc-file-gold is the recommended ReadWriteMany storageclass for IBM Cloud.
storageclass = 'ibmc-file-gold'
model_name = "mnist-demo"
user_namespace = "anonymous"
def convert_mnist_experiment_result(experiment_result) -> str:
import json
r = json.loads(experiment_result)
args = []
for hp in r:
print(hp)
args.append("%s=%s" % (hp["name"], hp["value"]))
return " ".join(args)
def add_istio_annotation(op):
op.add_pod_annotation(name='sidecar.istio.io/inject', value='false')
return op
@dsl.pipeline(
name="End to end pipeline",
description="An end to end example including hyperparameter tuning, train and inference."
)
def mnist_pipeline(
name=model_name,
namespace=user_namespace,
storageclass=storageclass,
step=4000):
# step 1: create a Katib experiment to tune hyperparameters
objectiveConfig = {
"type": "minimize",
"goal": 0.001,
"objectiveMetricName": "loss",
}
algorithmConfig = {"algorithmName" : "random"}
parameters = [
{"name": "--tf-learning-rate", "parameterType": "double", "feasibleSpace": {"min": "0.01","max": "0.03"}},
{"name": "--tf-batch-size", "parameterType": "discrete", "feasibleSpace": {"list": ["16", "32", "64"]}},
]
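    # Each Katib trial instantiates the TFJob template below (1 chief + 3 workers),
    # appending the sampled hyperparameters to the training command.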
rawTemplate = {
"apiVersion": "kubeflow.org/v1",
"kind": "TFJob",
"metadata": {
"name": "{{.Trial}}",
"namespace": "{{.NameSpace}}"
},
"spec": {
"tfReplicaSpecs": {
"Chief": {
"replicas": 1,
"restartPolicy": "OnFailure",
"template": {
"spec": {
"containers": [
{
"command": [
"sh",
"-c"
],
"args": [
"python /opt/model.py --tf-train-steps=2000 {{- with .HyperParameters}} {{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow"
}
]
}
}
},
"Worker": {
"replicas": 3,
"restartPolicy": "OnFailure",
"template": {
"spec": {
"containers": [
{
"command": [
"sh",
"-c"
],
"args": [
"python /opt/model.py --tf-train-steps=2000 {{- with .HyperParameters}} {{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow"
}
]
}
}
}
}
}
}
trialTemplate = {
"goTemplate": {
"rawTemplate": json.dumps(rawTemplate)
}
}
metricsCollectorSpec = {
"source": {
"fileSystemPath": {
"path": "/tmp/tf",
"kind": "Directory"
}
},
"collector": {
"kind": "TensorFlowEvent"
}
}
katib_experiment_launcher_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml')
op1 = katib_experiment_launcher_op(
experiment_name=name,
experiment_namespace=namespace,
parallel_trial_count=3,
max_trial_count=12,
objective=str(objectiveConfig),
algorithm=str(algorithmConfig),
trial_template=str(trialTemplate),
parameters=str(parameters),
metrics_collector=str(metricsCollectorSpec),
# experiment_timeout_minutes=experimentTimeoutMinutes,
delete_finished_experiment=False)
    # step 2: create a TFJob to train your model with the best hyperparameters tuned by Katib
tfjobjson_template = Template("""
{
"apiVersion": "kubeflow.org/v1",
"kind": "TFJob",
"metadata": {
"name": "$name",
"namespace": "$namespace",
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"tfReplicaSpecs": {
"Chief": {
"replicas": 1,
"restartPolicy": "OnFailure",
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"volumes": [
{
"name": "export-model",
"persistentVolumeClaim": {
"claimName": "$modelpvc"
}
}
],
"containers": [
{
"command": [
"sh",
"-c"
],
"args": [
"python /opt/model.py --tf-train-steps=$step --tf-export-dir=/mnt/export $args"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow",
"volumeMounts": [
{
"mountPath": "/mnt/export",
"name": "export-model"
}
]
}
]
}
}
},
"Worker": {
"replicas": 3,
"restartPolicy": "OnFailure",
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"volumes": [
{
"name": "export-model",
"persistentVolumeClaim": {
"claimName": "$modelpvc"
}
}
],
"containers": [
{
"command": [
"sh",
"-c"
],
"args": [
"python /opt/model.py --tf-train-steps=$step --tf-export-dir=/mnt/export $args"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow",
"volumeMounts": [
{
"mountPath": "/mnt/export",
"name": "export-model"
}
]
}
]
}
}
}
}
}
}
""")
convert_op = func_to_container_op(convert_mnist_experiment_result)
op2 = convert_op(op1.output)
volume_template = Template("""
{
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"metadata": {
"name": "{{workflow.name}}-modelpvc",
"namespace": "$namespace"
},
"spec": {
"accessModes": ["ReadWriteMany"],
"resources": {
"requests": {
"storage": "1Gi"
}
},
"storageClassName": "$storageclass"
}
}
""")
volopjson = volume_template.substitute({'namespace': namespace, 'storageclass': storageclass})
volop = json.loads(volopjson)
modelvolop = dsl.ResourceOp(
name="modelpvc",
k8s_resource=volop
)
tfjobjson = tfjobjson_template.substitute(
{'args': op2.output,
'name': name,
'namespace': namespace,
'step': step,
'modelpvc': modelvolop.outputs["name"]
})
tfjob = json.loads(tfjobjson)
train = dsl.ResourceOp(
name="train",
k8s_resource=tfjob,
success_condition='status.replicaStatuses.Worker.succeeded==3,status.replicaStatuses.Chief.succeeded==1'
)
    # step 3: model inference via a KFServing InferenceService
inferenceservice_template = Template("""
{
"apiVersion": "serving.kubeflow.org/v1alpha2",
"kind": "InferenceService",
"metadata": {
"name": "$name",
"namespace": "$namespace"
},
"spec": {
"default": {
"predictor": {
"tensorflow": {
"storageUri": "pvc://$modelpvc/"
}
}
}
}
}
""")
inferenceservicejson = inferenceservice_template.substitute({'modelpvc': modelvolop.outputs["name"],
'name': name,
'namespace': namespace})
inferenceservice = json.loads(inferenceservicejson)
inference = dsl.ResourceOp(
name="inference",
k8s_resource=inferenceservice,
success_condition='status.url').after(train)
dsl.get_pipeline_conf().add_op_transformer(add_istio_annotation)
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(mnist_pipeline, __file__.replace('.py', '.yaml'))
``` |
{
"source": "jppgks/materials-synthesis-generative-models",
"score": 2
} |
#### File: materials-synthesis-generative-models/models/material_generator.py
```python
import numpy as np
from keras import backend as K
from keras import metrics
from keras.layers import (GRU, Conv1D, Dense, Embedding, Flatten, Input,
Lambda, RepeatVector, TimeDistributed)
from keras.layers.merge import Concatenate, Subtract
from keras.models import Model
from keras.optimizers import Adam
class MaterialGenerator(object):
def __init__(self):
self.vae = None
self.encoder = None
self.decoder = None
def build_nn_model(self, element_dim=103,
conv_window=3, conv_filters=64,
rnn_dim=64, recipe_latent_dim=8,
intermediate_dim=64, latent_dim=8,
max_material_length=10, charset_size=50,):
self.latent_dim = latent_dim
self.recipe_latent_dim = recipe_latent_dim
self.original_dim = max_material_length * charset_size
x_mat = Input(shape=(max_material_length, charset_size), name="material_in")
conv_x1 = Conv1D(conv_filters, conv_window, padding="valid", activation="relu", name='conv_enc_1')(x_mat)
conv_x2 = Conv1D(conv_filters, conv_window, padding="valid", activation="relu", name='conv_enc_2')(conv_x1)
conv_x3 = Conv1D(conv_filters, conv_window, padding="valid", activation="relu", name='conv_enc_3')(conv_x2)
h_flatten = Flatten()(conv_x3)
h = Dense(intermediate_dim, activation="relu", name="hidden_enc")(h_flatten)
z_mean_func = Dense(latent_dim, name="means_enc")
z_log_var_func = Dense(latent_dim, name="vars_enc")
z_mean = z_mean_func(h)
z_log_var = z_log_var_func(h)
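        # Reparameterization trick: z = mean + exp(log_var / 2) * epsilon, with epsilon ~ N(0, 1).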
def sample(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(latent_dim,), mean=0.0, stddev=1.0)
return z_mean + K.exp(z_log_var / 2) * epsilon
z = Lambda(sample, name="lambda_sample")([z_mean, z_log_var])
c_element = Input(shape=(element_dim,), name="cond_element_in")
c_latent_recipe = Input(shape=(recipe_latent_dim,), name="cond_latent_recipe_in")
z_conditional = Concatenate(name="concat_cond")([z, c_latent_recipe, c_element])
decoder_h = Dense(intermediate_dim, activation="relu", name="hidden_dec")
decoder_h_repeat = RepeatVector(max_material_length, name="h_rep_dec")
decoder_h_gru_1 = GRU(rnn_dim, return_sequences=True, name="recurrent_dec_1")
decoder_h_gru_2 = GRU(rnn_dim, return_sequences=True, name="recurrent_dec_2")
decoder_h_gru_3 = GRU(rnn_dim, return_sequences=True, name="recurrent_dec_3")
decoder_mat = TimeDistributed(Dense(charset_size, activation='softmax'), name="means_material_dec")
h_decoded = decoder_h(z_conditional)
h_decode_repeat = decoder_h_repeat(h_decoded)
gru_h_decode_1 = decoder_h_gru_1(h_decode_repeat)
gru_h_decode_2 = decoder_h_gru_2(gru_h_decode_1)
gru_h_decode_3 = decoder_h_gru_3(gru_h_decode_2)
x_decoded_mat = decoder_mat(gru_h_decode_3)
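        # VAE loss = reconstruction cross-entropy + KL divergence from the approximate posterior to the unit Gaussian prior.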
def vae_xent_loss(x, x_decoded_mean):
x = K.flatten(x)
x_decoded_mean = K.flatten(x_decoded_mean)
rec_loss = self.original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return rec_loss + kl_loss
encoder = Model(inputs=[x_mat], outputs=[z_mean])
decoder_x_input = Input(shape=(latent_dim,))
decoder_inputs = Concatenate(name="concat_cond_dec")([decoder_x_input, c_latent_recipe, c_element])
_h_decoded = decoder_h(decoder_inputs)
_h_decode_repeat = decoder_h_repeat(_h_decoded)
_gru_h_decode_1 = decoder_h_gru_1(_h_decode_repeat)
_gru_h_decode_2 = decoder_h_gru_2(_gru_h_decode_1)
_gru_h_decode_3 = decoder_h_gru_3(_gru_h_decode_2)
_x_decoded_mat = decoder_mat(_gru_h_decode_3)
decoder = Model(inputs=[decoder_x_input, c_latent_recipe, c_element],
outputs=[_x_decoded_mat])
vae = Model(inputs=[x_mat, c_latent_recipe, c_element],
outputs=[x_decoded_mat])
vae.compile(
optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=True),
loss=vae_xent_loss,
metrics=['categorical_accuracy']
)
self.vae = vae
self.encoder = encoder
self.decoder = decoder
def train(self, inputs, outputs, epochs=200, val_data=None, val_split=0.0, batch_size=16):
fitting_results = self.vae.fit(
x=inputs,
y=outputs,
epochs=epochs,
validation_data=val_data,
validation_split=val_split,
batch_size=batch_size
)
return fitting_results.history
def save_models(self, model_variant="default", save_path="bin"):
self.vae.save_weights(f"{save_path}/{model_variant}_mat_gen_vae.h5")
self.encoder.save_weights(f"{save_path}/{model_variant}_mat_gen_encoder.h5")
self.decoder.save_weights(f"{save_path}/{model_variant}_mat_gen_decoder.h5")
def load_models(self, model_variant="default", load_path="bin"):
self.vae.load_weights(f"{load_path}/{model_variant}_mat_gen_vae.h5")
self.encoder.load_weights(f"{load_path}/{model_variant}_mat_gen_encoder.h5")
self.decoder.load_weights(f"{load_path}/{model_variant}_mat_gen_decoder.h5")
``` |
{
"source": "jppgks/proc-gen",
"score": 2
} |
#### File: proc_gen/data/example_tokenizer.py
```python
from copy import deepcopy
__all__ = ["tokenize_example", "detokenize_example"]
from proc_gen.data.to_example import TranslationExample, SPECIAL_TOKENS, REQUIREMENT_SEP
def tokenize_example(
example: TranslationExample, tokenizer="moses"
) -> TranslationExample:
if tokenizer not in ("moses",):
raise NotImplementedError("Only moses tokenizer currently supported.")
example = deepcopy(example)
if tokenizer == "moses":
from sacremoses import MosesTokenizer
tokenizer = MosesTokenizer("en")
tokenizer_kwargs = {
"aggressive_dash_splits": True,
"return_str": True,
"escape": False,
"protected_patterns": SPECIAL_TOKENS, # Protect special tokens
}
tokenize = tokenizer.tokenize
example.src = tokenize(example.src, **tokenizer_kwargs)
example.tgt = tokenize(example.tgt, **tokenizer_kwargs)
return example
def detokenize_example(
example: TranslationExample, tokenizer="moses"
) -> TranslationExample:
if tokenizer != "moses":
raise NotImplementedError("Only moses tokenizer is currently supported.")
example = deepcopy(example)
if tokenizer == "moses":
from sacremoses import MosesDetokenizer
tokenizer = MosesDetokenizer("en")
example.src = tokenizer.detokenize(example.src.split())
example.tgt = tokenizer.detokenize(example.tgt.split())
return example
```
#### File: proc_gen/data/to_example.py
```python
from dataclasses import dataclass
from typing import Iterable, List, Union
from proc_gen.data.schema import Procedure, Requirement, Method
from proc_gen.problems import Problem
__all__ = [
"procedure_to_example",
"TranslationExample",
"SPECIAL_TOKENS",
"REQUIREMENT_SEP",
"string_to_requirements",
"string_to_tasks",
"example_to_procedure",
]
TARGET_PRODUCT_SEP = "<tps>"
REQUIREMENT_SEP = "<eor>"
TASK_SEP = "<eot>"
REQUIREMENTS_TASKS_SEP = REQUIREMENTS_TP_SEP = "<rts>"
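# Separator tokens used to join and split requirements, tasks, and the target product in the src/tgt strings.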
SPECIAL_TOKENS = [
REQUIREMENT_SEP,
TASK_SEP,
TARGET_PRODUCT_SEP,
REQUIREMENTS_TASKS_SEP,
REQUIREMENTS_TP_SEP,
]
@dataclass
class TranslationExample:
src: str
tgt: str
def tasks_to_string(tasks: List[str], tp: str = None) -> str:
if tp:
return tp + f" {TARGET_PRODUCT_SEP} " + tasks_to_string(tasks)
return f" {TASK_SEP} ".join(tasks)
def string_to_tasks(tasks_string: str, parse_tp=False) -> List[str]:
if parse_tp:
tp, tasks_string = tasks_string.split(f" {TARGET_PRODUCT_SEP} ")
return [tp] + string_to_tasks(tasks_string)
else:
return tasks_string.split(f" {TASK_SEP} ")
def requirements_to_string(
requirements: Iterable[Requirement], tp: str = None, tp_last=False
) -> str:
if tp:
if tp_last:
return requirements_to_string(requirements) + f" {TARGET_PRODUCT_SEP} " + tp
else:
return tp + f" {TARGET_PRODUCT_SEP} " + requirements_to_string(requirements)
req_str = f" {REQUIREMENT_SEP} ".join((req.to_string() for req in requirements))
return req_str
def string_to_requirements(
requirements_string: str, parse_tp=False
) -> List[Union[str, Requirement]]:
if parse_tp:
tp, requirements_string = requirements_string.split(f" {TARGET_PRODUCT_SEP} ")
return [tp] + string_to_requirements(requirements_string)
else:
req_strings = requirements_string.split(f" {REQUIREMENT_SEP} ")
return [Requirement.from_string(req_str) for req_str in req_strings]
def procedure_to_example(proc: Procedure, problem: str) -> TranslationExample:
assert len(proc.methods) > 0, (
f"Procedure {proc} didn't contain any methods. "
f"Cannot convert to translation example."
)
method = proc.methods[0]
if problem is Problem.Requirements_TO_TargetProductAndTasks:
src_entry = requirements_to_string(method.requirements)
tgt_entry = tasks_to_string(method.tasks, tp=proc.target_product)
elif problem is Problem.RequirementsAndTargetProductAndTasks:
src_entry = (
requirements_to_string(method.requirements)
+ f" {REQUIREMENTS_TP_SEP} "
+ tasks_to_string(method.tasks, tp=proc.target_product)
)
# language modeling task has no target language
tgt_entry = ""
elif problem is Problem.TargetProductAndRequirements_TO_Tasks:
tp_last = True
src_entry = requirements_to_string(
method.requirements, tp=proc.target_product, tp_last=tp_last
)
tgt_entry = tasks_to_string(method.tasks)
elif problem is Problem.TargetProductAndRequirementsAndTasks:
src_entry = (
requirements_to_string(method.requirements, tp=proc.target_product)
+ f" {REQUIREMENTS_TASKS_SEP} "
+ tasks_to_string(method.tasks)
)
# language modeling task has no target language
tgt_entry = ""
elif problem is Problem.Requirements_TO_TargetProduct:
src_entry = requirements_to_string(method.requirements)
tgt_entry = proc.target_product
elif problem is Problem.TargetProduct_TO_Requirements:
src_entry = proc.target_product
tgt_entry = requirements_to_string(method.requirements)
elif problem is Problem.Tasks_TO_TargetProduct:
src_entry = tasks_to_string(method.tasks)
tgt_entry = proc.target_product
else:
raise NotImplementedError(
f"Unable to parse translation example. No parsing implementation for problem {problem}."
)
return TranslationExample(src=src_entry, tgt=tgt_entry)
def example_to_procedure(example: TranslationExample, problem: str) -> Procedure:
method = Method(requirements=[], tasks=[])
proc = Procedure(target_product="", methods=[method])
if problem is Problem.Requirements_TO_TargetProductAndTasks:
method.requirements = string_to_requirements(example.src)
tp_and_tasks = string_to_tasks(example.tgt, parse_tp=True)
proc.target_product, method.tasks = tp_and_tasks[0], tp_and_tasks[1:]
elif problem is Problem.TargetProductAndRequirements_TO_Tasks:
tp_and_reqs = string_to_requirements(example.src, parse_tp=True)
proc.target_product, method.requirements = tp_and_reqs[0], tp_and_reqs[1:]
method.tasks = string_to_tasks(example.tgt)
elif problem is Problem.Requirements_TO_TargetProduct:
method.requirements = string_to_requirements(example.src)
proc.target_product = example.tgt
elif problem is Problem.TargetProduct_TO_Requirements:
proc.target_product = example.src
method.requirements = string_to_requirements(example.tgt)
elif problem is Problem.Tasks_TO_TargetProduct:
method.tasks = string_to_tasks(example.src)
proc.target_product = example.tgt
else:
raise NotImplementedError(
f"Unable to parse procedure "
f"from translation example {example}. No parsing implementation for problem {problem} "
)
assert proc.target_product
assert proc.methods[0].requirements and proc.methods[0].tasks
return proc
``` |
{
"source": "jpphooper/ds-art",
"score": 2
} |
#### File: src/route/object.py
```python
import streamlit as st
def app():
st.title('Data Art - Create an Object')
```
#### File: src/utils/sound.py
```python
import librosa
from librosa.display import waveshow, specshow
import streamlit as st
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from voxelfuse.voxel_model import VoxelModel
from voxelfuse.mesh import Mesh
from voxelfuse.primitives import generateMaterials
class ExtractMusicFeatures:
def __init__(self,
filepath,
duration,
offset,
sampling_rate,
hop_length,
n_mfcc):
self.filepath = filepath
self.duration = duration
self.offset = offset
self.sampling_rate = sampling_rate
self.hop_length = hop_length
self.n_mfcc = n_mfcc
y, sr = librosa.load(
self.filepath, sr=self.sampling_rate, duration=self.duration, offset=self.offset)
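        # Derive tempo/beats, harmonic vs. percussive components, MFCCs, chroma, and
        # beat-synchronous aggregates from the loaded waveform.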
harm_perc_dict = self._extract_harmonic_percussive(y)
tempo_beat_frame_dict = self._extract_beats(y, sr)
mfcc_dict = self._extract_mfcc(y)
beat_mfcc_delta_dict = self._extract_beat_mfcc_delta(
mfcc_dict['mfcc'], tempo_beat_frame_dict['beat_frames'])
chromagram_dict = self._extract_chromagram(harm_perc_dict['y_harm'])
beat_chroma_dict = self._extract_beat_chroma(chromagram_dict['chromagram'],
tempo_beat_frame_dict['beat_frames'])
music_features = {'y': y,
'sr': sr,
**self._extract_beats(y, sr),
**harm_perc_dict,
**mfcc_dict,
**beat_mfcc_delta_dict,
**chromagram_dict,
**beat_chroma_dict,
**self._extract_beat_features(beat_chroma_dict['beat_chroma'],
beat_mfcc_delta_dict['beat_mfcc_delta'])
}
self.music_features = music_features
def _extract_beats(self, y, sr) -> dict:
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
return {'tempo': tempo, 'beat_frames': beat_frames}
def _extract_harmonic_percussive(self, y) -> dict:
y_harm, y_perc = librosa.effects.hpss(y)
return {'y_harm': y_harm, 'y_perc': y_perc}
def _extract_mfcc(self, y):
mfcc = librosa.feature.mfcc(
y=y,
sr=self.sampling_rate,
hop_length=self.hop_length,
n_mfcc=self.n_mfcc)
return {'mfcc': mfcc}
def _extract_beat_mfcc_delta(self, mfcc, beat_frames) -> dict:
beat_mfcc_delta = librosa.util.sync(
np.vstack([mfcc, librosa.feature.delta(mfcc)]), beat_frames)
return {'beat_mfcc_delta': beat_mfcc_delta}
def _extract_chromagram(self, y_harm) -> dict:
chromagram = librosa.feature.chroma_cqt(
y=y_harm, sr=self.sampling_rate)
return {'chromagram': chromagram}
def _extract_beat_chroma(self, chromagram, beat_frames) -> dict:
beat_chroma = librosa.util.sync(
chromagram, beat_frames, aggregate=np.median)
return {'beat_chroma': beat_chroma}
def _extract_beat_features(self, beat_chroma, beat_mfcc_delta):
beat_features = np.vstack([beat_chroma, beat_mfcc_delta])
return {'beat_features': beat_features}
def visualise_waveshow(self, waveshow_list):
fig, ax = plt.subplots(nrows=1, figsize=(30, 7))
if 'Mono' in waveshow_list:
waveshow(
self.music_features['y'], sr=self.music_features['sr'], alpha=0.5, ax=ax, color='b', label='Mono')
if 'Percussive' in waveshow_list:
waveshow(
self.music_features['y_perc'], sr=self.music_features['sr'], alpha=0.5, ax=ax, color='r', label='Percussive')
if 'Harmonic' in waveshow_list:
waveshow(
self.music_features['y_harm'], sr=self.music_features['sr'], alpha=0.5, ax=ax, color='g', label='Harmonic')
ax.set(title='Wave Show')
ax.label_outer()
ax.legend()
return fig
def visualise_specshow(self, spec_option):
fig, ax = plt.subplots(nrows=1, figsize=(30, 7))
if spec_option == 'Chromagram':
specshow(self.music_features['chromagram'], sr=self.music_features['sr'],
hop_length=self.hop_length, cmap='YlOrBr')
ax.set(title=f'Spec Show - {spec_option}')
ax.label_outer()
ax.legend()
elif spec_option == 'MFCC':
specshow(self.music_features['mfcc'], sr=self.music_features['sr'],
hop_length=self.hop_length, cmap='YlOrBr')
ax.set(title=f'Spec Show - {spec_option}')
ax.label_outer()
ax.legend()
elif spec_option == 'Beat MFCC Delta':
specshow(self.music_features['beat_mfcc_delta'], sr=self.music_features['sr'],
hop_length=self.hop_length, cmap='YlOrBr')
ax.set(title=f'Spec Show - {spec_option}')
ax.label_outer()
ax.legend()
elif spec_option == 'Beat Chroma':
specshow(self.music_features['beat_chroma'], sr=self.music_features['sr'],
hop_length=self.hop_length, cmap='YlOrBr')
ax.set(title=f'Spec Show - {spec_option}')
ax.label_outer()
ax.legend()
elif spec_option == 'Beat Features':
specshow(self.music_features['beat_features'], sr=self.music_features['sr'],
hop_length=self.hop_length, cmap='YlOrBr')
ax.set(title=f'Spec Show - {spec_option}')
ax.label_outer()
ax.legend()
return fig
def visualise_tile(self, final_tile_option, size_of_tile):
fig, ax = plt.subplots(nrows=2, figsize=(30, 7))
music_feature_options = {
'Harmonic': self.music_features['y_harm'],
'Percussive': self.music_features['y_perc'],
'Mono': self.music_features['y'],
'Chromagram': self.music_features['chromagram']
}
first_arr = music_feature_options[final_tile_option[0]]
        if 'Chromagram' not in final_tile_option:
if len(final_tile_option) == 2:
second_arr = music_feature_options[final_tile_option[1]]
first_matrix, second_matrix = [], []
for _ in range(len(first_arr[:size_of_tile])):
first_matrix.append(first_arr[:size_of_tile])
if len(final_tile_option) == 2:
second_matrix.append(second_arr[:size_of_tile])
tile = np.array(first_matrix)
if len(final_tile_option) == 2:
second_tile = np.array(second_matrix)
tile = np.multiply(100 * tile, 200 * np.transpose(second_tile))
elif 'Chromagram' in final_tile_option:
first_arr = music_feature_options['Chromagram'][0]
final_tile_option.remove('Chromagram')
first_matrix = []
for arr in first_arr:
loop = True
row = []
while loop:
row.extend(arr)
if len(row) > size_of_tile:
first_matrix.append(row[:size_of_tile])
loop = False
loop = True
for row in first_matrix:
while loop:
first_matrix.append(row)
if len(first_matrix) > size_of_tile:
first_matrix = first_matrix[:size_of_tile]
loop = False
tile = first_matrix
if len(final_tile_option) == 1:
second_arr = music_feature_options[final_tile_option[0]]
second_matrix = []
for _ in range(len(second_arr[:size_of_tile])):
second_matrix.append(second_arr[:size_of_tile])
second_tile = np.array(second_matrix)
tile = np.add(tile, 0.5 * np.transpose(second_tile))
# Set up a figure twice as tall as it is wide
fig = plt.figure(figsize=plt.figaspect(2.))
# First subplot
ax = fig.add_subplot(2, 1, 1)
ax.set(title='Tile 2D')
ax.imshow(tile, interpolation='bilinear',
norm=colors.Normalize(), cmap='YlOrBr')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Second subplot
ax = fig.add_subplot(2, 1, 2, projection='3d')
ax.set(title='Tile 3D')
x = np.arange(0, size_of_tile, 1)
y = np.arange(0, size_of_tile, 1)
tile = tile - tile.min()
xs, ys = np.meshgrid(x, y)
ax.plot_surface(xs, ys, tile)
return fig, tile
def create_3d_tile():
sponge = [
[
[1, 1, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[0, 0, 0],
[1, 0, 1]
],
[
[1, 1, 1],
[1, 0, 1],
[1, 1, 1]
]
]
model = VoxelModel(sponge, generateMaterials(4)) # 4 is aluminium.
mesh = Mesh.fromVoxelModel(model)
return mesh
``` |
{
"source": "jp-pino/CarND-Advanced-Lane-Lines",
"score": 3
} |
#### File: src/camera/calibration.py
```python
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
# Camera calibration class
# - Find chessboard corners
# - Correct camera distortion
# - Apply to images
class Calibration:
def __init__(self, route = 'camera_cal/calibration*.jpg', preview = False):
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob(route)
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
if preview:
cv2.imshow('img',img)
cv2.waitKey(500)
# Close preview window
if preview:
cv2.destroyAllWindows()
# Get camera distortion correction parameters
retval, self.cameraMatrix, self.distCoeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1::-1], None, None)
def undistort(self, img):
return cv2.undistort(img, self.cameraMatrix, self.distCoeffs)
if __name__ == "__main__":
cal = Calibration()
fig = plt.figure(figsize=(10, 5))
fig.add_subplot(1, 2, 1)
plt.imshow(plt.imread('test_images/test2.jpg'))
plt.title("Distorted")
fig.add_subplot(1, 2, 2)
plt.imshow(cal.undistort(plt.imread('test_images/test2.jpg')))
plt.title("Undistorted")
plt.show(block = True)
```
#### File: src/lane/curvature.py
```python
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
# Lane curvature class
# - Calculates real world curvature from an image's lane equations
class Curvature():
def __init__(self, ym_per_pix = 30/720, xm_per_pix = 3.7/700):
self.ym_per_pix = ym_per_pix
self.xm_per_pix = xm_per_pix
def calculate(self, y, poly):
y = y * self.ym_per_pix
coeff0 = np.asarray(poly)[0] * (self.xm_per_pix / (self.ym_per_pix ** 2))
coeff1 = np.asarray(poly)[1] * (self.xm_per_pix / self.ym_per_pix)
return ((1 + (2 * coeff0 * y + coeff1) ** 2) ** (3/2)) / abs(2 * coeff0)
def x_pos(self, y, poly):
y = y * self.ym_per_pix
coeff0 = np.asarray(poly)[0] * (self.xm_per_pix / (self.ym_per_pix ** 2))
coeff1 = np.asarray(poly)[1] * (self.xm_per_pix / self.ym_per_pix)
coeff2 = np.asarray(poly)[2] * self.xm_per_pix
return coeff0 * (y ** 2) + coeff1 * y + coeff2
``` |
{
"source": "jpplatzer/Learning",
"score": 4
} |
#### File: Learning/towers/towers.py
```python
def move(srcStack, dstStack):
print('Before move:', srcStack, ',', dstStack)
top = srcStack.pop(0)
assert len(dstStack) == 0 or dstStack[0] > top
dstStack.insert(0, top)
print('After move:', srcStack, ',', dstStack)
def move_stack(stacks, size, src, dst, oth):
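    # Towers of Hanoi recursion: move size-1 discs to the spare peg, move the largest
    # disc to the destination, then move the size-1 discs back on top of it.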
if size == 1:
move(stacks[src], stacks[dst])
else:
move_stack(stacks, size-1, src, oth, dst)
move(stacks[src], stacks[dst])
move_stack(stacks, size-1, oth, dst, src)
def show_stacks(stacks):
for num, stack in enumerate(stacks):
print('Stack', num, ':', stack)
def init(stacks, num, src):
for i in range(3):
stacks.append([])
stack = stacks[src]
for i in range(num):
stack.append(i+1)
stacks = []
src = 0
num = 5
init(stacks, num, src)
show_stacks(stacks)
move_stack(stacks, num, src, 2, 1)
show_stacks(stacks)
```
#### File: Learning/triangle/triangle.py
```python
import sys
def maxTrianglePath(filename):
# maxColSum is the running max sum for each column
# It will have num columns + 1 entries to support n + 1 comparisons
maxColSum = [0,0]
# File automatically closes when deleted
f = open(filename, 'r')
for rowNum, line in enumerate(f):
# Convert string of space separated numbers into a list of ints
row = list(map(int, line.split()))
assert(len(row) == rowNum+1)
prevColMax = 0
for colNum in range(0, rowNum+1):
currColMax = maxColSum[colNum]
maxColSum[colNum] = row[colNum] + max(prevColMax, currColMax, maxColSum[colNum+1])
prevColMax = currColMax
maxColSum.append(0)
return max(maxColSum)
if len(sys.argv) >= 2:
filename = sys.argv[1]
print('Maximum total for', filename, 'is', maxTrianglePath(filename))
else:
print('Must specify the triangle file path as the first command line parameter')
``` |
{
"source": "JPPorcel/restaurantes",
"score": 2
} |
#### File: restaurantes/restaurantes/models.py
```python
from mongoengine import *
from sitio_web.settings import DBNAME
connect(DBNAME)
class restaurants(Document):
name = StringField(required=True, max_length=80)
restaurant_id = StringField()
cuisine = StringField()
borough = StringField()
city = StringField()
address = StringField()
image = ImageField(size=(600, 400, True))
@property
def gridfile_attr(self):
return str(self.image.grid_id)
```
#### File: restaurantes/restaurantes/views.py
```python
from django.shortcuts import render, HttpResponse, redirect, Http404
from restaurantes.models import restaurants
from restaurantes.forms import RestaurantForm
import requests
from random import randint
from mongoengine.connection import get_db
from gridfs import GridFS, NoFile
from bson.objectid import ObjectId
from django.contrib.auth.decorators import login_required
import json
#import urllib as ur
import urllib.request as ur # for python 2.7
from django.http import JsonResponse
from io import BytesIO
from PIL import Image
from .serializers import *
from rest_framework_mongoengine import viewsets
from rest_framework.decorators import detail_route
from rest_framework import renderers
from rest_framework import permissions
API_KEY = "<KEY>"  # Google Maps/Street View API key (placeholder)
# Create your views here.
def index(request):
    context = {} # Variables for the template go here
return render(request,'index.html', context)
def listar(request):
context = {
"restaurantes": restaurants.objects[:20], # los veinte primeros
"menu": "listar",
}
return render (request, 'listar.html', context)
def serve_file(request, file_id):
db = get_db()
fs = GridFS(db)
try:
f = fs.get(ObjectId(file_id))
except NoFile:
fs = GridFS(db, collection='images') # mongoengine stores images in a separate collection by default
try:
f = fs.get(ObjectId(file_id))
except NoFile:
raise Http404
response = HttpResponse(f.read(), content_type=f.content_type)
# add other header data like etags etc. here
return response
def restaurante(request, id):
r = restaurants.objects(restaurant_id=str(id)).first()
context = {
"restaurante": r,
    } # Variables for the template go here
return render (request, 'restaurante.html', context)
def parseURL(addr):
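    # Replace accented characters and spaces so the address can be embedded directly in a Street View URL.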
addr = addr.encode('utf-8')
addr = addr.replace("á","a")
addr = addr.replace("é","e")
addr = addr.replace("í","i")
addr = addr.replace("ó","o")
addr = addr.replace("ú","u")
addr = addr.replace("ñ","n")
addr = addr.replace(" ","_")
return addr
@login_required(login_url='/accounts/login/')
def getPhoto(request, address):
url = "https://maps.googleapis.com/maps/api/streetview?size=800x600&location="+address+"&key="+API_KEY
return HttpResponse(ur.urlopen(url).read(), content_type="image/jpeg")
def getCity(request, city):
url = "https://maps.googleapis.com/maps/api/place/autocomplete/json?input="+city+"&types=(cities)&language=es&key="+API_KEY
response = ur.urlopen(url)
data = json.loads(response.read())
cities=[]
for c in data["predictions"]:
cities.append(c["description"])
return JsonResponse(cities, safe=False)
def getAddress(request, name):
url = "http://maps.googleapis.com/maps/api/geocode/json?address="+name+"&language=es"
response = ur.urlopen(url)
data = json.loads(response.read())
print(data)
address=[]
for c in data["results"]:
address.append(c["formatted_address"])
if len(address) == 0:
address.append("error")
return JsonResponse(address[0], safe=False)
@login_required(login_url='/accounts/login/')
def add(request):
if request.method == 'GET':
formulario = RestaurantForm()
# GET
context = {
'formulario': formulario,
"menu": "add",
        } # Variables for the template go here
return render(request, 'add.html', context)
if request.method == 'POST':
formulario = RestaurantForm(request.POST, request.FILES)
formulario.is_valid()
name = formulario.cleaned_data.get('name')
cuisine = formulario.cleaned_data.get('cuisine')
borough = formulario.cleaned_data.get('borough')
city = formulario.cleaned_data.get('city')
address = formulario.cleaned_data.get('address')
image_default = formulario.cleaned_data.get('image_default')
if(image_default == "yes"):
URL = "https://maps.googleapis.com/maps/api/streetview?size=800x600&location="+address+"&key="+API_KEY
URL = parseURL(URL)
image = BytesIO(ur.urlopen(URL).read())
else:
image = request.FILES.get('image_file')
        # create a random 8-digit id that does not already exist in the database
restaurant_id = randint(10000000,99999999)
while(restaurants.objects(restaurant_id=str(restaurant_id)).first() != None):
restaurant_id = randint(10000000,99999999)
if(name != "" and cuisine != "" and borough != "" and city != "" and address != ""):
r = restaurants(name=name, restaurant_id=str(restaurant_id), cuisine=cuisine, borough=borough, city=city, address=address, image=image)
r.save()
return redirect('restaurante', id=restaurant_id)
def search(request):
context = {
"restaurantes": restaurants.objects(cuisine__icontains=request.GET.get('cocina')),
}
return render (request, 'listar.html', context)
class restaurantsViewSet(viewsets.ModelViewSet):
lookup_field = 'restaurant_id'
queryset = restaurants.objects.all()
serializer_class = restaurantsSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
@detail_route(renderer_classes=[renderers.StaticHTMLRenderer])
def highlight(self, request, *args, **kwargs):
m = self.get_object()
return Response(m)
def pre_save(self, obj):
obj.owner = self.request.user
class restaurantListViewSet(viewsets.ModelViewSet):
lookup_field = "restaurant_id"
queryset = restaurants.objects.all()
serializer_class = restaurantListSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
``` |
{
"source": "jpposadas/sngrams",
"score": 3
} |
#### File: jpposadas/sngrams/parser.py
```python
import warnings
#Detected warnings caused by the update of numpy v1.15
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import sys
import codecs
import spacy
import textacy
import en_core_web_sm
import es_core_news_md
def normalization(text):
prep_1=textacy.preprocess.fix_bad_unicode(text, normalization='NFC')
prep_1=textacy.preprocess.normalize_whitespace(prep_1)
prep_1=textacy.preprocess.preprocess_text(prep_1, no_contractions=True, no_accents=True)
return prep_1
def sentence_segmenter(text,language):
aux=[]#variable that contains the sentences
if language=="en":
nlp = en_core_web_sm.load()
else:
nlp = es_core_news_md.load()
doc = nlp(text)
for sent in doc.sents:
aux.append(sent.text)
#print sent.text
return aux,nlp
def dependency_parser(aux,input_file,nlp):
output_file = input_file
output_file = output_file.replace(".txt",".parse")
#id=1
try:
        #**Open the output .parse file for writing
outf=codecs.open(output_file,'w',encoding='utf-8',errors='ignore')
except IOError as e:
print input_file + "I/O error({0}): {1}".format(e.errno, e.strerror)
exit(1)
for sent in aux:
#print "Processing sentence "+str(id)
#id+=1
ln=""
sent = textacy.preprocess.remove_punct(sent)
sent = sent.replace("\n"," ")
sent = sent.lower()
sent = textacy.preprocess.normalize_whitespace(sent)
if len(sent)>1:
doc = nlp(sent)
for token in doc:
if token.lemma_=="-PRON-":
ln+=token.text+"/"+token.text+"/"+token.pos_+" "
else:
ln+=token.text+"/"+token.lemma_+"/"+token.pos_+" "
ln=ln.rstrip()
ln+="\n\n"
xx=""
for token in doc:
if(token.dep_.lower()=="root"):
xx+=token.dep_.lower()+"(ROOT-0, "+token.head.text+"-"+str(token.head.i+1)+")\n"
else:
xx+=token.dep_.lower()+"("+token.head.text+"-"+str(token.head.i+1)+", "+token.text+"-"+str(token.i+1)+")\n"
#xx = textacy.preprocess.normalize_whitespace(aux)
xx+="\n"
outf.write(ln+xx)
outf.flush()
outf.close()
############### MAIN ################################
if __name__ == '__main__':
#How to use:
#python parser.py input [en,es]
encod = 'utf-8' #'utf-8' or other encoding like '1252'
#print sys.argv
if len(sys.argv) != 3:
print "Usage with ecxactly two parameters:"
print "python parser.py input [en,es]"
exit(1)
input_file = sys.argv[1]
language = sys.argv[2]
text=""
try:
#**Read the input file and identify the format (includes POS tags or not)
f1 = codecs.open (input_file, "rU", encoding = encod, errors='ignore')
text="".join(f1.readlines())
f1.close()
except IOError as e:
        print(input_file + " I/O error({0}): {1}".format(e.errno, e.strerror))
exit(1)
text = normalization(text)
aux,nlp = sentence_segmenter(text,language)
dependency_parser(aux,input_file,nlp)
print "Done."
``` |
{
"source": "jppzanetti/rank-distance",
"score": 4
} |
#### File: rank-distance/count/count_intermediates.py
```python
from math import factorial, ceil, floor
import sys
import argparse
def cycle_intermediates(k):
"""Number of intermediates for a k-cycle."""
    x = k // 2  # integer division: valid cycles have even length, and factorial() needs an int
return int(factorial(k) / (factorial(x) * factorial(x) * (x + 1)))
def path_intermediates(k):
"""Number of intermediates for a k-path."""
v = k + 1
return int(factorial(v) / factorial(floor(v / 2)) / factorial(ceil(v / 2)))
def count_intermediates(cycles = [], paths = []):
"""Computes the number of intermediates of a genome."""
total = 1
for p in paths:
total *= path_intermediates(p)
for c in cycles:
total *= cycle_intermediates(c)
return total
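# Quick sanity check of the formulas above (values worked out by hand, shown
# here as comments only so the script's behaviour is unchanged):
#   cycle_intermediates(4)  -> 4!/(2!*2!*3) = 2
#   path_intermediates(3)   -> 4!/(2!*2!)   = 6
#   count_intermediates(cycles=[4], paths=[3]) -> 2 * 6 = 12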
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Counts number of intermediates.')
parser.add_argument('-c', nargs='*', type=int, default=[],
help='list of lengths of cycles')
parser.add_argument('-p', nargs='*', type=int, default=[],
help='list of lengths of paths')
if len(sys.argv) == 1:
args = parser.parse_args('-h'.split())
else:
args = parser.parse_args()
k_cycles = []
for k in args.c:
if k < 0:
print("ERROR: Negative cycle size.")
exit(-1)
if k == 0:
print("ERROR: Null cycle size.")
exit(-1)
if k % 2 == 1:
print("ERROR: Odd cycle size.")
exit(-1)
if k > 2:
k_cycles.append(k)
k_paths = []
for k in args.p:
if k < 0:
print("ERROR: Negative path length.")
exit(-1)
if k > 0:
k_paths.append(k)
print(count_intermediates(cycles = k_cycles, paths = k_paths))
``` |
{
"source": "jp-quant/qfengine",
"score": 3
} |
#### File: alpha/expected_returns/daily_close_ER.py
```python
from qfengine.alpha.expected_returns.expected_returns_weights import ExpectedReturnsAlpha
import numpy as np
import pandas as pd
class DailyCloseExpectedReturnsAlpha(ExpectedReturnsAlpha):
"""
AlphaModel that returns weights of all assets in given universe, base on their
daily close returns calculated from historical price data grabbed from data_handler given.
Parameters
----------
signal_weights : `dict{str: float}`
The signal weights per asset symbol.
universe : `Universe`, optional
The Assets to make signal forecasts for.
data_handler : `DataHandler`, optional
An optional DataHandler used to preserve interface across AlphaModels.
"""
def __init__(
self,
universe,
data_handler,
logarithmic_returns:bool = True,
ret_filter_op = None,
**kwargs
):
super().__init__(universe = universe, data_handler = data_handler)
self.logarithmic_returns = logarithmic_returns
self.ret_filter_op = ret_filter_op
def _closes_to_returns_df(self, closes_df:pd.DataFrame)->pd.DataFrame:
return (
np.log(closes_df/closes_df.shift(1)).dropna()
if self.logarithmic_returns else
closes_df.pct_change().dropna()
)
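    # Illustration of the conversion above (synthetic closes, purely for intuition):
    #   closes = pd.DataFrame({"A": [100.0, 101.0, 99.0]})
    #   log returns   : np.log(closes/closes.shift(1)).dropna() -> ln(101/100) ~  0.00995, ln(99/101) ~ -0.02000
    #   simple returns: closes.pct_change().dropna()            ->             0.0100,               -0.0198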
def _get_universe_historical_daily_close_df(self, dt, **kwargs)->pd.DataFrame:
return self.data_handler.get_assets_historical_closes(
self.universe.get_assets(dt),
end_dt = dt)
def _filter_returns_df(self, returns_df:pd.DataFrame)->pd.DataFrame:
if self.ret_filter_op:
return self.ret_filter_op(returns_df)
else:
return returns_df
def get_returns_df(self, dt, **kwargs):
return self._filter_returns_df(
self._closes_to_returns_df(
self._get_universe_historical_daily_close_df(dt, **kwargs),
**kwargs
)
)
# TODO: Redesign architecture for forecasting implementations
def calculate_assets_expected_returns(self, dt, **kwargs):
ret_df = self.get_returns_df(dt, **kwargs)
return dict(ret_df.mean())
```
#### File: qfengine/data/backtest_data_handler.py
```python
import pandas as pd
import numpy as np
import logging
from qfengine.data.price.backtest_price_handler import BacktestPriceHandler
from qfengine import settings
logger = logging.getLogger(__name__)
class BacktestDataHandler(BacktestPriceHandler):
def __init__(
self,
price_data_sources:list,
universe = None,
**kwargs
):
super().__init__(universe = universe,
price_data_sources = price_data_sources,
**kwargs
)
def copy(self):
handler = BacktestPriceHandler(
universe = self.universe,
price_data_sources = [d.create_price_source_copy() for d in self.price_data_sources],
preload_bid_ask_data = False,
)
handler._assets_bid_ask_frames = self._assets_bid_ask_frames.copy()
# TODO: Add more renewals once more types of data sources are available
return handler
```
#### File: qfengine/data/data.py
```python
from abc import ABCMeta, abstractmethod
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
class TimeSeriesData(object):
__metaclass__ = ABCMeta
def __init__(self,
dt:pd.Timestamp,
):
self.dt = dt
```
#### File: data/price/price_handler.py
```python
from qfengine.data.price.price_source import PriceDataSource
from qfengine.data.data_handler import DataHandler
from qfengine.asset.universe.universe import Universe
from abc import ABCMeta, abstractmethod
import pandas as pd
class PriceHandler(DataHandler):
__metaclass__ = ABCMeta
def __init__(self,
price_data_sources,
universe = None,
**kwargs
):
try:
iter(price_data_sources)
except TypeError:
price_data_sources = [price_data_sources]
        assert all(PriceDataSource in d.__class__.mro() for d in price_data_sources)
if universe:
assert Universe in universe.__class__.mro()
self.universe = universe
self.price_data_sources = list(price_data_sources)
# TODO: Add preference source or ordered of calls until fit cond.
@abstractmethod
    def assetsDF(self, **kwargs):
raise NotImplementedError("Implement assetsDF()")
@abstractmethod
def assetsList(self, **kwargs):
raise NotImplementedError("Implement assetsList()")
#!---| Bid & Ask Functions |---!#
@abstractmethod
def get_asset_latest_bid_price(self, dt, asset_symbol)->float:
raise NotImplementedError("Implement get_asset_latest_bid_price()")
@abstractmethod
def get_asset_latest_ask_price(self, dt, asset_symbol)->float:
raise NotImplementedError("Implement get_asset_latest_ask price()")
@abstractmethod
def get_asset_latest_bid_ask_price(self, dt, asset_symbol)->tuple:
raise NotImplementedError("Implement get_asset_latest_bid_ask_price()")
@abstractmethod
def get_asset_latest_mid_price(self, dt, asset_symbol)->float:
raise NotImplementedError("Implement get_asset_latest_mid_price()")
#!---| Daily Price (OHLCV) Functions |---!#
    @abstractmethod
    def get_assets_historical_opens(self, start_dt, end_dt, asset_symbols, adjusted=False)->pd.DataFrame:
        raise NotImplementedError("Implement get_assets_historical_opens()")
    @abstractmethod
    def get_assets_historical_closes(self, start_dt, end_dt, asset_symbols, adjusted=False)->pd.DataFrame:
        raise NotImplementedError("Implement get_assets_historical_closes()")
    @abstractmethod
    def get_assets_historical_highs(self, start_dt, end_dt, asset_symbols, adjusted=False)->pd.DataFrame:
        raise NotImplementedError("Implement get_assets_historical_highs()")
    @abstractmethod
    def get_assets_historical_lows(self, start_dt, end_dt, asset_symbols, adjusted=False)->pd.DataFrame:
        raise NotImplementedError("Implement get_assets_historical_lows()")
    @abstractmethod
    def get_assets_historical_volumes(self, start_dt, end_dt, asset_symbols, adjusted=False)->pd.DataFrame:
        raise NotImplementedError("Implement get_assets_historical_volumes()")
```
#### File: vendor_api/Alpaca/alpaca.py
```python
import alpaca_trade_api as tradeapi
from alpaca_trade_api.rest import APIError
import os, os.path
import pandas as pd
import numpy as np
import datetime
from tqdm.auto import tqdm
from copy import deepcopy
import concurrent.futures
from qfengine import settings
#--------------| <-ALPACA API-> |
class Alpaca(object):
name = 'Alpaca'
website_url = 'https://alpaca.markets/'
api_endpoint_url = 'https://paper-api.alpaca.markets'
api_key_id = settings.API['Alpaca']['id']
api_key = settings.API['Alpaca']['key']
(os.environ["ALPACA_ID"],
os.environ["ALPACA_KEY"]) = api_key_id,api_key
def __init__(self):
#-----FIXED----#
self._path = os.path.dirname(os.path.realpath(__file__))
self._dbPath = {
"1D":os.path.join(self._path,"1D"),
"1Min":os.path.join(self._path,"1Min")
}
base_url = Alpaca.api_endpoint_url if Alpaca.api_key_id.startswith("PK") else 'https://api.alpaca.markets'
self._REST = tradeapi.REST(Alpaca.api_key_id,Alpaca.api_key,base_url)
self._StreamConn = tradeapi.StreamConn(Alpaca.api_key_id,Alpaca.api_key,base_url)
def get_barset(self,symbols,timeframe,start_date=None,end_date=None):
if start_date is not None:
start_ = str(pd.Timestamp(start_date).date())+"T00:00:00.000Z"
else:
start_ = (
"2001-01-01T00:00:00.000Z" if (timeframe == '1D')
else (
str(pd.Timestamp.now(tz='America/New_York').floor('1min').year)+"-01-01T00:00:00.000Z"
)
)
if end_date is not None:
try:
end_ = str(pd.Timestamp(end_date).date()) + "T00:00:00.000Z"
except:
end_ = None
else:
end_ = None
df = self._REST.get_barset(symbols,timeframe,start=start_,end=end_).df
df.index = pd.DatetimeIndex(df.index.date)
df.columns.names = ('symbols','columns')
return df
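    # Usage sketch (symbols and dates below are illustrative, not part of this module):
    #   api = Alpaca()
    #   df = api.get_barset(["AAPL", "MSFT"], "1D", start_date="2020-01-01")
    # 'df' carries a (symbols, columns) MultiIndex on its columns, e.g. df[("AAPL", "close")].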
''' Archived Method
def _get_barset(symbol:str,
timeframe:str,
start_date=None,
end_date=None,
bars_ago:int=None
): # < live data output>--> DataFrame>
conn = tradeapi.REST(Alpaca.api_key_id,
Alpaca.api_key,
("https://paper-api.alpaca.markets" if Alpaca.api_key_id.startswith("PK") else None)
)
if start_date is not None:
start_ = str(pd.Timestamp(start_date).date())+"T00:00:00.000Z"
else:
start_ = (
"2001-01-01T00:00:00.000Z" if (timeframe == '1D')
else (
str(pd.Timestamp.now(tz='America/New_York').floor('1min').year)+"-01-01T00:00:00.000Z"
)
)
if end_date is not None:
try:
end_ = str(pd.Timestamp(end_date).date()) + "T00:00:00.000Z"
except:
end_ = None
else:
end_ = None
new_data = conn.get_barset(symbol,timeframe,start=start_)[symbol]
stamps = []
opens = []
closes = []
highs = []
lows = []
volumes = []
for bar in new_data:
stamps.append(str(datetime.datetime.strftime(bar.t,'%Y-%m-%d %H:%M:%S')))
opens.append(bar.o)
closes.append(bar.c)
highs.append(bar.h)
lows.append(bar.l)
volumes.append(bar.v)
stamps = np.array(stamps)
opens = np.array(opens,dtype=np.float64)
closes = np.array(closes,dtype=np.float64)
highs = np.array(highs,dtype=np.float64)
lows = np.array(lows,dtype=np.float64)
volumes = np.array(volumes,dtype=np.float64)
result = pd.DataFrame()
result['open'] = pd.Series(data = opens,index=stamps)
result['high'] = pd.Series(data=highs,index=stamps)
result['low'] = pd.Series(data=lows,index=stamps)
result['close'] = pd.Series(data=closes,index=stamps)
result['volume'] = pd.Series(data=volumes,index=stamps)
result.index = pd.DatetimeIndex(result.index)
if start_date is not None:
result = result[result.index >= pd.Timestamp(start_date)]
if end_date is not None:
result = result[result.index <= pd.Timestamp(end_)]
return result
if isinstance(symbols,str):
result = _get_barset(symbols,timeframe,start_date,end_date)
else: #---| parallelizing staticmethod calls and concat to return multiIndexed Dataframe
pool = concurrent.futures.ProcessPoolExecutor()
iterables = ((s, timeframe, start_date, end_date,None)
for s in symbols)
iterables = zip(*iterables)
barsets = pool.map(_get_barset, *iterables)
#_raw = list(barsets)
#_concat = pd.concat(_raw,axis=1)
_toConcat = []
multi_cols = [[],[]]
for s,df in zip(symbols,list(barsets)):
multi_cols[0] += [s for _ in range(df.shape[1])]
multi_cols[1] += list(df.columns)
_toConcat.append(df)
multi_cols = [np.array(c) for c in multi_cols]
_concat = pd.concat(_toConcat,axis=1)
result = pd.DataFrame(
data = _concat.values,
index = _concat.index,
columns = multi_cols
)
return result
'''
def get_account(self):
return self._REST.get_account()
def companyInfomation(self,symbol):
return self._REST.polygon.company(symbol)
def list_positions(self):
return self._REST.list_positions()
def list_orders(self,status=None):
return self._REST.list_orders() if status is None else [o for o in self._REST.list_orders() if o.status == status]
def get_position(self,symbol):
try:
return self._REST.get_position(symbol)
except Exception:
return None
def submit_order(self, symbol, qty, side, order_type, time_in_force,
limit_price=None, stop_price=None, client_order_id=None,
extended_hours=None):
try:
return self._REST.submit_order(symbol,qty,side,order_type,time_in_force)
except APIError as e:
return e
def cancel_order(self,order_id):
self._REST.cancel_order(order_id)
def cancel_all_orders(self):
self._REST.cancel_all_orders()
def ts(self,string=False):
ts = pd.Timestamp.now(tz='America/New_York').floor('1min')
return ts if not string else str(ts)
def marketOpen(self):
now = self.ts()
return ((now >= now.replace(hour=9, minute=30)) and (now <= now.replace(hour=15,minute=59)))
def last_daily_close(self,symbol):
return self.get_barset(symbol,"1D",5).iloc[-1].close
#------OTHERS
def get_account_configurations(self):
return self._REST.get_account_configurations()
def get_clock(self):
return self._REST.get_clock()
```
#### File: risk/covariance/covariance.py
```python
from qfengine.risk.risk_model import RiskModel
from abc import ABCMeta
import numpy as np
import pandas as pd
class CovarianceMatrixRiskModel(RiskModel):
__metaclass__ = ABCMeta
def __init__(self,
universe,
data_handler,
logarithmic_returns:bool = True,
ret_filter_op = None,
ret_std_op = None,
ret_corr_op = None,
**kwargs
):
self.universe = universe
self.data_handler = data_handler
self.logarithmic_returns = logarithmic_returns
self.ret_filter_op = ret_filter_op
self.ret_std_op = ret_std_op
self.ret_corr_op = ret_corr_op
#---| Computing Returns TimeSeries Data
def _closes_to_returns_df(self, closes_df:pd.DataFrame, **kwargs)->pd.DataFrame:
return (
np.log(closes_df/closes_df.shift(1)).dropna()
if self.logarithmic_returns else
closes_df.pct_change().dropna()
)
def _get_universe_historical_daily_close_df(self, dt, **kwargs)->pd.DataFrame:
return self.data_handler.get_assets_historical_closes(
self.universe.get_assets(dt),
end_dt = dt)
def _filter_returns_df(self, returns_df:pd.DataFrame, **kwargs)->pd.DataFrame:
if self.ret_filter_op:
return self.ret_filter_op(returns_df)
else:
return returns_df
def get_returns_df(self, dt, **kwargs):
return self._filter_returns_df(
self._closes_to_returns_df(
closes_df = self._get_universe_historical_daily_close_df(dt, **kwargs),
**kwargs
)
)
#---| Computing Covariance Matrix
def _returns_volatility(self, ret):
if self.ret_std_op is not None:
assert callable(self.ret_std_op)
std = self.ret_std_op(ret)
assert len(std) == ret.shape[1]
assert set(std.index).issubset(set(ret.columns))
return std
else:
return ret.std()
def _returns_correlation(self, ret):
if self.ret_corr_op is not None:
assert callable(self.ret_corr_op)
corr = self.ret_corr_op(ret)
assert corr.shape[0] == corr.shape[1] == ret.shape[1]
assert set(corr.index).issubset(set(ret.columns))
assert set(corr.columns).issubset(set(ret.columns))
return corr
else:
return ret.corr()
    def _is_symmetric(self, matrix:pd.DataFrame, rtol=1e-05, atol=1e-08):
        return (
            matrix.shape[0] == matrix.shape[1]
            and np.allclose(matrix.values, matrix.values.T, rtol=rtol, atol=atol)
        )
# Covariance = VOL' * CORR * VOL
def _compute_covariance_matrix(self, std:pd.Series, corr:pd.DataFrame):
assert self._is_symmetric(corr)
assert set(std.index).issubset(set(corr.index))
assert set(corr.columns).issubset(set(corr.index))
vol = std.copy().reindex(corr.columns).dropna()
assert len(vol) == len(std), str([i for i in corr.columns if i not in vol.index])
vol = np.diag(vol)
return pd.DataFrame(
data = (np.dot(vol,np.dot(corr,vol))),
index = corr.index,
columns = corr.columns
)
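    # Worked 2x2 example of the identity above (hand-checked numbers):
    #   std  = [0.10, 0.20], corr = [[1.0, 0.5], [0.5, 1.0]]
    #   diag(std) @ corr @ diag(std) = [[0.010, 0.010], [0.010, 0.040]]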
def calculate_returns_covariance_matrix(self, ret):
std = self._returns_volatility(ret)
corr = self._returns_correlation(ret)
return self._compute_covariance_matrix(std = std, corr = corr)
#---| __call__()
def __call__(self, dt, **kwargs):
ret_df = self.get_returns_df(dt, **kwargs)
return self.calculate_returns_covariance_matrix(ret_df)
```
#### File: risk/covariance/tailing_RMT_covariance.py
```python
from qfengine.risk.covariance.covariance import CovarianceMatrixRiskModel
import numpy as np
import pandas as pd
class TailingRMTCovarianceRiskModel(CovarianceMatrixRiskModel):
'''
Random Matrix Theory (RMT) Implementation of Marchenko-Pastur Filtering
of noisy eigen values
'''
def __init__(self,
universe,
data_handler,
tailing_time_delta:str,
Q = None,
sigma = None,
**kwargs
):
try:
pd.Timedelta(tailing_time_delta)
except:
raise
def _ret_filter(ret_df, time_delta = tailing_time_delta):
return ret_df[
ret_df.index >= (
ret_df.index[-1] - pd.Timedelta(tailing_time_delta)
)
]
def RMTFilteredCorrelation(ret, Q = Q, sigma = sigma):
T,N = ret.shape
Q = Q if Q is not None else (T/N) #---| optimizable
sigma = sigma if sigma is not None else 1 #---| optimizable
#! Marchenko-Pastur Theoretical Range Equation
min_theoretical_eval, max_theoretical_eval = (
np.power(sigma*(1 - np.sqrt(1/Q)),2),
np.power(sigma*(1 + np.sqrt(1/Q)),2)
)
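            # e.g. with Q = 4 and sigma = 1 the Marchenko-Pastur band is
            # [(1 - 0.5)^2, (1 + 0.5)^2] = [0.25, 2.25]; eigenvalues inside it are treated as noise below.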
raw_corr = ret.corr()
eVals,eVecs = np.linalg.eigh(raw_corr.values)
# noise_eVals = eVals[eVals <= max_theoretical_eval]
# outlier_eVals = eVals[eVals > max_theoretical_eval]
#---| Filter eigen values by replacing those in theoretical range to 0 (noises)
filtered_eVals = [(0 if ((i >= min_theoretical_eval) and (i<= max_theoretical_eval)) else i) for i in eVals]
#-----| Part 2b: Construct Filtered Correlation Matrix from Filtered eVals
filtered_corr = np.dot(eVecs,np.dot(
np.diag(filtered_eVals),np.transpose(eVecs)
))
np.fill_diagonal(filtered_corr,1)
return pd.DataFrame(data=filtered_corr,index=raw_corr.index,columns=raw_corr.columns)
super().__init__(universe = universe,
data_handler = data_handler,
ret_filter_op = _ret_filter,
ret_corr_op= RMTFilteredCorrelation,
**kwargs
)
self.tailing_time_delta = tailing_time_delta
def __repr__(self):
return self.__class__.__name__ + "(%s)" %str(self.tailing_time_delta)
```
#### File: risk/covariance/tailing_sample_covariance.py
```python
from qfengine.risk.covariance.covariance import CovarianceMatrixRiskModel
import numpy as np
import pandas as pd
class TailingSampleCovarianceRiskModel(CovarianceMatrixRiskModel):
def __init__(self,
universe,
data_handler,
tailing_time_delta:str,
**kwargs
):
try:
pd.Timedelta(tailing_time_delta)
except:
raise
def _ret_filter(ret_df, time_delta = tailing_time_delta):
return ret_df[
ret_df.index >= (
ret_df.index[-1] - pd.Timedelta(tailing_time_delta)
)
]
super().__init__(universe = universe,
data_handler = data_handler,
ret_filter_op = _ret_filter,
**kwargs
)
self.tailing_time_delta = tailing_time_delta
def __repr__(self):
return self.__class__.__name__ + "(%s)" %str(self.tailing_time_delta)
```
#### File: qfengine/signals/vol.py
```python
import numpy as np
import pandas as pd
from qfengine.signals.signal import Signal
class VolatilitySignal(Signal):
"""
    Indicator class to calculate lookback-period daily
    volatility of returns (not annualised; see
    AnnualizedVolatilitySignal for the annualised variant).
If the number of available returns is less than the
lookback parameter the volatility is calculated on
this subset.
Parameters
----------
start_dt : `pd.Timestamp`
The starting datetime (EDT) of the signal.
universe : `Universe`
The universe of assets to calculate the signals for.
lookbacks : `list[int]`
The number of lookback periods to store prices for.
"""
def __init__(self, universe, lookbacks, start_dt = None):
bumped_lookbacks = [lookback + 1 for lookback in lookbacks]
super().__init__(start_dt, universe, bumped_lookbacks)
@staticmethod
def _asset_lookback_key(asset, lookback):
"""
Create the buffer dictionary lookup key based
on asset name and lookback period.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`str`
The lookup key.
"""
return '%s_%s' % (asset, lookback + 1)
def _vol(self, asset, lookback):
"""
Calculate the volatility for the provided
lookback period based on the price buffers for a
particular asset.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`float`
            The daily volatility of returns.
"""
series = pd.Series(
self.buffers.prices[
VolatilitySignal._asset_lookback_key(
asset, lookback
)
]
)
returns = series.pct_change().dropna().to_numpy()
if len(returns) < 1:
return 0.0
else:
return np.std(returns)
def __call__(self, asset, lookback):
"""
        Calculate the daily volatility of
        returns for the asset.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`float`
            The daily volatility of returns.
"""
return self._vol(asset, lookback)
class AnnualizedVolatilitySignal(Signal):
"""
Indicator class to calculate lookback-period daily
volatility of returns, which is then annualised.
If the number of available returns is less than the
lookback parameter the volatility is calculated on
this subset.
Parameters
----------
start_dt : `pd.Timestamp`
The starting datetime (EDT) of the signal.
universe : `Universe`
The universe of assets to calculate the signals for.
lookbacks : `list[int]`
The number of lookback periods to store prices for.
"""
def __init__(self, start_dt, universe, lookbacks):
bumped_lookbacks = [lookback + 1 for lookback in lookbacks]
super().__init__(start_dt, universe, bumped_lookbacks)
@staticmethod
def _asset_lookback_key(asset, lookback):
"""
Create the buffer dictionary lookup key based
on asset name and lookback period.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`str`
The lookup key.
"""
return '%s_%s' % (asset, lookback + 1)
def _annualised_vol(self, asset, lookback):
"""
Calculate the annualised volatility for the provided
lookback period based on the price buffers for a
particular asset.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`float`
The annualised volatility of returns.
"""
series = pd.Series(
self.buffers.prices[
VolatilitySignal._asset_lookback_key(
asset, lookback
)
]
)
returns = series.pct_change().dropna().to_numpy()
if len(returns) < 1:
return 0.0
else:
return np.std(returns) * np.sqrt(252)
def __call__(self, asset, lookback):
"""
Calculate the annualised volatility of
returns for the asset.
Parameters
----------
asset : `str`
The asset symbol name.
lookback : `int`
The lookback period.
Returns
-------
`float`
The annualised volatility of returns.
"""
return self._annualised_vol(asset, lookback)
```
#### File: qfengine/trading/trading_session.py
```python
from abc import ABCMeta, abstractmethod
class TradingSession(object):
"""
Interface to a live or backtested trading session.
"""
__metaclass__ = ABCMeta
@abstractmethod
def run(self):
raise NotImplementedError(
"Should implement run()"
)
``` |
{
"source": "jpquiroga/tensor2tensor",
"score": 2
} |
#### File: tensor2tensor/layers/reversible_layers_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensor2tensor.layers import reversible_layers as reversible
from tensor2tensor.utils import test_utils
import tensorflow as tf
from tensorflow_probability import edward2 as ed
tf.compat.v1.enable_eager_execution()
class ReversibleLayersTest(parameterized.TestCase, tf.test.TestCase):
@test_utils.run_in_graph_and_eager_modes()
def testActNorm(self):
np.random.seed(83243)
batch_size = 25
length = 15
channels = 4
inputs = 3. + 0.8 * np.random.randn(batch_size, length, channels)
inputs = tf.cast(inputs, tf.float32)
layer = reversible.ActNorm()
outputs = layer(inputs)
mean, variance = tf.nn.moments(outputs, axes=[0, 1])
self.evaluate(tf.global_variables_initializer())
mean_val, variance_val = self.evaluate([mean, variance])
self.assertAllClose(mean_val, np.zeros(channels), atol=1e-3)
self.assertAllClose(variance_val, np.ones(channels), atol=1e-3)
inputs = 3. + 0.8 * np.random.randn(batch_size, length, channels)
inputs = tf.cast(inputs, tf.float32)
outputs = layer(inputs)
mean, variance = tf.nn.moments(outputs, axes=[0, 1])
self.evaluate(tf.global_variables_initializer())
mean_val, variance_val = self.evaluate([mean, variance])
self.assertAllClose(mean_val, np.zeros(channels), atol=0.25)
self.assertAllClose(variance_val, np.ones(channels), atol=0.25)
@test_utils.run_in_graph_and_eager_modes()
def testMADELeftToRight(self):
np.random.seed(83243)
batch_size = 2
length = 3
channels = 1
units = 5
network = reversible.MADE(units, [4], activation=tf.nn.relu)
inputs = tf.zeros([batch_size, length, channels])
outputs = network(inputs)
num_weights = sum([np.prod(weight.shape) for weight in network.weights])
# Disable lint error for open-source. pylint: disable=g-generic-assert
self.assertEqual(len(network.weights), 4)
# pylint: enable=g-generic-assert
self.assertEqual(num_weights, (3*1*4 + 4) + (4*3*5 + 3*5))
self.evaluate(tf.global_variables_initializer())
outputs_val = self.evaluate(outputs)
self.assertAllEqual(outputs_val[:, 0, :], np.zeros((batch_size, units)))
self.assertEqual(outputs_val.shape, (batch_size, length, units))
@test_utils.run_in_graph_and_eager_modes()
def testMADERightToLeft(self):
np.random.seed(1328)
batch_size = 2
length = 3
channels = 5
units = 1
network = reversible.MADE(units, [4, 3],
input_order='right-to-left',
activation=tf.nn.relu,
use_bias=False)
inputs = tf.zeros([batch_size, length, channels])
outputs = network(inputs)
num_weights = sum([np.prod(weight.shape) for weight in network.weights])
# Disable lint error for open-source. pylint: disable=g-generic-assert
self.assertEqual(len(network.weights), 3)
# pylint: enable=g-generic-assert
self.assertEqual(num_weights, 3*5*4 + 4*3 + 3*3*1)
self.evaluate(tf.global_variables_initializer())
outputs_val = self.evaluate(outputs)
self.assertAllEqual(outputs_val[:, -1, :], np.zeros((batch_size, units)))
self.assertEqual(outputs_val.shape, (batch_size, length, units))
@test_utils.run_in_graph_and_eager_modes()
def testMADENoHidden(self):
np.random.seed(532)
batch_size = 2
length = 3
channels = 5
units = 4
network = reversible.MADE(units, [], input_order='left-to-right')
inputs = tf.zeros([batch_size, length, channels])
outputs = network(inputs)
num_weights = sum([np.prod(weight.shape) for weight in network.weights])
# Disable lint error for open-source. pylint: disable=g-generic-assert
self.assertEqual(len(network.weights), 2)
# pylint: enable=g-generic-assert
self.assertEqual(num_weights, 3*5*3*4 + 3*4)
self.evaluate(tf.global_variables_initializer())
outputs_val = self.evaluate(outputs)
self.assertAllEqual(outputs_val[:, 0, :], np.zeros((batch_size, units)))
self.assertEqual(outputs_val.shape, (batch_size, length, units))
@test_utils.run_in_graph_and_eager_modes()
def testTransformedRandomVariable(self):
class Exp(tf.keras.layers.Layer):
"""Exponential activation function for reversible networks."""
def __call__(self, inputs, *args, **kwargs):
if not isinstance(inputs, ed.RandomVariable):
return super(Exp, self).__call__(inputs, *args, **kwargs)
return reversible.TransformedRandomVariable(inputs, self)
def call(self, inputs):
return tf.exp(inputs)
def reverse(self, inputs):
return tf.log(inputs)
def log_det_jacobian(self, inputs):
return -tf.log(inputs)
x = ed.Normal(0., 1.)
y = Exp()(x)
y_sample = self.evaluate(y.distribution.sample())
y_log_prob = self.evaluate(y.distribution.log_prob(y_sample))
self.assertGreater(y_sample, 0.)
self.assertTrue(np.isfinite(y_log_prob))
if __name__ == '__main__':
tf.test.main()
``` |
{
"source": "jpra2/impress",
"score": 2
} |
#### File: preprocessor/meshHandle/meshComponents.py
```python
import numpy as np
from pymoab import types, rng
from ..geoUtil import geoTools as gtool
class GetItem(object):
def __init__(self, adj):
self.fun = adj
def __call__(self, item):
return self.fun(item)
def __getitem__(self, item):
return self.fun(item)
class MeshEntities(object):
def __init__(self, core, entity_type):
self.mb = core.mb
self.mtu = core.mtu
self.meshset = core.root_set
self.nodes = core.all_nodes
self.num = {"nodes": 0, "node": 0, "edges": 1, "edge": 1, "faces": 2, "face": 2, "volumes": 3, "volume": 3,
0: 0, 1: 1, 2: 2, 3: 3}
string = {0: "nodes", 1: "edges", 2: "faces", 3: "volumes"}
        if core.level == 0:
            self.id_name = "GLOBAL_ID"
            self.father_id_name = "GLOBAL_ID"
        elif core.level == 1:
            self.father_id_name = core.father_core.id_name
            self.id_name = "LOCAL_ID_L" + str(core.level) + "-" + str(core.coarse_num)
        else:
            self.father_id_name = core.father_core.id_name
            self.id_name = self.father_id_name + "L" + str(core.level) + "-" + str(core.coarse_num)
entity_num = self.num[entity_type]
if entity_num == 0:
self.elements_handle = core.all_nodes
self.internal_elements = core.internal_nodes
self.boundary_elements = core.boundary_nodes
self.vID = 0
elif entity_num == 1:
self.elements_handle = core.all_edges
self.internal_elements = core.internal_edges
self.boundary_elements = core.boundary_edges
self.vID = 1
elif entity_num == 2:
self.elements_handle = core.all_faces
self.internal_elements = core.internal_faces
self.boundary_elements = core.boundary_faces
self.vID = 2
elif entity_num == 3:
self.elements_handle = core.all_volumes
self.internal_elements = core.internal_volumes
self.boundary_elements = core.boundary_volumes
self.vID = 3
self.entity_type = string[entity_num]
self.tag_handle = core.handleDic[self.id_name]
self.global_handle = core.handleDic['GLOBAL_ID']
self.father_handle = core.handleDic[self.father_id_name]
if self.vID == 0:
self.adjacencies = GetItem(self._adjacencies_for_nodes)
self.coords = GetItem(self._coords)
else:
self.adjacencies = GetItem(self._adjacencies)
self.connectivities = GetItem(self._connectivities)
self.classify_element = GetItem(self._classify_element)
self.center = GetItem(self._center)
# self.global_id = GetItem(self._global_id)
# self.father_id = GetItem(self._father_id)
if (self.vID == 1) & (core.dimension == 2):
self.normal = GetItem(self._normal)
elif (self.vID == 2) & (core.dimension == 3):
self.normal = GetItem(self._normal)
# initialize specific flag dic in accordance with type of the object create
self.flag = {key: self.read(value[self.vID]) for key, value in core.flag_dic.items()
if value[self.vID].empty() is not True}
# print("Mesh Entity type {0} successfully initialized".format(entity_type))
def bridge_adjacencies(self, index, interface, target):
"""
Get the adjacencies of a set of entities (or a single entity) connected through an especific interface.
-- Example --
volumes_ids = M.volumes.all
volumes_adjacencies = M.volumes.bridge_adjacencies(M.volumes.all, 2, 3)
-- Parameters --
index : integer
Indexes of entity or entities to get adjacent entities from.
interface : integer
Dimension of the interface entities.
target : integer
Dimension of the target entities.
--Returns--
An array containing the indexes of adjacents entities
"""
# lacks support for indexing with multiple numbers
range_vec = self.create_range_vec(index)
all_bridge = [self.mtu.get_bridge_adjacencies(el_handle, self.num[interface], self.num[target]) for el_handle
in self.range_index(range_vec)]
inside_meshset = self.mb.get_entities_by_handle(self.meshset)
all_brige_in_meshset = [rng.intersect(el_handle, inside_meshset) for el_handle in all_bridge]
all_briges_in_meshset_id = np.array([self.read(el_handle) for el_handle in all_brige_in_meshset])
return all_briges_in_meshset_id
def _coords(self, index):
range_vec = self.create_range_vec(index)
element_handle = self.range_index(range_vec, True)
return np.reshape(self.mb.get_coords(element_handle),(-1,3))
# def _global_id(self, index):
# range_vec = self.create_range_vec(index)
# element_handle = self.range_index(range_vec)
# return self.mb.tag_get_data(self.global_handle, element_handle).ravel()
# def _father_id(self, index):
# range_vec = self.create_range_vec(index)
# element_handle = self.range_index(range_vec)
# return self.mb.tag_get_data(self.father_handle, element_handle).ravel()
def _adjacencies_for_nodes(self, index):
return self.create_range_vec(index)
def _adjacencies(self, index,flag_nodes=False):
range_vec = self.create_range_vec(index)
if not flag_nodes:
dim_tag = self.vID - 1
else:
dim_tag = 0
all_adj = [self.mb.get_adjacencies(el_handle, dim_tag) for el_handle in self.range_index(range_vec)]
adj_id = np.array([self.read(el_handle) for el_handle in all_adj])
return adj_id
def _center(self,index):
range_vec = self.create_range_vec(index)
centers = np.zeros(( np.shape(range_vec)[0],3 ))
if self.vID == 0:
centers = self._coords(range_vec)
elif self.vID == 1:
edges_adj = self.connectivities[range_vec]
centers = 0.5* (self._coords(edges_adj[:,0]) + self._coords(edges_adj[:,1]))
elif self.vID == 2:
classified_elements = self.classify_element(range_vec)
tri_face = (classified_elements == types.MBTRI)
quad_face = (classified_elements == types.MBQUAD)
poly_face = (classified_elements == types.MBPOLYGON)
tri_faces_adj = self.connectivities[range_vec[tri_face]]
if tri_face.sum() != 0:
centers[tri_face] = gtool.get_average([self._coords(tri_faces_adj[:,col]) for col in range(tri_faces_adj.shape[1])])
if quad_face.sum() != 0:
quad_faces_adj = self.connectivities[range_vec[quad_face]]
centers[quad_face] = gtool.get_average([self._coords(quad_faces_adj[:,col]) for col in range(quad_faces_adj.shape[1])])
if poly_face.sum() != 0:
poly_faces_adj = self.connectivities[range_vec[poly_face]]
centers[poly_face] = gtool.get_average([self._coords(poly_faces_adj[:,col]) for col in range(poly_faces_adj.shape[1])])
elif self.vID == 3:
classified_elements = self.classify_element(range_vec)
tetra_volume = (classified_elements == types.MBTET)
pyramid_volume = (classified_elements == types.MBPYRAMID)
prism_volume = (classified_elements == types.MBPRISM)
knife_volume = (classified_elements == types.MBKNIFE)
hex_volume = (classified_elements == types.MBHEX)
if tetra_volume.sum() != 0:
tetra_volumes_adj = self.connectivities[range_vec[tetra_volume]]
centers[tetra_volume] = gtool.get_average([self._coords(tetra_volumes_adj[:,col]) for col in range(tetra_volumes_adj.shape[1])])
if pyramid_volume.sum() != 0:
pyramid_volumes_adj = self.connectivities[range_vec[pyramid_volume]]
centers[pyramid_volume] = gtool.get_average([self._coords(pyramid_volumes_adj[:,col]) for col in range(pyramid_volumes_adj.shape[1])])
if prism_volume.sum() != 0:
prism_volumes_adj = self.connectivities[range_vec[prism_volume]]
centers[prism_volume] = gtool.get_average([self._coords(prism_volumes_adj[:,col]) for col in range(prism_volumes_adj.shape[1])])
if knife_volume.sum() != 0:
knife_volumes_adj = self.connectivities[range_vec[knife_volume]]
centers[knife_volume] = gtool.get_average([self._coords(knife_volumes_adj[:,col]) for col in range(knife_volumes_adj.shape[1])])
if hex_volume.sum() != 0:
hex_volumes_adj = self.connectivities[range_vec[hex_volume]]
centers[hex_volume] = gtool.get_average([self._coords(hex_volumes_adj[:,col]) for col in range(hex_volumes_adj.shape[1])])
return centers
def _normal(self,index):
range_vec = self.create_range_vec(index)
#normal_vec = np.zeros(( np.shape(range_vec)[0],3 ))
if self.vID == 1:
all_adj = self.connectivities[range_vec]
return gtool.normal_vec_2d(self._coords(all_adj[:,0]),self._coords(all_adj[:,1]))
#edges_adj = self.connectivities[range_vec]
#centers = 0.5* (self._coords(edges_adj[:,0]) + self._coords(edges_adj[:,1]))
elif self.vID == 2:
classified_elements = self.classify_element(range_vec)
all_adj = np.zeros(( np.shape(range_vec)[0],3 ))
tri_face = (classified_elements == types.MBTRI)
quad_face = (classified_elements == types.MBQUAD)
poly_face = (classified_elements == types.MBPOLYGON)
if tri_face.sum() != 0:
all_adj[tri_face] = self.connectivities[range_vec[tri_face]]
if quad_face.sum() != 0:
all_adj[quad_face] = self.connectivities[range_vec[quad_face]][:,0:3]
if poly_face.sum() != 0:
all_adj[poly_face] = self.connectivities[range_vec[poly_face]][:,0:3]
return gtool.normal_vec(self._coords(all_adj[:,0]),self._coords(all_adj[:,1]),self._coords(all_adj[:,2]))
def _connectivities(self,index):
connectivities_id = None
range_vec = self.create_range_vec(index)
all_connectivities = [self.mb.get_connectivity(el_handle) for el_handle in self.range_index(range_vec)]
connectivities_id = np.array([self.read(el_handle) for el_handle in all_connectivities])
return connectivities_id
def create_range_vec(self, index):
range_vec = None
if isinstance(index, int) or isinstance(index, np.integer):
range_vec = np.array([index]).astype("uint")
elif isinstance(index, np.ndarray):
if index.dtype == "bool":
range_vec = np.where(index)[0]
else:
range_vec = index
elif isinstance(index, slice):
start = index.start
stop = index.stop
step = index.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
if start < 0:
start = len(self) + start + 1
if stop < 0:
stop = len(self) + stop + 1
range_vec = np.arange(start, stop, step).astype('uint')
elif isinstance(index, list):
range_vec = np.array(index)
return range_vec
def _classify_element(self, index):
range_vec = self.create_range_vec(index)
range = self.range_index(range_vec)
type_list = np.array([self.mb.type_from_handle(el) for el in range])
return type_list
def range_index(self, vec_index, flag_nodes=False):
if not flag_nodes:
range_handle = self.elements_handle
else:
range_handle = self.nodes
if vec_index.dtype == "bool":
vec = np.where(vec_index)[0]
else:
vec = vec_index.astype("uint")
handles = np.asarray(range_handle)[vec.astype("uint")].astype("uint")
return handles
# return rng.Range(handles)
def __str__(self):
string = "{0} object \n Total of {1} {0} \n {2} boundary {0} \n {3} internal {0}".format(self.entity_type,
len(self.elements_handle), len(self.boundary_elements), len(self.internal_elements))
return string
def __len__(self):
return len(self.elements_handle)
def __call__(self):
return self.all
def read(self, handle):
return self.mb.tag_get_data(self.tag_handle, handle).ravel()
@property
def all_flagged_elements(self):
return np.array( list(self.flag.values())).astype(int)
@property
def all_flags(self):
return np.array(list(self.flag.keys())).astype(int)
@property
def all(self):
return self.read(self.elements_handle)
@property
def boundary(self):
return self.read(self.boundary_elements)
@property
def internal(self):
return self.read(self.internal_elements)
class MoabVariable(object):
def __init__(self, core, name_tag, var_type="volumes", data_size=1, data_format="float", data_density="sparse",
entity_index=None):
# pdb.set_trace()
self.mb = core.mb
self.var_type = var_type
self.data_format = data_format
self.data_size = data_size
self.data_density = data_density
self.name_tag = name_tag
self.custom = False
if var_type == "nodes":
self.elements_handle = core.all_nodes
elif var_type == "edges":
self.elements_handle = core.all_edges
elif var_type == "faces":
self.elements_handle = core.all_faces
elif var_type == "volumes":
self.elements_handle = core.all_volumes
if entity_index is not None:
self.elements_handle = self.range_index(entity_index)
self.custom = True
if data_density == "dense":
data_density = types.MB_TAG_DENSE
elif data_density == "sparse":
data_density = types.MB_TAG_SPARSE
elif data_density == "bit":
data_density = types.MB_TAG_BIT
else:
print("Please define a valid tag type")
if data_format == 'float':
data_format = types.MB_TYPE_DOUBLE
elif data_format == "int":
data_format = types.MB_TYPE_INTEGER
elif data_format == "bool":
data_format = types.MB_TYPE_BIT
self.tag_handle = self.mb.tag_get_handle(name_tag, data_size, data_format, data_density, True)
print("Component class {0} successfully intialized".format(self.name_tag))
def __call__(self):
return self.mb.tag_get_data(self.tag_handle, self.elements_handle)
def __setitem__(self, index, data):
range_vec = self.create_range_vec(index)
if isinstance(data, int) or isinstance(data, float) or isinstance(data, bool):
data = data * np.ones((range_vec.shape[0], self.data_size)).astype(self.data_format)
        elif (isinstance(data, np.ndarray)) and (len(data) == self.data_size):
            data = np.tile(data, (range_vec.shape[0], 1)).astype(self.data_format)
        elif isinstance(data, list) and (len(data) == self.data_size):
            data = np.array(data)
            data = np.tile(data, (range_vec.shape[0], 1)).astype(self.data_format)
self.set_data(data, index_vec=range_vec)
def __getitem__(self, index):
range_vec = self.create_range_vec(index)
if isinstance(index, int):
return self.read_data(range_vec)[0][:]
else:
return self.read_data(range_vec)
def __str__(self):
string = "{0} variable: {1} based - Type: {2} - Length: {3} - Data Type: {4}"\
.format(self.name_tag.capitalize(), self.var_type.capitalize(), self.data_format.capitalize(),
self.data_size, self.data_density.capitalize())
if self.custom:
string = string + " - Custom variable"
return string
def __len__(self):
return len(self.elements_handle)
def create_range_vec(self, index):
range_vec = None
if isinstance(index, int):
range_vec = np.array([index]).astype("uint")
elif isinstance(index, np.ndarray):
if index.dtype == "bool":
range_vec = np.where(index)[0]
else:
range_vec = index
elif isinstance(index, slice):
start = index.start
stop = index.stop
step = index.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
if start < 0:
start = len(self) + start + 1
if stop < 0:
stop = len(self) + stop + 1
range_vec = np.arange(start, stop, step).astype('uint')
elif isinstance(index, list):
range_vec = np.array(index)
return range_vec
def range_index(self, vec_index):
range_handle = self.elements_handle
if vec_index.dtype == "bool":
vec = np.where(vec_index)[0]
else:
vec = vec_index.astype("uint")
handles = np.asarray(range_handle)[vec.astype("uint")].astype("uint")
return rng.Range(handles)
def set_data(self, data, index_vec=np.array([])):
if index_vec.size > 0:
range_el = self.range_index(index_vec)
else:
range_el = self.elements_handle
self.mb.tag_set_data(self.tag_handle, range_el, data)
def read_data(self, index_vec=np.array([])):
if index_vec.size > 0:
range_el = self.range_index(index_vec)
else:
range_el = self.elements_handle
return self.mb.tag_get_data(self.tag_handle, range_el)
``` |
{
"source": "jpradass/AmazonSpider",
"score": 3
} |
#### File: AmazonSpider/helper/logger_helper.py
```python
import logging
from logging import Logger
class Log:
logger: Logger = None
def __new__(cls) -> Logger:
if Log.logger is None:
Log.logger = logging.getLogger("spider")
Log.logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s - %(message)s'))
Log.logger.addHandler(ch)
return Log.logger
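# Usage sketch: every call to Log() returns the same configured logging.Logger,
# so e.g. Log().info("spider started") logs through the shared "spider" logger.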
``` |
{
"source": "jpradass/CoinbaseBot",
"score": 3
} |
#### File: CoinbaseBot/coinbase/api.py
```python
from typing import Dict
from coinbase.auth import auth
from coinbase.constants import COINBASE_API_URL
from coinbase.exceptions import APIException
import requests
class APIRequest:
def __init__(self) -> None:
pass
@classmethod
def get_request(cls, resource: str) -> Dict:
res = requests.get(COINBASE_API_URL + resource, auth=auth)
if res.status_code != requests.codes.OK:
raise APIException("There was some kind of problem with the request")
return res.json()
@classmethod
def post_request(cls, resource: str, params: Dict):
res = requests.post(COINBASE_API_URL + resource, auth=auth, json=params)
if res.status_code != requests.codes.CREATED:
raise APIException("There was some kind of problem with the request")
return res.json()
```
#### File: CoinbaseBot/coinbase/user.py
```python
from coinbase.api import APIRequest
class User:
"""
Generic user information. By default, only public information is shared without any scopes.
More detailed information or email can be requested with additional scopes.
"""
resource = 'user'
def __init__(self, *args: object, **kwargs: object) -> None:
self.id = kwargs.get('id')
self.name = kwargs.get('name')
self.username = kwargs.get('username')
self.email = kwargs.get('email')
@classmethod
def me(cls) -> "User":
res = APIRequest.get_request(cls.resource)
return cls(**res['data'])
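# Usage sketch (requires valid Coinbase API credentials behind `auth`):
#   me = User.me()
#   print(me.name, me.email)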
```
#### File: CoinbaseBot/coingecko/api.py
```python
import requests
from typing import Dict
from coingecko.constants import COINGECKO_API_URL
from coingecko.exceptions import APIException
class APIRequest:
def __init__(self) -> None:
pass
    @classmethod
    def get_request(cls, resource) -> Dict:
res = requests.get(COINGECKO_API_URL + resource)
if res.status_code != requests.codes.OK:
raise APIException("There was some kind of problem with the request")
return res.json()
``` |
{
"source": "jpradass/Raspberry-Utils",
"score": 3
} |
#### File: Raspberry-Utils/Cpu-FanWatcher/cpufan_watcher.py
```python
from gpiozero import CPUTemperature
import RPi.GPIO as GPIO
import time
max_threshold = 55.0
min_threshold = 35.0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(8, GPIO.OUT)
state = not GPIO.input(8)
cpu = CPUTemperature()
print("CPU temperature now:", cpu.temperature)
def change_state(switch, state):
print("+ Switching", switch, "fan")
print("+ CPU temperature now:", cpu.temperature)
GPIO.output(8, state)
return not state
try:
while 1:
    if cpu.temperature >= max_threshold and not state:
state = change_state("on", state)
    elif cpu.temperature < min_threshold and state:
state = change_state("off", state)
else:
time.sleep(5)
except KeyboardInterrupt:
GPIO.cleanup()
except Exception as e:
GPIO.cleanup()
print(e)
```
#### File: Raspberry-Utils/Monitoring/dht22_monitor.py
```python
import time
import requests
INFLUX_URL = 'http://localhost:8086/write?db=DHT22'
def sendDataToGrafana(humidity, temp, pressure):
requests.post(INFLUX_URL, data='temperature value=' + str(temp))
requests.post(INFLUX_URL, data='humidity value=' + str(humidity))
requests.post(INFLUX_URL, data='pressure value=' + str(pressure))
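# Example call with illustrative sensor readings (humidity %, temperature C, pressure hPa):
#   sendDataToGrafana(45.2, 21.3, 1013.25)
# Each call writes three points (temperature, humidity, pressure) into the local InfluxDB "DHT22" database.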
``` |
{
"source": "JPrakke/in-a-nutshell",
"score": 4
} |
#### File: in-a-nutshell/nutshell/nutshell.py
```python
import pyperclip as pc
from nutshell import text
print(text.intro)
def meme():
counter = 1
user_input = input("What would you like put in a nutshell? ")
input_split = list(user_input.lower())
split_string = []
for i in input_split:
if counter % 2 != 0:
split_string.append(i)
elif counter % 2 == 0:
split_string.append(i.upper())
counter += 1
output ="".join(split_string)
print(f"Someone: \"{user_input}\"")
print(f"Me: {output}")
copy_query = input(text.copy_it).lower()
while copy_query != "y" or copy_query != "n":
if copy_query == "y":
print(f"Copied \"{output}\".")
pc.copy(output)
copy_query = ""
break
elif copy_query == "n":
break
else:
print(text.error)
copy_query = input(text.copy_it).lower()
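# Example: entering "hello" prints it back as "hElLo" (alternating case, starting lowercase).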
def restart_meme():
restart = input(text.restart_it).lower()
while restart != "y" or restart != "n":
if restart == "y":
restart = ""
meme()
elif restart == "n":
print(text.outro)
break
else:
print(text.error)
restart = input(text.restart_it).lower()
def run():
meme()
restart_meme()
if __name__ == '__main__':
run()
``` |
{
"source": "jpramos123/kerlescan",
"score": 3
} |
#### File: kerlescan/tests/test_service_interface.py
```python
import unittest
import responses
from mock import MagicMock as mm
from kerlescan import service_interface
from kerlescan.exceptions import IllegalHttpMethodError
class FetchUrlTests(unittest.TestCase):
@responses.activate
def test_http_call(self):
url = "http://example.com/api/123"
auth_header = {}
logger = mm()
time_metric = mm()
exception_metric = mm()
method = "get"
service_interface._validate_service_response = mm()
responses.add(
**{
"method": responses.GET,
"url": url,
"body": '{"result": "called the http"}',
"status": 200,
"content_type": "application/json",
"adding_headers": {"X-Foo": "Bar"},
}
)
result = service_interface.fetch_url(
url, auth_header, logger, time_metric, exception_metric, method
)
self.assertEqual(result, {"result": "called the http"})
def test_invalid_http_method(self):
url = "http://example.com/api/123"
auth_header = {}
logger = mm()
time_metric = mm()
exception_metric = mm()
method = "invalid_method"
service_interface._validate_service_response = mm()
with self.assertRaises(IllegalHttpMethodError):
service_interface.fetch_url(
url, auth_header, logger, time_metric, exception_metric, method
)
``` |
{
"source": "jpramos123/notifications-backend",
"score": 3
} |
#### File: backend/helpers/helpers.py
```python
import uuid
import requests
def set_path_prefix(base_path):
"""Set up the paths to use"""
if base_path is None:
raise RuntimeError("No base path passed")
global __APPLICATION_PREFIX
global __BUNDLES_PREFIX
global event_types_prefix
global integrations_prefix
global notifications_prefix
__APPLICATION_PREFIX = base_path + "/internal/applications"
__BUNDLES_PREFIX = base_path + "/internal/bundles"
event_types_prefix = base_path + "/internal/eventTypes"
integrations_prefix = base_path + "/api/integrations/v1.0"
notifications_prefix = base_path + "/api/notifications/v1.0"
def find_application(bundle_id, app_name):
"""Find an application by name and return its UUID or return None
:param bundle_id Id of the bundle under which the app resides
:param app_name: Name of the application
"""
r = requests.get(__BUNDLES_PREFIX + "/" + bundle_id + "/applications")
if r.status_code != 200:
return None
j = r.json()
for app in j:
if app["name"] == app_name:
return app["id"]
return None
def add_application(bundle_id, name, display_name):
"""Adds an application if it does not yet exist
:param bundle_id: id of the bundle we add the application to
:param name: Name of the application, [a-z0-9-]+
:param display_name: Display name of the application
"""
# First try to find it.
ret = find_application(bundle_id, name)
if ret is not None:
return ret
# The app does not yet exist, so try to create
app_json = {"name": name,
"display_name": display_name,
"bundle_id": bundle_id}
r = requests.post(__APPLICATION_PREFIX, json=app_json)
print(r.status_code)
response_json = r.json()
print(response_json)
    if r.status_code // 100 != 2:
exit(1)
aid = response_json['id']
return aid
def delete_application(app_id):
"""Deletes an application by its id"""
r = requests.delete(__APPLICATION_PREFIX + "/" + app_id)
print(r.status_code)
def delete_bundle(bundle_id):
"""Deletes a bundle by its id"""
r = requests.delete(__BUNDLES_PREFIX + "/" + bundle_id)
print(r.status_code)
def add_event_type(application_id, name, display_name):
"""Add an EventType by name
:param application_id: UUID of the application
:param name: Name of the type
:param display_name: Display name of the type
"""
# First try to find it
ret = find_event_type(application_id, name)
if ret is not None:
return ret
# It does not exist, so create it
et_json = {"name": name, "display_name": display_name, "application_id": application_id}
r = requests.post(event_types_prefix, json=et_json)
response_json = r.json()
print(response_json)
    if r.status_code // 100 != 2:
exit(2)
return response_json['id']
def add_bundle(name, display_name):
"""Adds a bundle if it does not yet exist
:param name: Name of the bundle, [a-z0-9-]+
:param display_name: Display name of the application
"""
# First try to find it.
ret = find_bundle(name)
if ret is not None:
return ret
# It does not yet exist, so try to create
bundle_json = {"name": name,
"display_name": display_name}
r = requests.post(__BUNDLES_PREFIX, json=bundle_json)
print(r.status_code)
response_json = r.json()
print(response_json)
    if r.status_code // 100 != 2:
exit(1)
aid = response_json['id']
return aid
def find_bundle(name):
"""Find a bundle by name and return its UUID or return None
:param name: Name of the bundle
"""
result = requests.get(__BUNDLES_PREFIX)
if result.status_code != 200:
return None
result_json = result.json()
for bundle in result_json:
if bundle["name"] == name:
return bundle["id"]
return None
def find_event_type(application_id, name):
"""Find an event type by name for an application.
Returns the full type or None if not found
"""
r = requests.get(__APPLICATION_PREFIX + "/" + application_id + "/eventTypes")
if r.status_code != 200:
return None
j = r.json()
for et in j:
if et["name"] == name:
return et
return None
def create_endpoint(name, xrhid, properties, ep_type="webhook"):
"""Creates an endpoint"""
ep_uuid = uuid.uuid4()
ep_id = str(ep_uuid)
properties["endpointId"] = ep_id
ep_json = {"name": name,
"description": name,
"enabled": True,
"properties": properties,
"type": ep_type}
h = {"x-rh-identity": xrhid}
r = requests.post(integrations_prefix + "/endpoints", json=ep_json, headers=h)
print(r.status_code)
    if r.status_code // 100 != 2:
print(r.reason)
exit(1)
response_json = r.json()
epid = response_json["id"]
print(epid)
return epid
def find_behavior_group(display_name, bundle_id, x_rhid):
"""Find a behavior group by its display name"""
headers = {"x-rh-identity": x_rhid}
r = requests.get(notifications_prefix + "/notifications/bundles/" + bundle_id + "/behaviorGroups",
headers=headers)
if r.status_code != 200:
return None
j = r.json()
for bg in j:
if bg["display_name"] == display_name:
return bg["id"]
return None
def create_behavior_group(name, bundle_id, x_rhid):
"""Creates a behavior group"""
bg_id = find_behavior_group(name, bundle_id, x_rhid)
if bg_id is not None:
return bg_id
bg_json = {"display_name": name,
"bundle_id": bundle_id}
headers = {"x-rh-identity": x_rhid}
r = requests.post(notifications_prefix + "/notifications/behaviorGroups",
json=bg_json,
headers=headers)
print(r.status_code)
    if r.status_code // 100 != 2:
print(r.reason)
exit(1)
response_json = r.json()
bg_id = response_json["id"]
print(bg_id)
return bg_id
def link_bg_endpoint(bg_id, ep_id, x_rhid):
"""Link the behavior group to the endpoint"""
headers = {"x-rh-identity": x_rhid}
ep_list = [ep_id]
r = requests.put(notifications_prefix + "/notifications/behaviorGroups/" + bg_id + "/actions",
json=ep_list,
headers=headers)
def add_endpoint_to_event_type(event_type_id, endpoint_id, x_rhid):
headers = {"x-rh-identity": x_rhid}
r = requests.put(notifications_prefix + "/notifications/eventTypes/" + event_type_id + "/" + endpoint_id,
headers=headers)
print(r.status_code)
def shorten_path(path):
"""Shorten an incoming domain name like path to
only have the first char of each segment except the last
e.g. foo.bar.baz -> f.b.baz
"""
out = ""
segments = path.split(".")
l = len(segments)
i = 0
while i < l:
element = segments[i]
if i < l-1:
out = out + element[0]
out = out + "."
else:
out = out + element
i += 1
return out
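# Example from the docstring above: shorten_path("foo.bar.baz") -> "f.b.baz"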
def print_history_for_event_type(bundle_id, app_id, event_type_name, x_rhid):
headers = {"x-rh-identity": x_rhid}
params={"bundleIds": bundle_id,
"appIds": app_id,
"includeDetails": True,
"eventTypeDisplayName": event_type_name}
r = requests.get(notifications_prefix + "/notifications/events/",
params=params,
headers=headers)
if r.status_code != 200:
print (r.reason)
exit(1)
response_json = r.json()
data = response_json['data']
for entry in data:
print("Entry created at " + entry["created"] )
for action in entry["actions"]:
print(f" Type {action['endpoint_type']}, success= {action['invocation_result']}")
if action['endpoint_type'] == 'camel':
details = action['details']
if details is None:
print(" No details provided")
else:
print(" sub_type " + shorten_path(details['type']))
print(" target url " + details['target'])
print(" outcome " + details['outcome'])
def add_event_type_to_behavior_group(et_id, bg_id, x_rh_id):
bg_set = [bg_id]
headers = {"x-rh-identity": x_rh_id}
r = requests.put(notifications_prefix + "/notifications/eventTypes/" + et_id + "/behaviorGroups",
json=bg_set,
headers=headers)
print(r.status_code)
return None
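# End-to-end sketch of how these helpers chain together (the base URL, names and
# x_rhid value below are made-up placeholders, not part of this module):
#   set_path_prefix("http://localhost:8085")
#   bundle_id = add_bundle("my-bundle", "My Bundle")
#   app_id = add_application(bundle_id, "my-app", "My App")
#   et_id = add_event_type(app_id, "my-event", "My Event")  # returns the id on creation (or the full type dict if it already exists)
#   bg_id = create_behavior_group("My behavior group", bundle_id, x_rhid)
#   add_event_type_to_behavior_group(et_id, bg_id, x_rhid)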
``` |
{
"source": "Jpranay1/Logistic",
"score": 2
} |
#### File: Jpranay1/Logistic/LogisticRegression.py
```python
import numpy as np
import random
def gen(theta, n, m):
beta = np.random.randint(0,9, (m+1,1))
X = np.random.uniform(-1,1, (n, m+1))
X[:,0] = 1
z = np.dot(X, beta)
y = np.ones(len(z))
y = 1/(1 + np.exp(-z))
y = np.where(y > 0.5, 1, 0)
noise = np.random.binomial(1, theta, (n,1))
y = y + noise
y = np.where(y > 1, 0, y)
b = int((2/3)*n)
Xtrain = X[0:b,:]
Ytrain = y[0:b]
Xtest = X[b:,:]
Ytest = y[b:]
return beta, X, y, Xtrain, Ytrain, Xtest, Ytest
def costfun(x,y,beta):
m = len(x[0,:])
n = len(x[:,0])
z = np.dot(x, beta)
h = 1/(1 + np.exp(-z))
h = np.transpose(h)
return (1/n)*(-np.dot(np.log(h), y) - np.dot(np.log(1-h), (1-y)))
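# costfun computes the averaged binary cross-entropy:
#   J(beta) = -(1/n) * sum_i [ y_i*log(h_i) + (1 - y_i)*log(1 - h_i) ],  with h = sigmoid(X @ beta)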
def grad_desc(x,y,beta,alpha,k,tou):
m = len(x[0,:])-1
n = len(x[:,0])
precost = 0
for i in range(k+1):
        if abs(costfun(x,y,beta) - precost) > tou:
z = np.dot(x, beta)
h = 1/(1 + np.exp(-z))
r = h - y
p = np.transpose(r)
precost = costfun(x,y,beta)
beta = beta - (alpha/n)*np.transpose(np.dot(p,x))
else:
break
J = costfun(x,y,beta)
return beta, J
def log_reg(X,y,k,alpha):
m = len(X[0,:])-1
n = len(X[:,0])
beta = np.random.randint(0,9, (m+1,1))
beta, J = grad_desc(X,y,beta,alpha,k,tou)
return beta, J
beta, X, y, Xa1, Ya1, Xa2, Ya2 = gen(0,500,10)
print('The value of beta :','\n', beta,'\n')
print('The array X : ','\n', X,'\n')
print('Binary array Y : ','\n', y,'\n')
k = 100
alpha = 0.5
tou = 0.001
betaN, J = log_reg(Xa1,Ya1,k,alpha)
print('The beta after gradient Descent : ', betaN,'\n')
print('The final cost value after GD : ', J,'\n')
def predict(beta,X):
z = np.dot(X, beta)
y = np.ones(len(z))
y = 1/(1 + np.exp(-z))
y = np.where(y > 0.5, 1, 0)
return y
#yp1 = predict(betaN,Xa1)
#print(yp,'\n')
Yp2 = predict(betaN,Xa2)
def matrix(y1, y2):
n = len(y1)
mat = np.where(y1 == y2, 1,0)
TP = np.sum(np.where((y1==1) & (y2==1), 1,0))
FN = np.sum(np.where((y1==1) & (y2==0), 1,0))
TN = np.sum(np.where((y1==0) & (y2==0), 1,0))
FP = np.sum(np.where((y1==0) & (y2==1), 1,0))
acc = np.sum(mat)/n
TPR = TP/(TP + FN)
FPR = FP/(FP + TN)
TNR = TN/(TN + FP)
    FNR = FN/(FN + TP)  # miss rate: false negatives over all actual positives
print('accuracy : ',acc,'\n','TPR : ', TPR, '\n','FPR : ', FPR, '\n','TNR : ', TNR, '\n','FNR : ', FNR)
matrix(Ya2,Yp2)
################################################################################
#print('\n','\n','This is by using L1 regularisation : ','\n')
def costfuncL1(x,y,beta,lam):
m = len(x[0,:])
n = len(x[:,0])
z = np.dot(x, beta)
h = 1/(1 + np.exp(-z))
h = np.transpose(h)
    l1 = np.sum(np.abs(beta[1:]))
return (1/n)*(-np.dot(np.log(h), y) - np.dot(np.log(1-h), (1-y))) + (lam/n)*l1
def grad_descL1(x,y,beta,alpha,k,tou,lam):
m = len(x[0,:])-1
n = len(x[:,0])
precost = 0
for i in range(k+1):
if(costfuncL1(x,y,beta,lam) - precost > tou):
z = np.dot(x, beta)
h = 1/(1 + np.exp(-z))
r = h - y
p = np.transpose(r)
precost = costfuncL1(x,y,beta,lam)
l1 = np.sum(np.abs(beta[1:])/beta[1:])
beta = beta - (alpha/n)*np.transpose(np.dot(p,x)) + (lam/n)*l1
beta[0] = beta[0] - (lam/n)*l1
else:
break
J = costfuncL1(x,y,beta,lam)
return beta, J
def log_regL1(X,y,k,alpha,lam):
m = len(X[0,:])-1
n = len(X[:,0])
beta = np.random.randint(0,9, (m+1,1))
beta, J = grad_descL1(X,y,beta,alpha,k,tou,lam)
return beta, J
###############################################################################
def costfuncL2(x,y,beta,lam):
m = len(x[0,:])
n = len(x[:,0])
z = np.dot(x, beta)
h = 1/(1 + np.exp(-z))
h = np.transpose(h)
    l2 = np.sum(beta[1:]**2)  # squared L2 norm of the non-intercept weights
    return (1/n)*(-np.dot(np.log(h), y) - np.dot(np.log(1-h), (1-y))) + (lam/n)*l2
def grad_descL2(x,y,beta,alpha,k,tou,lam):
m = len(x[0,:])-1
n = len(x[:,0])
precost = 0
for i in range(k+1):
if(costfuncL2(x,y,beta,lam) - precost > tou):
z = np.dot(x, beta)
h = 1/(1 + np.exp(-z))
r = h - y
p = np.transpose(r)
precost = costfuncL2(x,y,beta,lam)
l1 = np.sum(beta[1:])
beta = beta - (alpha/n)*np.transpose(np.dot(p,x)) + (lam/n)*l1
beta[0] = beta[0] - (lam/n)*l1
else:
break
J = costfuncL2(x,y,beta,lam)
return beta, J
def log_regL2(X,y,k,alpha,lam):
m = len(X[0,:])-1
n = len(X[:,0])
beta = np.random.randint(0,9, (m+1,1))
beta, J = grad_descL2(X,y,beta,alpha,k,tou,lam)
return beta, J
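# Added illustrative run of the regularised variants defined above; they are not exercised
# in the original script. The value of lam is an assumption.
lam = 0.1
betaL1, JL1 = log_regL1(Xa1, Ya1, k, alpha, lam)
print('The final cost value with L1 regularisation : ', JL1, '\n')
betaL2, JL2 = log_regL2(Xa1, Ya1, k, alpha, lam)
print('The final cost value with L2 regularisation : ', JL2, '\n')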
``` |
{
"source": "jprao/reliable_pos",
"score": 2
} |
#### File: reliable_pos/crm/models.py
```python
from django.db import models
from master_data.models import Country
class LoyaltyCard(models.Model):
"""Loyalty Cards"""
number = models.CharField(max_length=20, unique=True,
null=False, blank=False)
active = models.BooleanField(default=True)
issue_date = models.DateField()
valid_to = models.DateField()
deleted = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
db_table = "loyalty_cards"
verbose_name = "LoyaltyCard"
verbose_name_plural = "LoyaltyCards"
def __str__(self):
return self.number
class Customer(models.Model):
""" Customers """
firstname = models.CharField(max_length=80)
lastname = models.CharField(max_length=80, null=False, blank=False)
email = models.EmailField()
telephone = models.CharField(max_length=20)
fax = models.CharField(max_length=20)
mobile = models.CharField(max_length=20)
address1 = models.CharField(max_length=80)
address2 = models.CharField(max_length=80)
postal_code = models.CharField(max_length=12, null=False, blank=False)
city = models.CharField(max_length=50)
region = models.CharField(max_length=50)
    country = models.ForeignKey(Country, on_delete=models.PROTECT,
                                related_name='country')
notes = models.TextField()
maxdebt = models.FloatField(default=0.0)
current_debt = models.FloatField(default=0.0)
image = models.BinaryField()
loyalty_card = models.ForeignKey(LoyaltyCard, on_delete=models.PROTECT,
related_name='loyalty_card')
class Meta:
db_table = 'customers'
verbose_name = "Customer"
verbose_name_plural = "Customers"
    def __str__(self):
        return f"{self.firstname} {self.lastname}"
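# Illustrative usage (added example, e.g. from a Django shell; the values are assumptions):
#   card = LoyaltyCard.objects.create(number="LC-0001", issue_date="2024-01-01", valid_to="2025-01-01")
#   Customer.objects.create(firstname="Jane", lastname="Doe", postal_code="12345",
#                           country=Country.objects.first(), loyalty_card=card, ...)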
``` |
{
"source": "jprasanthkumar28/Promantia_Local",
"score": 2
} |
#### File: doctype/employee_deduction/employee_deduction.py
```python
import frappe
from frappe.model.document import Document
import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
class EmployeeDeduction(Document):
def validate(self):
bal = 0
total_bal = 0
today = date.today()
today = str(today).split('-')
month = datetime.date(1900, int(today[1]), 1).strftime('%b')
month = month + "-" + today[0]
for row in self.deduction_calculation:
if row.total == 0:
frappe.db.sql("""delete from `tabDeduction Calculation` where name = %s""", row.name)
# self.reload()
bal = row.total - row.actual_paid
row.balance = bal
if row.month == month:
self.month_total_balance = bal
for ind in range(1,13):
rec_month = datetime.date(1900, ind, 1).strftime('%b')
rec_month = rec_month + "-" + today[0]
# print(int(today[1]))
if int(today[1]) >= ind and row.month == rec_month:
print(rec_month,"=", row.month, "\n\n\n")
bal = row.total - row.actual_paid
total_bal += bal
self.grand_total = total_bal
@frappe.whitelist()
def add_data(doc, deduction_type=None, s_date=None, amount=None):
s_date = s_date.split('-')
month = datetime.date(1900, int(s_date[1]), 1).strftime('%b')
month = month + "-" + s_date[0]
onetime_amt = int(amount)
recurring_amt = 0
total = onetime_amt + recurring_amt
update_doc = {}
update_doc.update({'month': month, 'recurring': recurring_amt, 'onetime': onetime_amt, 'total': total})
return update_doc
@frappe.whitelist()
def update_recurring(doc, deduction_type=None, s_date=None, e_date=None, amount=None):
s_date = s_date.split('-')
e_date = e_date.split('-')
onetime_amt = 0
recurring_amt = int(amount)/(int(e_date[1])+1 - int(s_date[1]))
total = onetime_amt + recurring_amt
update_doc = []
for i in range(int(s_date[1]), int(e_date[1])+1, 1):
rec_month = datetime.date(1900, i, 1).strftime('%b')
rec_month = rec_month + "-" + s_date[0]
print(rec_month, "\n")
update_doc.append({'month': rec_month, 'recurring': recurring_amt, 'onetime': onetime_amt, 'total': total})
# print(update_doc)
return update_doc
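# Illustrative example (added; dates and amount are assumptions):
#   update_recurring(doc, s_date="2022-01-10", e_date="2022-03-31", amount="300")
#   spreads 300 over Jan..Mar 2022: recurring = 300 / (3 + 1 - 1) = 100.0 per month,
#   returning one row per month such as {'month': 'Jan-2022', 'recurring': 100.0, 'onetime': 0, 'total': 100.0}.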
@frappe.whitelist()
def get_month(doc, month, bal):
name = frappe.db.get_value('Deduction Calculation', {'parent': doc, 'month': month}, ['name'])
    if name is None:
return 0
else:
return name
@frappe.whitelist()
def get_end_date(e_date):
print(e_date)
# e_date = datetime.date(2022, 2, 16) + relativedelta(day=31)
# formatted_date = datetime.strptime(e_date, "%d-%m-%Y")
date = datetime.datetime.strptime(e_date, "%d-%m-%Y")
# # frappe.db.set_value('Deduction Details', {'parent': doc} , 'end_date', formatted_date)
print(type(date) ,"\n\n\n")
return date
```
#### File: doctype/employee_deduction/pyyy.py
```python
import frappe
from frappe.model.document import Document
import datetime
class EmployeeDeduction(Document):
pass
@frappe.whitelist()
def add_data(doc, deduction_type=None, s_date=None, amount=None):
s_date = s_date.split('-')
month = datetime.date(1900, int(s_date[1]), 1).strftime('%b')
onetime_amt = int(amount)
recurring_amt = 0
total = onetime_amt + recurring_amt
update_doc = {}
update_doc.update({'month': month, 'recurring': recurring_amt, 'onetime': onetime_amt, 'total': total})
return update_doc
@frappe.whitelist()
def update_recurring(doc, deduction_type=None, s_date=None, e_date=None, amount=None):
s_date = s_date.split('-')
e_date = e_date.split('-')
onetime_amt = 0
recurring_amt = int(amount)/(int(e_date[1])+1 - int(s_date[1]))
total = onetime_amt + recurring_amt
update_doc = []
for i in range(int(s_date[1]), int(e_date[1])+1, 1):
rec_month = datetime.date(1900, i, 1).strftime('%b')
print(rec_month, "\n")
update_doc.append({'month': rec_month, 'recurring': recurring_amt, 'onetime': onetime_amt, 'total': total})
print(update_doc)
return update_doc
@frappe.whitelist()
def get_month(doc, month, bal):
name = frappe.db.get_value('Deduction Calculation', {'parent': doc, 'month': month}, ['name'])
total = frappe.db.get_list('Deduction Calculation', {'parent': doc }, ['total'], pluck='total')
print(doc, month ,"\n\n\n\n")
print(sum(total), bal,"\n\nTotal\n")
# frappe.db.set_value('Employee Deduction', doc , 'grand_total', sum(total)+int(bal))
    if name is None:
return 0
else:
return name
```
#### File: doctype/quotation/quotation.py
```python
from __future__ import unicode_literals
import frappe
def on_submit(doc,method):
print(doc.customer_name,"\n\n\n\n")
# frappe.throw("Testing")
for val in doc.items:
print(val.item_code, doc.valid_till, val.qty, val.rate, val.amount, "\n\n\n\n")
si_doc=frappe.get_doc(dict(doctype = 'Sales Order',
customer=doc.customer_name,
delivery_date=doc.valid_till,
total_qty=doc.total_qty,
total_net_weight=doc.total_net_weight,
total=doc.total,
grand_total=doc.grand_total,
rounding_adjustment=doc.rounding_adjustment,
rounded_total=doc.rounded_total,
base_grand_total=doc.base_grand_total,
base_rounded_total=doc.base_rounded_total
)).insert(ignore_mandatory=True)
print(si_doc,"Debugger2<------\n\n\n")
for val in doc.items:
si_doc.append('items', {
'item_code':val.item_code,
'delivery_date':doc.valid_till,
'qty':val.qty,
'rate':val.rate,
'amount':val.amount
})
si_doc.save()
```
#### File: doctype/supplier/supplier.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
import erpnext
from frappe import _
from frappe.desk.treeview import get_all_nodes, get_children
class Supplier(Document):
pass
@frappe.whitelist()
def get_root_supplier(supplier):
root_supplier = supplier
    parent = frappe.db.get_value(
        'Supplier', {'name': supplier}, 'parent_supplier')
if parent:
root_supplier = get_root_supplier(parent)
return root_supplier
@frappe.whitelist()
def get_descendents(doctype, parent=None, **filters):
if not parent or parent == "Supplier":
return get_children(doctype)
if parent:
supplier_doc = frappe.get_cached_doc(
"Supplier", parent)
frappe.has_permission("Supplier", doc=supplier_doc, throw=True)
child_suppliers = frappe.get_all('Supplier',
fields=['parent_supplier',
'name as value', 'is_group'],
filters=[
['parent_supplier', '=', parent]],
order_by='idx')
for supplier in child_suppliers:
supplier.expanded = 0 if supplier.is_group == 0 else 1
supplier.expandable = 0 if supplier.is_group == 0 else 1
return child_suppliers
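# Illustrative behaviour (added; the supplier names are assumptions):
#   if SUP-0003.parent_supplier == "SUP-0002" and SUP-0002.parent_supplier == "SUP-0001"
#   (and SUP-0001 has no parent), then get_root_supplier("SUP-0003") returns "SUP-0001",
#   while get_descendents("Supplier", parent="SUP-0001") lists SUP-0002 among its children.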
``` |
{
"source": "jprasanthkumar28/Testing",
"score": 2
} |
#### File: report/wip_report/wip_report.py
```python
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
conditions = get_conditions(filters)
data = get_data(filters,conditions)
return columns, data
def get_columns():
columns = [
{
"fieldname": "blanket_order",
"label": _("Blanket Order"),
"fieldtype": "Link",
"options": "Blanket Order",
"width": 200
},
{
"fieldname": "style",
"label": _("Style"),
"fieldtype": "Link",
"options": "Item",
"width": 200
},
{
"fieldname": "work_order",
"label": _("Work Order / PO"),
"fieldtype": "Dynamic Link",
"options" : "doctype",
"width": 200
},
{
"fieldname": "process",
"label": _("Process"),
"fieldtype": "data",
"width": 200
},
{
"fieldname": "workstation",
"label": _("Workstation"),
"fieldtype": "data",
"width": 200
},
{
"fieldname": "issue_qty",
"label": _("Issue Qty"),
"fieldtype": "Float",
"width": 200
},
{
"fieldname": "receive_qty",
"label": _("Receive Qty"),
"fieldtype": "Float",
"width": 200
},
{
"fieldname": "balance_qty",
"label": _("Balance Qty"),
"fieldtype": "Float",
"width": 200
},
{
"fieldname": "reject_qty",
"label": _("Rejected Qty"),
"fieldtype": "Float",
"width": 200
},
{
"fieldname": "returned_qty",
"label": _("Return without jobwork"),
"fieldtype": "float",
"width": 200
}
]
return columns
def get_data(filters,conditions):
query="""SELECT t1.production_plan, t1.blanket_order, t1.work_order, t1.doctype, t1.style, t1.process, t1.workstation, COALESCE(t1.issue_qty,0) as "issue_qty", COALESCE(t1.receive_qty,0) as "receive_qty", COALESCE((t1.issue_qty-t1.receive_qty),0) as "balance_qty", COALESCE(t1.reject_qty, 0) as "reject_qty", COALESCE(t1.returned_qty,0) as "returned_qty" from
(SELECT DISTINCT
pp.name as "production_plan",
soi.blanket_order,
wo.name as "work_order",
"Work Order" as doctype,
(SELECT i.variant_of from `tabItem` i where i.name = soi.item_code) as "style",
woo.operation as "process",
woo.workstation ,
(SELECT SUM(woi.transferred_qty) from `tabWork Order Item` woi inner join `tabItem` fti on woi.item_code = fti.name
where woi.parent = wo.name and (fti.fabric_or_yarn = 1 or fti.intermediate_product = 1) GROUP BY woi.parent) as "issue_qty",
wo.produced_qty as "receive_qty",
(SELECT sum(se.fg_completed_qty) from `tabStock Entry` se where se.work_order = wo.name and se.stock_entry_type = 'Manufacturing Reject') as "reject_qty",
(SELECT sum(se.fg_completed_qty) from `tabStock Entry` se where se.work_order = wo.name and se.stock_entry_type = 'Manufacturing Return') as "returned_qty"
from `tabProduction Plan` pp
inner join `tabProduction Plan Sales Order` ppso
on pp.name = ppso.parent
inner join `tabSales Order Item` soi
on ppso.sales_order = soi.parent
inner join `tabWork Order` wo
on pp.name = wo.production_plan
inner Join `tabWork Order Operation` woo
on woo.parent = wo.name
where pp.docstatus = 1
UNION
SELECT DISTINCT
tpp.name as "production_plan",
tsoi.blanket_order,
poi.parent as "work_order",
"Purchase Order" as doctype,
(SELECT ti.variant_of from `tabItem` ti where ti.name = tsoi.item_code) as "style",
(SELECT bo.operation from `tabBOM Operation` bo WHERE bo.parent = poi.bom) as "process",
(SELECT po.supplier from `tabPurchase Order` po where po.name = poi.parent) as "workstation",
(SELECT sum(pois.supplied_qty) from `tabPurchase Order Item Supplied`
pois inner join `tabItem` it on it.item_code = pois.rm_item_code
where pois.parent = poi.parent and poi.item_code = pois.main_item_code and (it.fabric_or_yarn = 1 or it.intermediate_product = 1)) as "issue_qty",
poi.received_qty as "receive_qty",
(Select sum(pri.rejected_qty) from `tabPurchase Receipt Item` pri where poi.parent = pri.purchase_order and poi.item_code = pri.item_code) as "reject_qty",
(SELECT sum(pois.returned_qty) from `tabPurchase Order Item Supplied` pois
inner join `tabItem` it on it.item_code = pois.rm_item_code
where pois.parent = poi.parent and poi.item_code = pois.main_item_code and (it.fabric_or_yarn = 1 or it.intermediate_product = 1)) as "returned_qty"
from `tabProduction Plan` tpp
inner join `tabProduction Plan Sales Order` tppso
on tpp.name = tppso.parent
inner join `tabSales Order Item` tsoi
on tppso.sales_order = tsoi.parent
inner join `tabPurchase Order Item` poi
on poi.production_plan = tpp.name
where tpp.docstatus =1
UNION
SELECT
mpp.name as "production_plan",
mri.blanket_order ,
mwo.name as "work_order",
"Work Order" as doctype,
(SELECT mi.variant_of from `tabItem` mi where mi.name = mri.item_code) as "style",
mwoo.operation as "process",
mwoo.workstation ,
(SELECT SUM(mwoi.transferred_qty) from `tabWork Order Item` mwoi inner join `tabItem` mti on mwoi.item_code = mti.name
where mwoi.parent = mwo.name and (mti.fabric_or_yarn = 1 or mti.intermediate_product = 1) GROUP BY mwoi.parent) as "issue_qty",
mwo.produced_qty as "receive_qty",
(SELECT sum(mse.fg_completed_qty) from `tabStock Entry` mse where mse.work_order = mwo.name and mse.stock_entry_type = 'Manufacturing Reject') as "reject_qty",
(SELECT sum(mse.fg_completed_qty) from `tabStock Entry` mse where mse.work_order = mwo.name and mse.stock_entry_type = 'Manufacturing Return') as "returned_qty"
FROM `tabProduction Plan` mpp
inner join `tabProduction Plan Material Request` ppmr
on mpp.name = ppmr.parent
inner join `tabMaterial Request Item` mri
on ppmr.material_request = mri.parent
inner join `tabWork Order` mwo
on mpp.name = mwo.production_plan
inner Join `tabWork Order Operation` mwoo
on mwoo.parent = mwo.name
where mpp.docstatus = 1
UNION
SELECT
mtpp.name as "production_plan",
tmri.blanket_order,
mpoi.parent as "work_order",
"Purchase Order" as doctype,
(SELECT mti.variant_of from `tabItem` mti where mti.name = tmri.item_code) as "style",
(SELECT mbo.operation from `tabBOM Operation` mbo WHERE mbo.parent = mpoi.bom) as "process",
(SELECT mpo.supplier from `tabPurchase Order` mpo where mpo.name = mpoi.parent) as "workstation",
(SELECT sum(tpois.supplied_qty) from `tabPurchase Order Item Supplied` tpois
inner join `tabItem` it on it.item_code = tpois.rm_item_code
where tpois.parent = mpoi.parent and mpoi.item_code = tpois.main_item_code and (it.fabric_or_yarn = 1 or it.intermediate_product = 1)) as "issue_qty",
mpoi.received_qty as "receive_qty",
(Select sum( mpri.rejected_qty) from `tabPurchase Receipt Item` mpri where mpoi.parent = mpri.purchase_order and mpoi.item_code = mpri.item_code ) as "reject_qty",
(SELECT sum(tpois.returned_qty) from `tabPurchase Order Item Supplied` tpois
inner join `tabItem` it on it.item_code = tpois.rm_item_code
where tpois.parent = mpoi.parent and mpoi.item_code = tpois.main_item_code and (it.fabric_or_yarn = 1 or it.intermediate_product = 1)) as "returned_qty"
from `tabProduction Plan` mtpp
inner join `tabProduction Plan Material Request` tppmr
on mtpp.name = tppmr.parent
inner join `tabMaterial Request Item` tmri
on tppmr.material_request = tmri.parent
inner join `tabPurchase Order Item` mpoi
on mpoi.production_plan = mtpp.name
WHERE mtpp.docstatus =1
) as t1 where t1.blanket_order is not null {conditions}""".format(conditions=conditions)
orders=frappe.db.sql(query, as_dict=True)
return orders
def get_conditions(filters):
conditions=""
if filters.get('blanket_order'):
conditions += " AND t1.blanket_order = '{}'".format(filters.get('blanket_order'))
if filters.get('item'):
conditions += " AND t1.style = '{}'".format(filters.get('item'))
if filters.get('work_order') and not filters.get('purchase_order'):
conditions += " AND t1.work_order = '{}'".format(filters.get('work_order'))
if not filters.get('work_order') and filters.get('purchase_order'):
conditions += " AND t1.work_order = '{}'".format(filters.get('purchase_order'))
    if filters.get('work_order') and filters.get('purchase_order'):
        # parenthesise the OR so it cannot bypass the other WHERE conditions
        conditions += " AND (t1.work_order = '{0}' OR t1.work_order = '{1}')".format(
            filters.get('work_order'), filters.get('purchase_order'))
if filters.get('production_plan'):
conditions += " AND t1.production_plan = '{}'".format(filters.get('production_plan'))
return conditions
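# Illustrative output (added; the filter values are assumptions):
#   get_conditions({'blanket_order': 'BO-0001', 'item': 'ITEM-0001'})
#   -> " AND t1.blanket_order = 'BO-0001' AND t1.style = 'ITEM-0001'"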
``` |
{
"source": "jprashant21/ElNeuKGQA",
"score": 3
} |
#### File: dataset_tools/dataset_case/base_case.py
```python
from typing import Optional
class BaseCaseMethodNotImplemented(Exception):
"""
Exception when a BaseCase method has not been implemented.
"""
pass
class BaseCase:
"""
Base class for representing a dataset case with a mandatory question text
and an optional question id and sparql query answer.
"""
@property
def question_id(self) -> Optional[int]:
"""
Get the question id, if exists.
:return: question integer identifier or None if there is no identifier.
"""
return None
@property
def question_text(self) -> str:
"""
Get the question text.
:exception: BaseCaseMethodNotImplemented if method has not been implemented.
:return: question string.
"""
raise BaseCaseMethodNotImplemented
@property
def sparql_query(self) -> Optional[str]:
"""
Get the SPARQL query answer.
:return: SPARQL query string or None if there is no SPARQL query answer.
"""
return None
class QuestionCase(BaseCase):
"""
Question case class for representing a dataset question case with
a mandatory question text and an optional question id. No SPARQL query answer required.
"""
def __init__(self, question_text: str, question_id: Optional[int] = None):
"""
Question case constructor.
:param question_text: question case string.
:param question_id: question case identifier.
"""
self.__id = question_id
self.__text = question_text
@property
def question_id(self) -> Optional[int]:
"""
Get the question id, if exists.
:return: question integer identifier or None if there is no identifier.
"""
return self.__id
@property
def question_text(self) -> str:
"""
Get the question text.
:return: question string.
"""
return self.__text
class Question(QuestionCase):
"""
Question class for representing a question case that only requires a question text.
No question id nor SPARQL query answer required.
"""
def __init__(self, question_text: str):
"""
Question constructor.
:param question_text: question case string.
"""
super().__init__(question_text)
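# Illustrative usage (added example; the question text is an assumption):
#   case = QuestionCase("Who wrote Hamlet?", question_id=7)
#   case.question_id    -> 7
#   case.question_text  -> "Who wrote Hamlet?"
#   case.sparql_query   -> None (QuestionCase carries no SPARQL answer)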
```
#### File: dataset_tools/dataset_case/normalized_case.py
```python
from typing import Optional, Dict, List
from dataset_tools.dataset_case.base_case import BaseCase, QuestionCase, Question
class NormalizedCase(BaseCase):
"""
Class for representing a normalized case which includes a mandatory question text
and might include:
- Question id
- SPARQL query answer.
- Entities included in the SPARQL query with the corresponding label in the question text.
- Query template with placeholders deduced from the SPARQL query answer.
- Slots that includes the mapping entity label -> query placeholder.
"""
def __init__(self, case: Dict):
"""
NormalizedCase class constructor.
The case dict have to contain al least the question text.
:param case: normalized case dictionary.
"""
self.case = case
@property
def question_id(self) -> Optional[int]:
"""
Get the question id, if exists.
:exception: AssertionError if the question case id is not an integer.
:return: question integer identifier or None if there is no identifier.
"""
if 'question_id' not in self.case:
return None
assert type(self.case['question_id']) == int
return self.case['question_id']
@property
def question_text(self) -> str:
"""
Get the question text.
:exception: AssertionError if the question text is not included.
:exception: AssertionError if the question case text is not an string.
:return: question string.
"""
assert 'natural_language_question' in self.case
assert type(self.case['natural_language_question']) == str
return self.case['natural_language_question']
def __get_query_answer_dict(self) -> Optional[Dict]:
"""
        Obtain the query answer dict that should contain the SPARQL query, entities, slots and query template,
        if it exists.
:exception: AssertionError if the query answer field is not a list.
:exception: AssertionError if the question answer first element is not a dict.
:return: query answer dict, None if there is no query answer dict.
"""
if 'query_answer' not in self.case or not self.case['query_answer']:
return None
assert type(self.case['query_answer']) == list
assert type(self.case['query_answer'][0]) == dict
return self.case['query_answer'][0]
@property
def sparql_query(self) -> Optional[str]:
"""
Get the SPARQL query answer.
:exception: AssertionError if the sparql_query field is not in the query_answer_dict
:exception: AssertionError if the SPARQL query is not a string.
:return: SPARQL query string or None if there is no SPARQL query answer.
"""
query_answer_dict = self.__get_query_answer_dict()
if not query_answer_dict:
return None
assert 'sparql_query' in query_answer_dict
assert type(query_answer_dict['sparql_query']) == str
return query_answer_dict['sparql_query']
@property
def question_case(self) -> QuestionCase:
"""
Return a QuestionCase instance with the question id and question text.
:return: QuestionCase instance.
"""
return QuestionCase(self.question_text, self.question_id)
@property
def question(self) -> Question:
"""
Return a Question instance with the question text.
:return: Question instance.
"""
return Question(self.question_text)
@property
def entities(self) -> Optional[List[Dict]]:
"""
Return entities included in the SPARQL query with the corresponding label in the question text.
The format of each case is the following:
entities = [
{
'label': 'Pedrito',
'entity': 'wd:Q121'
}, ...
]
:exception: AssertionError if the entities are not a list. The format of each case is not checked.
:return: entity case list, None if is not included.
"""
query_answer_dict = self.__get_query_answer_dict()
if not query_answer_dict or 'entities' not in query_answer_dict:
return None
assert type(query_answer_dict['entities']) == list
return query_answer_dict['entities']
@property
def slots(self) -> Optional[List[Dict]]:
"""
Return slots that includes the mapping entity label -> query template placeholder.
The format of each case is the following:
slots = [
{
'label': 'Pedrito',
'slot': '<sbj_1>'
}, ...
]
:exception: AssertionError if the slots are not a list. The format of each case is not checked.
:return: slot case list, None if is not included.
"""
query_answer_dict = self.__get_query_answer_dict()
if not query_answer_dict or 'slots' not in query_answer_dict:
return None
assert type(query_answer_dict['slots']) == list
return query_answer_dict['slots']
@property
def query_template(self) -> Optional[str]:
"""
Return query template with placeholders deduced from the SPARQL query answer.
:exception: AssertionError if the query template is not a string.
:return: query template string or None if is not included.
"""
query_answer_dict = self.__get_query_answer_dict()
if not query_answer_dict or 'sparql_template' not in query_answer_dict:
return None
assert type(query_answer_dict['sparql_template']) == str
return query_answer_dict['sparql_template']
@property
def answers(self) -> Optional[Dict]:
"""
Return query binding answers from the SPARQL query answer.
:exception: AssertionError if the query template is not a string.
:return: binding answers dict or None if is not included.
"""
query_answer_dict = self.__get_query_answer_dict()
if not query_answer_dict or 'answers' not in query_answer_dict or query_answer_dict['answers'] is None:
return None
assert type(query_answer_dict['answers']) == dict
return query_answer_dict['answers']
```
#### File: ElNeuKGQA/entity_linking/dbpedia_spotlight_system.py
```python
from typing import Dict, Optional, List
import requests
from requests.exceptions import ConnectionError as requestConnectionError
from dataset_tools import QuestionCase
from entity_linking.base_entitity_linking_system import EntityLinkingSystem, EntityLinkingDict
from mapping.mapper import MapEntitiesDBpediaToWikidata
from query_tools import WIKIDATA_ENDPOINT_URL
DBPEDIA_SPOTLIGHT_URL = "http://api.dbpedia-spotlight.org/en/annotate"
class DBpediaSpotlight(EntityLinkingSystem):
"""
Class for the DBpedia Spotlight Entity Linking system.
More details: https://www.dbpedia-spotlight.org/
"""
def __init__(self, endpoint_url: Optional[str] = None, skip_mapping: bool = False):
"""
DBpediaSpotlight class constructor. Use the DBpedia Spotlight web service.
:param endpoint_url: DBpediaSpotlight API url.
:param skip_mapping: if True, skip mapping process to Wikidata resources.
"""
endpoint_url = endpoint_url if endpoint_url else DBPEDIA_SPOTLIGHT_URL
entity_mapper = MapEntitiesDBpediaToWikidata(WIKIDATA_ENDPOINT_URL) if not skip_mapping else None
super().__init__(endpoint_url, entity_mapper)
def __str__(self):
"""
Get the DBpedia Spotlight system string representation.
:return: string representation.
"""
return 'DBpedia Spotlight'
def _request(self, params: Dict) -> Optional[Dict]:
"""
Perform a request to the DBpedia Spotlight web service given a set or parameters.
:param params: query parameters.
:return: request json response dict, None if there is no successful response.
"""
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}
res = None
try:
res = requests.get(self._get_url(), params, headers=headers)
res_json = res.json()
        except requestConnectionError:  # if the DBpedia web service shuts down
return None
except Exception as e:
print(res)
raise e
return res_json
def construct_query_params(self, question_string: str) -> Dict:
"""
Given a question string, construct the web service parameters.
:param question_string: question string.
:return: query parameters dict.
"""
return {'text': question_string, 'confidence': 0.2, 'support': 20}
def get_entity_extracted(
self, question_case: QuestionCase, num_entities_expected: Optional[int] = None
) -> List[Dict]:
"""
Perform entity annotation over the given question case.
Expected DBpedia Spotlight format:
entity_annotation = {
'ini': 0,
'fin': 6,
'label': "Pedrito",
'url': "http://dbpedia.org/resource/Pedrito",
'score_list' : [
{
'value': 0.99,
'field_name': '@similarityScore'
},
{
'value': 0.51,
'field_name': '@percentageOfSecondRank'
}
]
}
A number of maximum expected entities can be passed.
If the given QuestionCase contains a question id, such id is ignored.
:param question_case: QuestionCase instance.
:param num_entities_expected: maximum number of entities expected.
:return: list of entity annotations.
"""
if not question_case.question_text: # if empty text is provided
return list()
# construct web service parameters and get annotations
params = self.construct_query_params(question_case.question_text)
results = self.get_response(params)
# if not results, return empty list
if not results:
return list()
# adapt annotations results to the desired output
summary = list()
for case in results['Resources'] if 'Resources' in results else list():
try:
start = int(case['@offset'])
label = case['@surfaceForm']
data = {
'ini': start,
'fin': start + len(label),
'label': label,
'url': case['@URI'],
'score_list': [
{
'value': float(case['@similarityScore']),
'field_name': '@similarityScore'
},
{
'value': float(case['@percentageOfSecondRank']),
'field_name': '@percentageOfSecondRank'
}
]
}
summary.append(data)
except KeyError: # usually a mention without annotation
continue
return self.map_summary(summary)
# add DBpediaSpotlight to the EntityLinkingDict for the load_model method
EntityLinkingDict['dbpedia_spotlight'] = DBpediaSpotlight
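# Illustrative usage sketch (added; assumes the public DBpedia Spotlight endpoint is reachable):
#   system = DBpediaSpotlight()
#   annotations = system.get_entity_extracted(QuestionCase("Who founded DBpedia?"))
#   each annotation dict follows the format documented in get_entity_extracted above.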
```
#### File: ElNeuKGQA/entity_linking/precision_priority_system.py
```python
import re
from pathlib import Path
from typing import Optional, List, Union, Dict, Set
import nltk
from nltk.corpus import stopwords
from dataset_tools import QuestionCase
from entity_linking.base_entitity_linking_system import EntityLinkingDict
from entity_linking.ensemble_entity_linking_system import EnsembleEntityLinkingSystem, MAX_THRESHOLD
from query_tools import WIKIDATA_ENTITY_PATTERN
class PrecisionPrioritySystem(EnsembleEntityLinkingSystem):
"""
Class for the Precision Priority system.
It enhance the performance in the Entity Linking task by following a priority annotation process
for the given individual Entity Linking systems. Systems with higher precision have higher priority.
"""
def __init__(self, system_priority: Optional[List[str]] = None, joined_results: Optional[Union[Path, str]] = None,
threshold: Optional[int] = None, filter_stopwords: bool = False, tiebreak: bool = True):
"""
PrecisionPrioritySystem class constructor.
        :param system_priority: system name priority. By default it is: Aida - OpenTapioca - TAGME - DBpedia Spotlight.
        :param joined_results: offline joined results from the individual Entity Linking systems.
        If no joined results are provided, the system falls back to the default online setting.
        :param threshold: number of expected entities to be returned. By default it is 3.
        :param filter_stopwords: if True, applies mention stopword filtering.
        :param tiebreak: if True, applies the tiebreak feature to resolve ties by using the entities' scores.
"""
super().__init__(joined_results, offline=joined_results is not None)
self.system_priority = system_priority if system_priority else \
["Aida", "Open Tapioca", "TAGME", "DBpedia Spotlight"]
self.threshold = threshold if threshold else MAX_THRESHOLD
# Load stopwords, otherwise download first
try:
self._stopwords = set(stopwords.words('english'))
except LookupError:
nltk.download('stopwords')
self._stopwords = set(stopwords.words('english'))
self.filter_stopwords = filter_stopwords
self.tiebreak = tiebreak
def __str__(self):
"""
Get the string representation. Usually the system name.
        :return: PrecisionPrioritySystem string representation.
"""
return 'Precision Priority'
def _valid_entity(self, entity_name: str, found_uris: Set[str], mention_label: str) -> bool:
"""
Return True if the given entity name is valid to be added to the final output annotations.
        An entity is valid if it satisfies the following conditions:
(1) entity is a Wikidata entity
(2) entity has not been found
(3) the stopwords filter is on and the mention label is not a stopword
:param entity_name: entity name string.
:param found_uris: URIs found so far (to avoid duplicates).
:param mention_label: mention label string.
        :return: True if the entity is valid, False otherwise.
"""
return re.match(WIKIDATA_ENTITY_PATTERN, entity_name) and ( # (1)
entity_name not in found_uris) and ( # (2)
not self.filter_stopwords or mention_label.lower() not in self._stopwords) # (3)
def get_entity_extracted(
self, question_case: QuestionCase, num_entities_expected: Optional[int] = None
) -> List[Dict]:
"""
Perform entity annotation over the given question case.
A number of maximum expected entities can be passed.
:param question_case: QuestionCase instance.
:param num_entities_expected: maximum number of entities expected.
:return: list of entity annotations.
"""
# gather offline results or perform joined entity annotation
results = self.gather_results(question_case)
# if not results, return empty list
if not results:
return list()
# adapt annotations results to the desired output
summary = list()
found_uris = set()
# Set number of expected entities to be returned
num_expected_entities = num_entities_expected if num_entities_expected else self.threshold
# sort systems' annotations by priority, ascending
for case in sorted(results["annotations"], key=lambda a_case: self.system_priority.index(a_case['system'])):
# sort each annotation by entity score, descending
for output in sorted(case['output'], key=lambda output_case: -output_case['score']):
# compress Wikidata Entity URI
entity_name = output['url'] if 'wd:' in output['url'] else ('wd:' + output['url'])
# add only if entity is valid
if self._valid_entity(entity_name, found_uris, output['label']):
found_uris.add(entity_name)
data = dict(
ini=int(output['ini']),
fin=int(output['fin']),
label=output['label'],
url=entity_name,
score_list=output['score_list']
)
summary.append(data)
# if tiebreak setting is on and we reach the number of expected entities
if self.tiebreak and len(summary) >= num_expected_entities:
return summary
# if current valid annotations exceed number of expected entities
if len(summary) >= num_expected_entities:
break
return summary
# add PrecisionPrioritySystem to the EntityLinkingDict for the load_model method
EntityLinkingDict['PrecisionPrioritySystem'] = PrecisionPrioritySystem
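# Illustrative usage sketch (added; with no joined results it runs the default online setting):
#   system = PrecisionPrioritySystem(threshold=3, filter_stopwords=True)
#   annotations = system.get_entity_extracted(QuestionCase("Who founded DBpedia?"), num_entities_expected=2)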
```
#### File: ElNeuKGQA/entity_linking/tagme_system.py
```python
import re
from typing import Dict, Optional, List
import requests
from dataset_tools import QuestionCase
from .base_entitity_linking_system import EntityLinkingSystem, EntityLinkingDict
from mapping.mapper import MapEntitiesWikipediaToWikidata
from query_tools import WIKIDATA_ENDPOINT_URL
token = ""
TAGME_URL = "https://tagme.d4science.org/tagme/tag"
TAGME_WAT_URL = "https://wat.d4science.org/wat/tag/tag"
class BaseTagMe(EntityLinkingSystem):
"""
Class for the Base TAGME Entity Linking system.
"""
def __init__(self, endpoint_url: str, skip_mapping: bool = False):
"""
Base TAGME class constructor.
:param endpoint_url: system API url.
:param skip_mapping: if True, skip mapping process to Wikidata resources.
"""
entity_mapper = MapEntitiesWikipediaToWikidata(WIKIDATA_ENDPOINT_URL) if not skip_mapping else None
super().__init__(endpoint_url, entity_mapper)
def _request(self, params: Dict) -> Optional[Dict]:
"""
Perform a request to the TAGME web service given a set or parameters.
:param params: query parameters.
:return: request json response dict, None if there is no successful response.
"""
# headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}
res = requests.get(self._get_url(), params=params) # , headers=headers
res_json = res.json()
return res_json
def construct_query_params(self, question_string: str) -> Dict:
"""
Given a question string, construct the web service parameters.
:param question_string: question string.
:return: query parameters dict.
"""
return {'text': question_string, 'gcube-token': token, 'lang': 'en'}
def get_entity_extracted(
self, question_case: QuestionCase, num_entities_expected: Optional[int] = None
) -> List[Dict]:
"""
Perform entity annotation over the given question case.
Expected TAGME format:
entity_annotation = {
'ini': 0,
'fin': 6,
'label': "Pedrito",
'url': "https://en.wikipedia.org/wiki/Pedrito",
'score_list' : [
{
'value': 0.99,
'field_name': 'rho'
}
]
}
A number of maximum expected entities can be passed.
If the given QuestionCase contains a question id, such id is ignored.
:param question_case: QuestionCase instance.
:param num_entities_expected: maximum number of entities expected.
:return: list of entity annotations.
"""
if not question_case.question_text: # if empty text is provided
return list()
# construct web service parameters and get annotations
params = self.construct_query_params(question_case.question_text)
results = self.get_response(params)
# if not results, return empty list
if not results:
return list()
# adapt annotations results to the desired output
summary = list()
for case in results['annotations'] if 'annotations' in results else list():
try:
data = {
'ini': case['start'],
'fin': case['end'],
'label': case['spot'],
'url': 'wiki:' + re.sub(r'\s+', '_', case['title']),
'score_list': [
{
'value': case['rho'],
'field_name': 'rho'
}
]
}
summary.append(data)
except KeyError as e: # usually a mention without annotation
print(e)
print(case)
return self.map_summary(summary)
class TagMe(BaseTagMe):
"""
Class for the TAGME Entity Linking system.
More details: https://sobigdata.d4science.org/group/tagme/tagme-help
"""
def __init__(self, endpoint_url: Optional[str] = None, skip_mapping: bool = False):
"""
TAGME class constructor. Use the TAGME web service.
:param endpoint_url: TAGME API url.
:param skip_mapping: if True, skip mapping process to Wikidata resources.
"""
endpoint_url = endpoint_url if endpoint_url else TAGME_URL
super().__init__(endpoint_url, skip_mapping)
def __str__(self):
"""
Get the TAGME system string representation.
:return: string representation.
"""
return 'TAGME'
class TagMeWAT(BaseTagMe):
"""
Class for the TAGME WAT Entity Linking system.
More details: https://sobigdata.d4science.org/web/tagme/wat-api
"""
def __init__(self, endpoint_url: Optional[str] = None, skip_mapping: bool = False):
"""
TAGME WAT class constructor. Use the TAGME WAT web service.
:param endpoint_url: TAGME WAT API url.
:param skip_mapping: if True, skip mapping process to Wikidata resources.
"""
endpoint_url = endpoint_url if endpoint_url else TAGME_WAT_URL
super().__init__(endpoint_url, skip_mapping)
def __str__(self):
"""
Get the TAGME WAT system string representation.
:return: string representation.
"""
return 'TAGME_WAT'
# add TagMe to the EntityLinkingDict for the load_model method
EntityLinkingDict['TAGME'] = TagMe
# add TagMeWAT to the EntityLinkingDict for the load_model method
EntityLinkingDict['tagme_wat'] = TagMeWAT
```
#### File: filenames/file_managers/entity_linking.py
```python
from pathlib import Path
from typing import Optional
from filenames.file_managers.file_manager import FileManager
###########################################################################################
################################### ENTITY LINKING ########################################
###########################################################################################
class EntityLinkingFiles(FileManager):
def __init__(self,
dataset_name: str = 'lcquad2',
dataset_variant: str = 'standard',
only_train: bool = False, only_valid: bool = False, only_test: bool = False,
base_folder: Optional[Path] = None,
benchmark_folder: Optional[Path] = None,
make_dir: bool = True):
        super().__init__('entity_linking', dataset_name, dataset_variant, only_train,
                         only_valid, only_test, base_folder, benchmark_folder=benchmark_folder,
                         make_dir=make_dir)
```
#### File: filenames/file_managers/file_manager.py
```python
import os
from pathlib import Path
from typing import Optional
PROJECT_ROOT = Path(os.path.dirname(__file__)).parents[1]
PROJECT_DATA_FOLDER = PROJECT_ROOT / 'data'
PROJECT_WIKIDATA_QA_DATA_FOLDER = PROJECT_DATA_FOLDER / 'datasets'
PROJECT_WIKIDATA_BENCHMARK_FOLDER = PROJECT_DATA_FOLDER / 'benchmark'
PROJECT_WIKIDATA_MODELS_FOLDER = PROJECT_DATA_FOLDER / 'models'
class FileManager():
def __init__(self, task_name: str, dataset_name: str, dataset_variant: str,
only_train: bool = False, only_valid: bool = False, only_test: bool = False,
base_folder: Optional[Path] = None,
data_folder: Optional[Path] = None, benchmark_folder: Optional[Path] = None,
models_folder: Optional[Path] = None, make_dir: bool = True):
assert not (int(only_train) + int(only_valid) + int(only_test)) > 1
self.dataset_name = dataset_name
self.dataset_variant = dataset_variant
self.task_name = task_name
self.only_train = only_train
self.only_valid = only_valid
self.only_test = only_test
self.make_dir = make_dir
self.base_folder = base_folder if base_folder else PROJECT_DATA_FOLDER
self.make_dir_exists_ok(self.base_folder)
self.data_folder = data_folder if data_folder else (self.base_folder / 'datasets')
self.make_dir_exists_ok(self.data_folder)
self.benchmark_folder = benchmark_folder if benchmark_folder else (self.base_folder / 'benchmark')
self.make_dir_exists_ok(self.benchmark_folder)
self.models_folder = models_folder if models_folder else (self.base_folder / 'models')
self.make_dir_exists_ok(self.models_folder)
self.benchmark_task_folder = self.benchmark_folder / self.task_name
self.make_dir_exists_ok(self.benchmark_task_folder)
def make_dir_exists_ok(self, path: Path):
path.mkdir(exist_ok=True) if self.make_dir else None
def dataset_file(self, **kwargs):
return self.data_folder / "dataset_{0}_{1}.json".format(self.dataset_name, self.dataset_variant)
def results_folder(self, **kwargs) -> Path:
if self.only_train:
folder = self.benchmark_task_folder / '{0}_{1}_train'.format(self.dataset_name, self.dataset_variant)
elif self.only_valid:
folder = self.benchmark_task_folder / '{0}_{1}_valid'.format(self.dataset_name, self.dataset_variant)
elif self.only_test:
folder = self.benchmark_task_folder / '{0}_{1}_test'.format(self.dataset_name, self.dataset_variant)
else:
folder = self.benchmark_task_folder / '{0}_{1}'.format(self.dataset_name, self.dataset_variant)
self.make_dir_exists_ok(folder)
return folder
def output_file(self, system_name: str, **kwargs) -> Path:
folder = self.results_folder() / system_name
self.make_dir_exists_ok(folder)
return folder / 'output.json'
def stats_file(self, system_name: str, **kwargs) -> Path:
folder = self.results_folder() / system_name
self.make_dir_exists_ok(folder)
return folder / 'stats.json'
def table_file(self, system_name: str, table_name: Optional[str] = None, **kwargs) -> Path:
folder = self.results_folder() / system_name
self.make_dir_exists_ok(folder)
return folder / f'{table_name if table_name else "table"}.csv'
def resources_folder(self, **kwargs) -> Path:
pass
def vocab_folder(self, **kwargs) -> Path:
pass
def model_folder(self, **kwargs) -> Path:
pass
def normalized_dataset_file(self, **kwargs) -> Path:
pass
def train_file(self, **kwargs) -> Path:
pass
def valid_file(self, **kwargs) -> Path:
pass
def test_file(self, **kwargs) -> Path:
pass
def train_source_file(self, **kwargs) -> Path:
pass
def train_target_file(self, **kwargs) -> Path:
pass
def valid_source_file(self, **kwargs) -> Path:
pass
def valid_target_file(self, **kwargs) -> Path:
pass
def test_source_file(self, **kwargs) -> Path:
pass
def test_target_file(self, **kwargs) -> Path:
pass
```
#### File: filenames/file_managers/query_generation.py
```python
from pathlib import Path
from typing import Optional
from filenames.file_managers import FileManager
class BaseQueryGenerationFiles(FileManager):
def __init__(self,
task_name: str,
empty_sparql: bool = True,
dataset_name: str = 'lcquad2',
dataset_variant: str = 'standard',
only_train: bool = False, only_valid: bool = False, only_test: bool = False,
query_split: bool = False,
property_guarantee: bool = True,
simplified: bool = True,
train_prefix: str = 'train',
valid_prefix: str = 'valid',
test_prefix: str = 'test',
base_folder: Optional[Path] = None,
data_folder: Optional[Path] = None,
benchmark_folder: Optional[Path] = None,
models_folder: Optional[Path] = None,
make_dir: bool = True,
model_type: str = 'fconv',
):
super().__init__(task_name, dataset_name, dataset_variant, only_train, only_valid, only_test, base_folder,
data_folder, benchmark_folder, models_folder, make_dir)
self.dataset_type = 'empty' if empty_sparql else 'full'
self.split_type = 'query' if query_split else 'question'
self.guarantee = "with" if property_guarantee else "without"
self.train_prefix = train_prefix
self.valid_prefix = valid_prefix
self.test_prefix = test_prefix
self.target_suffix = "empsparql" if empty_sparql else "sparql"
self.simplified = simplified
self.model_type = model_type
def resources_folder(self, **kwargs) -> Path:
if self.simplified:
folder = self.models_folder / "{0}_{1}_{2}".format(self.dataset_type, self.dataset_name, self.dataset_variant)
else:
folder = self.models_folder / "{0}_{1}_{2}_by_{3}_split_{4}_guarantee".format(self.dataset_type,
self.dataset_name,
self.dataset_variant,
self.split_type, self.guarantee)
self.make_dir_exists_ok(folder)
return folder
def vocab_folder(self, **kwargs):
folder = self.resources_folder() / "data-bin_{0}.tokenized.en-{1}".format(self.dataset_name, self.target_suffix)
self.make_dir_exists_ok(folder)
return folder
def model_folder(self, **kwargs):
folder = self.resources_folder() / "checkpoints_{2}_{0}_en_{1}".format(self.dataset_name, self.target_suffix,
self.model_type)
self.make_dir_exists_ok(folder)
return folder
def _split_file(self, base_prefix: str):
return self.resources_folder() / "{0}_{1}_{2}_sparql.txt".format(base_prefix, self.dataset_name,
self.dataset_type)
def train_file(self, **kwargs):
return self._split_file(self.train_prefix)
def valid_file(self, **kwargs):
return self._split_file(self.valid_prefix)
def _side_split_file(self, base_prefix: str, file_suffix: str):
return self.resources_folder() / "{0}.{1}".format(base_prefix, file_suffix)
def train_source_file(self, **kwargs) -> Path:
return self._side_split_file(self.train_prefix, 'en')
def train_target_file(self, **kwargs) -> Path:
return self._side_split_file(self.train_prefix, self.target_suffix)
def valid_source_file(self, **kwargs) -> Path:
return self._side_split_file(self.valid_prefix, 'en')
def valid_target_file(self, **kwargs) -> Path:
return self._side_split_file(self.valid_prefix, self.target_suffix)
class QueryGenerationFiles(BaseQueryGenerationFiles):
def __init__(self, dataset_name: str = 'lcquad2',
dataset_variant: str = 'standard',
only_train: bool = False, only_valid: bool = False, only_test: bool = False,
query_split: bool = False, property_guarantee: bool = True, simplified: bool = True,
train_prefix: str = 'train', valid_prefix: str = 'valid', test_prefix: str = 'test',
base_folder: Optional[Path] = None, models_folder: Optional[Path] = None, make_dir: bool = True,
model_type: str = 'fconv',
):
empty_sparql = False
super().__init__("query_generation", empty_sparql, dataset_name, dataset_variant, only_train, only_valid,
only_test, query_split, property_guarantee, simplified, train_prefix, valid_prefix, test_prefix,
base_folder, models_folder=models_folder, make_dir=make_dir, model_type=model_type)
# Full SPARQL, standard size
full_file_standard_manager = QueryGenerationFiles(model_type='fconv')
LCQUAD2_FULL_VOCAB = full_file_standard_manager.vocab_folder()
LCQUAD2_FULL_CONVS2S_MODEL = full_file_standard_manager.model_folder()
# Full SPARQL, plus size
full_file_plus_manager = QueryGenerationFiles(dataset_variant='plus', model_type='fconv')
LCQUAD2_FULL_PLUS_VOCAB = full_file_plus_manager.vocab_folder()
LCQUAD2_FULL_PLUS_CONVS2S_MODEL = full_file_plus_manager.model_folder()
# Baseline: Full SPARQL, standard size
LCQUAD2_BASELINE_VOCAB = LCQUAD2_FULL_VOCAB
LCQUAD2_BASELINE_CONVS2S_MODEL = LCQUAD2_FULL_CONVS2S_MODEL
```
#### File: ElNeuKGQA/query_generation/base_query_generator.py
```python
from typing import List, Union
from dataset_tools import QuestionCase
from query_tools import Query
class BaseQueryGeneratorMethodNotImplemented(Exception):
"""
Exception when a BaseQueryGenerator method hasn't been implemented yet.
"""
pass
class BaseQueryGenerator:
"""
Base Query Generator class for generating SPARQL queries or Query templates given a Natural Language question.
"""
def generate_one(self, question_case: QuestionCase) -> Union[str, Query]:
"""
Given a QuestionCase instance, generate a SPARQL query or Query template.
:param question_case: QuestionCase instance.
:return: a SPARQL Query instance or Query Template.
"""
raise BaseQueryGeneratorMethodNotImplemented
def generate_one_n_candidates(self, question_case: QuestionCase, n_candidates: int = 5) -> Union[List[str], List[Query]]:
"""
Given a QuestionCase instance, generate n SPARQL query candidates or n Query template candidates.
:param question_case: QuestionCase instance.
:param n_candidates: number of candidates per question.
:return: a List of SPARQL Query instance or a List of Query Template which represents the candidates for the given question.
"""
raise BaseQueryGeneratorMethodNotImplemented
def generate(self, question_cases: List[QuestionCase]) -> Union[List[str], List[Query]]:
"""
Given a list of QuestionCase instances, generate a SPARQL query or Query template for each question.
:param question_cases: list of QuestionCase instances.
:return: a List of SPARQL Query instance or a List of Query Template whose elements represent the output for each question respectively.
"""
raise BaseQueryGeneratorMethodNotImplemented
def generate_n_candidates(
self, question_cases: List[QuestionCase], n_candidates: int = 5
) -> Union[List[List[str]], List[List[Query]]]:
"""
Given a list of QuestionCase instances, generate n SPARQL query candidates
or n Query template candidates for each question.
:param question_cases: list of QuestionCase instances.
:param n_candidates: number of candidates per question.
:return: a List of Lists of SPARQL Query instance or Query Template (not both). Each List represent the candidates of each question respectively.
"""
raise BaseQueryGeneratorMethodNotImplemented
```
#### File: ElNeuKGQA/query_generation/sparql_query_generator.py
```python
import json
from pathlib import Path
from typing import List, Dict, Optional
from dataset_tools import Normalizer, QuestionCase
from filenames import QueryGenerationFiles
from neural_sparql_machine.fairseq_wrapper import FairseqTranslator
from query_generation import BaseQueryGenerator, BaseQueryGeneratorMethodNotImplemented
from query_tools import Query, Tokenizer, WikidataQuery, WikidataTokenizer
QueryGenerationDict = dict()
class SparqlQueryGenerator(BaseQueryGenerator):
"""
SPARQL Query Generator class for generating SPARQL queries given a Natural Language question.
"""
def generate_one(self, question_case: QuestionCase) -> Query:
"""
Given a QuestionCase instance, generate a SPARQL query.
:param question_case: natural language QuestionCase instance.
:return: a SPARQL Query instance.
"""
raise BaseQueryGeneratorMethodNotImplemented
def generate_one_n_candidates(self, question_case: QuestionCase, n_candidates: int = 5) -> List[Query]:
"""
        Given a QuestionCase instance, generate n SPARQL query candidates.
:param question_case: natural language QuestionCase instance.
:param n_candidates: number of candidates per question.
:return: a List of SPARQL Query instance which represents the candidates for the given question.
"""
raise BaseQueryGeneratorMethodNotImplemented
def generate(self, question_cases: List[QuestionCase]) -> List[Query]:
"""
        Given a list of QuestionCase instances, generate a SPARQL query for each question.
        :param question_cases: list of natural language QuestionCase instances.
:return: a List of SPARQL Query instances whose elements represent the output for each question respectively.
"""
raise BaseQueryGeneratorMethodNotImplemented
def generate_n_candidates(self, question_cases: List[QuestionCase], n_candidates: int = 5) -> List[List[Query]]:
"""
        Given a list of QuestionCase instances, generate n SPARQL query candidates for each question.
        :param question_cases: list of natural language QuestionCase instances.
:param n_candidates: number of candidates per question.
:return: a List of Lists of SPARQL Query instances. Each List represent the candidates of each question respectively.
"""
raise BaseQueryGeneratorMethodNotImplemented
@classmethod
def load_model(cls, query_generator_opt: Dict, dataset_opt: Dict) -> 'SparqlQueryGenerator':
# initialize Query Generation file manager
system_name = query_generator_opt["system_name"]
if query_generator_opt['offline']:
file_manager = QueryGenerationFiles(**dataset_opt['params'])
offline_results = file_manager.output_file(system_name)
print(f'Using offline data from "{offline_results}"...')
query_generator = OfflineSparqlQueryGenerator(offline_results)
else:
system_type = query_generator_opt["system_type"]
query_generator = QueryGenerationDict[system_type].load_model(query_generator_opt, dataset_opt)
print(f"{system_name} system ready...")
return query_generator
class FairseqSparqlQueryGenerator(SparqlQueryGenerator):
"""
SPARQL Query Generator class for a generating SPARQL query given a Natural Language question.
Based on Fairseq Neural Translation models.
More details: https://fairseq.readthedocs.io/en/latest/
"""
def __init__(self, translation_model: FairseqTranslator, query_tokenizer: Tokenizer):
"""
Fairseq SparqlQueryGenerator Constructor.
:param translation_model: Neural Machine Translation fairseq model wrapper.
:param query_tokenizer: Tokenizer for decoding the SPARQl query output from the query generator.
"""
self.translation_model = translation_model
self.query_tokenizer = query_tokenizer
def generate_one(self, question_case: QuestionCase) -> Query:
"""
Given a QuestionCase instance, generate a SPARQL query.
:param question_case: natural language QuestionCase instance.
:return: a SPARQL Query instance.
"""
question_string = question_case.question_text
normalized_question = Normalizer.normalize_question(question_string)
return self.query_tokenizer.decode(self.translation_model.evaluate([normalized_question])[0])
def generate_one_n_candidates(self, question_case: QuestionCase, n_candidates: int = 5) -> List[Query]:
"""
        Given a QuestionCase instance, generate n SPARQL query candidates.
:param question_case: natural language QuestionCase instance.
:param n_candidates: number of candidates per question.
:return: a List of SPARQL Query instance which represents the candidates for the given question.
"""
assert n_candidates > 0
question_string = question_case.question_text
normalized_question = Normalizer.normalize_question(question_string)
candidates = self.translation_model.evaluate_best_n(normalized_question, beam=n_candidates)
return list(self.query_tokenizer.decode(candidate) for candidate in candidates)
def generate(self, question_cases: List[QuestionCase]) -> List[Query]:
"""
        Given a list of QuestionCase instances, generate a SPARQL query for each question.
        :param question_cases: list of natural language QuestionCase instances.
:return: a List of SPARQL Query instances whose elements represent the output for each question respectively.
"""
return list(self.generate_one(question_case) for question_case in question_cases)
def generate_n_candidates(self, question_cases: List[QuestionCase], n_candidates: int = 5) -> List[List[Query]]:
"""
        Given a list of QuestionCase instances, generate n SPARQL query candidates for each question.
        :param question_cases: list of natural language QuestionCase instances.
:param n_candidates: number of candidates per question.
:return: a List of Lists of SPARQL Query instances. Each List represent the candidates of each question respectively.
"""
assert n_candidates > 0
return list(self.generate_one_n_candidates(q_case, n_candidates) for q_case in question_cases)
@classmethod
def load_model(cls, query_generator_opt: Dict, dataset_opt: Dict) -> 'FairseqSparqlQueryGenerator':
"""
        Build a FairseqSparqlQueryGenerator from configuration dictionaries.
        :param query_generator_opt: query generator options; must contain 'system_params' and may contain 'checkpoint_file' and 'gpu'.
        :param dataset_opt: dataset options (not used directly here; kept for interface compatibility).
        :return: a FairseqSparqlQueryGenerator wrapping a FairseqTranslator and a WikidataTokenizer.
"""
assert 'system_params' in query_generator_opt
file_manager = QueryGenerationFiles(**query_generator_opt['system_params'])
translator_params = dict(
vocab_path=file_manager.vocab_folder(),
checkpoints_folder=file_manager.model_folder(),
checkpoint_file=query_generator_opt['checkpoint_file'] if 'checkpoint_file' in query_generator_opt else None,
gpu=query_generator_opt['gpu'] if 'gpu' in query_generator_opt else False
)
translator = FairseqTranslator(**translator_params)
return FairseqSparqlQueryGenerator(translator, WikidataTokenizer())
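# Illustrative usage sketch (not part of the original module; the concrete option values below
# are assumptions, not a documented configuration):
#
#   query_generator_opt = {
#       'system_params': {...},                  # forwarded to QueryGenerationFiles
#       'checkpoint_file': 'checkpoint_best.pt', # assumed checkpoint name
#       'gpu': False,
#   }
#   generator = FairseqSparqlQueryGenerator.load_model(query_generator_opt, dataset_opt={})
#   candidates = generator.generate_one_n_candidates(question_case, n_candidates=5)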
class OfflineSparqlQueryGenerator(SparqlQueryGenerator):
"""
    Offline SPARQL Query Generator class for generating a SPARQL query given a Natural Language question.
    Uses results gathered from other SparqlQueryGenerators.
"""
def __init__(self, offline_results: Path):
"""
Offline SparqlQueryGenerator Constructor.
        :param offline_results: path to a JSON file with previously generated results, one entry per question uid.
"""
self.offline_results = offline_results
with open(offline_results, encoding='utf-8') as inJsonFile:
data = json.load(inJsonFile)
self.uid_data_map = {case['uid']: case for case in data['questions']}
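    # Expected layout of the offline results file (inferred from the lookups below; any extra
    # keys are ignored):
    #   {"questions": [{"uid": <question id>, "system_answer": [<SPARQL string>, ...], ...}, ...]}
    # 'system_answer' is assumed to list candidate queries with the best candidate first.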
def generate_one(self, question_case: QuestionCase) -> Query:
"""
Given a QuestionCase instance, generate a SPARQL query.
:param question_case: natural language QuestionCase instance.
:return: a SPARQL Query instance.
"""
question_id = question_case.question_id
if question_id not in self.uid_data_map:
print(f"Warning: {question_id} is not in cache. You might want to update your results.")
return WikidataQuery("")
result_case = self.uid_data_map[question_id]
return WikidataQuery(result_case['system_answer'][0])
def generate_one_n_candidates(self, question_case: QuestionCase, n_candidates: int = 5) -> List[Query]:
"""
        Given a QuestionCase instance, generate n SPARQL query candidates.
:param question_case: natural language QuestionCase instance.
:param n_candidates: number of candidates per question.
        :return: a List of SPARQL Query instances representing the candidates for the given question.
"""
question_id = question_case.question_id
if question_id not in self.uid_data_map:
print(f"Warning: {question_id} is not in cache. You might want to update your results.")
return list()
result_case = self.uid_data_map[question_id]
        candidates_length = min(n_candidates, len(result_case['system_answer']))
return list(WikidataQuery(query) for query in result_case['system_answer'][:candidates_length])
def generate(self, question_cases: List[QuestionCase]) -> List[Query]:
"""
        Given a list of QuestionCase instances, generate a SPARQL query for each question.
        :param question_cases: list of natural language QuestionCase instances.
:return: a List of SPARQL Query instances whose elements represent the output for each question respectively.
"""
return list(self.generate_one(question_case) for question_case in question_cases)
def generate_n_candidates(self, question_cases: List[QuestionCase], n_candidates: int = 5) -> List[List[Query]]:
"""
        Given a list of QuestionCase instances, generate n SPARQL query candidates for each question.
        :param question_cases: list of natural language QuestionCase instances.
        :param n_candidates: number of candidates per question.
        :return: a List of Lists of SPARQL Query instances. Each inner List represents the candidates of the corresponding question.
"""
return list(self.generate_one_n_candidates(q_case, n_candidates) for q_case in question_cases)
@classmethod
def load_model(cls, query_generator_opt: Dict, dataset_opt: Optional[Dict] = None) -> 'OfflineSparqlQueryGenerator':
raise BaseQueryGeneratorMethodNotImplemented
QueryGenerationDict['Fairseq'] = FairseqSparqlQueryGenerator
```
#### File: query_tools/test/test_query_utils.py
```python
import unittest
from query_tools.base_tokenizer import WikidataTokenizer, DBpediaTokenizer
from query_tools.base_query import WikidataQuery
from templates.wikidata_template import WikidataTemplate
class TestQuery(unittest.TestCase):
def testWikidataQuery(self):
query_1 = "SELECT DISTINCT ?uri WHERE { <http://www.wikidata.org/entity/Q4072104> <http://www.wikidata.org/prop/direct/P184> ?uri }"
compressed_query_1 = "SELECT DISTINCT ?uri WHERE { wd:Q4072104 wdt:P184 ?uri }"
q1 = WikidataQuery(query_1)
cq1 = WikidataQuery(compressed_query_1)
self.assertFalse(q1.is_compressed())
self.assertTrue(cq1.is_compressed())
self.assertEqual(q1.compress(), cq1.get_query())
self.assertEqual(cq1.decompress(), q1.get_query())
class TestTokenizer(unittest.TestCase):
def testWikidataTokenizer(self):
tokenizer = WikidataTokenizer()
q1 = WikidataQuery("SELECT DISTINCT ?uri WHERE { <http://www.wikidata.org/entity/Q4072104> <http://www.wikidata.org/prop/direct/P184> ?uri }")
cq1 = WikidataQuery("SELECT DISTINCT ?uri WHERE { wd:Q4072104 wdt:P184 ?uri }")
encoded_query_1 = "select distinct var_uri where brack_open wd_q4072104 wdt_p184 var_uri brack_close"
self.assertEqual(tokenizer.encode(q1), encoded_query_1)
self.assertEqual(tokenizer.encode(cq1), encoded_query_1)
decoded_query_1 = "select distinct ?uri where { wd:Q4072104 wdt:P184 ?uri }"
self.assertEqual(tokenizer.decode(encoded_query_1).get_query(), decoded_query_1)
encoded_query_2 = "select distinct var_uri where brack_open wd_q3025443 wdt_p86 var_uri brack_close"
decoded_query_2 = "select distinct ?uri where { wd:Q3025443 wdt:P86 ?uri }"
self.assertEqual(tokenizer.decode(encoded_query_2).get_query(), decoded_query_2)
query_3 = "SELECT ?value WHERE { <x> p:P2128 ?s . ?s ps:P2128 ?x filter(contains(?x,'162.0')) . ?s pq:P459 ?value}"
encoded_query_3 = "select var_value where brack_open placeholder_x p_p2128 var_s sep_dot var_s ps_p2128 var_x filter attr_open contains attr_open var_x sep_comma apstrph_162_dot_0_apstrph attr_close attr_close sep_dot var_s pq_p459 var_value brack_close"
decoded_query_3 = "select ?value where { <x> p:P2128 ?s . ?s ps:P2128 ?x filter ( contains ( ?x , '162.0' ) ) . ?s pq:P459 ?value }"
q3 = WikidataQuery(query_3)
self.assertEqual(encoded_query_3, tokenizer.encode(q3))
self.assertEqual(tokenizer.decode(encoded_query_3).get_query(), decoded_query_3)
query_string_4 = "ASK WHERE { wd:Q658 wdt:P1108 ?obj filter(?obj < 1.2) }"
query_4 = WikidataQuery(query_string_4)
encoded_query_4 = "ask where brack_open wd_q658 wdt_p1108 var_obj filter attr_open var_obj math_lt 1_dot_2 attr_close brack_close"
decoded_query_4 = "ask where { wd:Q658 wdt:P1108 ?obj filter ( ?obj < 1.2 ) }"
self.assertEqual(tokenizer.encode(query_4), encoded_query_4)
self.assertEqual(tokenizer.decode(encoded_query_4).get_query(), decoded_query_4)
def testWikidataTokenizerWithStringCases(self):
tokenizer = WikidataTokenizer()
query_string_5 = "SELECT DISTINCT ?sbj ?sbj_label WHERE { ?sbj wdt:P31 wd:Q427626 . ?sbj rdfs:label ?sbj_label . FILTER(CONTAINS(lcase(?sbj_label), 'variety')) . FILTER (lang(?sbj_label) = 'en') } LIMIT 25"
query_5 = WikidataQuery(query_string_5)
encoded_query_5 = "select distinct var_sbj var_sbj_label where brack_open var_sbj wdt_p31 wd_q427626 sep_dot var_sbj rdfs_label var_sbj_label sep_dot filter attr_open contains attr_open lcase attr_open var_sbj_label attr_close sep_comma apstrph_variety_apstrph attr_close attr_close sep_dot filter attr_open lang attr_open var_sbj_label attr_close math_eq apstrph_en_apstrph attr_close brack_close limit 25"
decoded_query_5 = "select distinct ?sbj ?sbj_label where { ?sbj wdt:P31 wd:Q427626 . ?sbj rdfs:label ?sbj_label . filter ( contains ( lcase ( ?sbj_label ) , 'variety' ) ) . filter ( lang ( ?sbj_label ) = 'en' ) } limit 25"
self.assertEqual(encoded_query_5, tokenizer.encode(query_5))
self.assertEqual(decoded_query_5, tokenizer.decode(encoded_query_5).get_query())
query_string_6 = WikidataTemplate(query_string_5).get_query_template(query_5)
query_6 = WikidataQuery(query_string_6)
encoded_query_6 = "select distinct var_sbj var_sbj_label where brack_open var_sbj wdt_p31 placeholder_obj_1 sep_dot var_sbj rdfs_label var_sbj_label sep_dot filter attr_open contains attr_open lcase attr_open var_sbj_label attr_close sep_comma placeholder_str_value attr_close attr_close sep_dot filter attr_open lang attr_open var_sbj_label attr_close math_eq apstrph_en_apstrph attr_close brack_close limit 25"
decoded_query_6 = "select distinct ?sbj ?sbj_label where { ?sbj wdt:P31 <obj_1> . ?sbj rdfs:label ?sbj_label . filter ( contains ( lcase ( ?sbj_label ) , <str_value> ) ) . filter ( lang ( ?sbj_label ) = 'en' ) } limit 25"
self.assertEqual(encoded_query_6, tokenizer.encode(query_6))
        self.assertEqual(decoded_query_6, tokenizer.decode(encoded_query_6).get_query())
def testDBpediaTokenizer(self):
encoded_query = "SELECT DISTINCT var_uri where brack_open dbr_Mad_River_ attr_open California attr_close dbo_city var_uri brack_close"
encoded_query_2 = "ask where brack_open dbr_Island_Barn_Reservoir dbo_areaTotal var_a1 sep_dot dbr_Arab_League dbo_areaTotal var_a2 sep_dot filter attr_open var_a1math_gtvar_a2 attr_close brack_close"
encoded_query_3 = "SELECT DISTINCT COUNT attr_open var_uri attr_close where brack_open var_uri dbp_distributor dbr_Electronic_Arts brack_close"
encoded_query_4 = "SELECT DISTINCT var_uri where brack_open dbr_Up_All_Night_ attr_open One_Direction_album attr_close dbp_writer var_uri sep_dot dbr_Air_Guitar_ attr_open McBusted_song attr_close dbo_writer var_uri brack_close"
encoded_query_5 = "SELECT DISTINCT COUNT attr_open var_uri attr_close where brack_open var_x dbo_builder dbr_Department_of_Public_Works_and_Highways_ attr_open Philippines attr_close sep_dot var_x dbo_builder var_uri brack_close"
encoded_query_6 = "SELECT DISTINCT COUNT attr_open var_uri attr_close where brack_open var_x dbo_team dbr_İzmir_Büyükşehir_Belediyesi_GSK_ attr_open men's_ice_hockey attr_close sep_dot var_x dbo_formerTeam var_uri sep_dot var_uri a dbo_SportsTeam brack_close"
encoded_query_7 = "SELECT DISTINCT var_uri where brack_open var_x dbo_hometown dbr_Île-de-France_ attr_open region attr_close sep_dot var_x dbp_genre var_uri sep_dot var_x a dbo_Band brack_close"
encoded_query_8 = "SELECT DISTINCT var_uri where brack_open dbr_ZFS_ attr_open z/OS_file_system attr_close dbp_developer var_uri sep_dot dbr_Maqetta dbo_author var_uri brack_close"
encoded_query_9 = "select distinct var_uri where brack_open brack_open var_uri dbo_field dbr_Jazz sep_dot brack_close union brack_open var_uri dc:description var_s sep_dot filter regex attr_open var_s,'dbr_Jazz','i' attr_close brack_close var_uri dbo_award dbr_Academy_Awards sep_dot brack_close"
encoded_query_10 = "ASK where brack_open dbr_ attr_open 12538 attr_close _1998_OH dbo_discoverer dbr_Near_Earth_Asteroid_Tracking brack_close"
encoded_query_11 = "ask where brack_open dbr_Alexis_Denisof dbo_spouse var_spouse sep_dot var_spouse rdfs_label var_name sep_dot filter attr_open regex attr_open var_name,'dbo_Station' attr_close attr_close brack_close"
tokenizer = DBpediaTokenizer()
print(tokenizer.decode(encoded_query).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_2).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_3).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_4).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_5).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_6).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_7).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_8).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_9).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_10).get_query(add_prefixes=False))
print(tokenizer.decode(encoded_query_11).get_query())
if __name__ == '__main__':
unittest.main()
```
#### File: slot_filling/test/testSlotFillingHelper.py
```python
import unittest
from typing import Dict, List
from slot_filling.qa_slot_filling_helper import ForceQueryGenerationSlotFillingHelper
class TestSlotFillingHelper(unittest.TestCase):
def auxTestForceFill(self, query_template: str, slots: Dict, entities: List[Dict], expected_answer: str,
expected_slot_entity_map: List[Dict],
do_assert: bool = True):
system_answer, system_slot_entity_map = ForceQueryGenerationSlotFillingHelper().fill_template(query_template, slots, entities)
if do_assert:
self.assertEqual(expected_answer, system_answer)
self.assertListEqual(expected_slot_entity_map, system_slot_entity_map)
# self.assertEqual(set(tuple([case['slot'], case['entity']]) for case in expected_slot_entity_map), set(tuple([case['slot'], case['entity']]) for case in system_slot_entity_map))
else:
print("EXPECTED: ", expected_answer)
print("SYSTEM: ", system_answer)
def testForceFillSlotCorrectlyLabeled(self):
# Labels correctly assigned
self.auxTestForceFill(
query_template="ask where { <sbj_1> wdt:P103 <obj_1> }",
slots={
"<NAME>": "<sbj_1>",
"old english": "<obj_1>"
},
entities=[
{
"ini": 4,
"fin": 19,
"label": "<NAME>",
"url": "wd:Q9696",
"score_list": [
{
"value": 0.9665,
"field_name": "disambiguationScore"
}
]
},
{
"ini": 38,
"fin": 49,
"label": "Old English",
"url": "wd:Q42365",
"score_list": [
{
"value": 0.42504,
"field_name": "disambiguationScore"
}
]
}
],
expected_answer="ask where { wd:Q9696 wdt:P103 wd:Q42365 }",
expected_slot_entity_map=[
dict(slot='<sbj_1>', entity='wd:Q9696'),
dict(slot='<obj_1>', entity='wd:Q42365'),
],
)
def testForceFillSlotMissing(self):
# One slot missing: <obj_1>
self.auxTestForceFill(
query_template="ask where { <sbj_1> wdt:P27 <obj_1> }",
slots = {"lil wayne": "<sbj_1>"},
entities = [
{
"ini": 3,
"fin": 12,
"label": "<NAME>",
"url": "wd:Q15615",
"score_list": [{"value": 25.65301366603667, "field_name": "log_likelihood"}]
},
{
"ini": 15,
"fin": 17,
"label": "US",
"url": "wd:Q30",
"score_list": [{"value": 1.0, "field_name": "disambiguationScore"}]
}
],
expected_answer = "ask where { wd:Q15615 wdt:P27 wd:Q30 }",
expected_slot_entity_map=[
dict(slot='<sbj_1>', entity='wd:Q15615'),
dict(slot='<obj_1>', entity='wd:Q30'),
],
)
def testForceFillSlotIncorrectlyTagged(self):
# One slot incorrectly tagged: <obj_2> instead of <obj_1>
self.auxTestForceFill(
query_template="ask where { <sbj_1> wdt:P26 <obj_1> }",
slots= {
"<NAME>": "<sbj_1>",
"<NAME>": "<obj_2>"
},
entities= [
{
"ini": 22,
"fin": 33,
"label": "<NAME>",
"url": "wd:Q257943",
"score_list": [
{
"value": 24.912549928495714,
"field_name": "log_likelihood"
}
]
},
{
"ini": 4,
"fin": 17,
"label": "<NAME>",
"url": "wd:Q352159",
"score_list": [
{
"value": 0.5002393126487732,
"field_name": "rho"
}
]
}
],
expected_answer="ask where { wd:Q352159 wdt:P26 wd:Q257943 }",
expected_slot_entity_map=[
dict(slot='<sbj_1>', entity='wd:Q352159'),
dict(slot='<obj_1>', entity='wd:Q257943'),
],
)
def testForceFillOneLabelContainedInOtherOne(self):
# Label "fibonacci" is a substring of the label "fibonacci sequence"
self.auxTestForceFill(
query_template="ask where { <sbj_1> wdt:P138 <obj_1> }",
slots={
"fibonacci sequence": "<sbj_1>",
"fibonacci": "<obj_1>"
},
entities=[
{
"ini": 39,
"fin": 48,
"label": "Fibonacci",
"url": "wd:Q8763",
"score_list": [
{
"value": 14.069714998388692,
"field_name": "log_likelihood"
}
]
},
{
"ini": 4,
"fin": 22,
"label": "Fibonacci Sequence",
"url": "wd:Q47577",
"score_list": [
{
"value": 0.4509202539920807,
"field_name": "rho"
}
]
}
],
expected_answer="ask where { wd:Q47577 wdt:P138 wd:Q8763 }",
expected_slot_entity_map=[
dict(slot='<sbj_1>', entity='wd:Q47577'),
dict(slot='<obj_1>', entity='wd:Q8763'),
],
)
def testForceFillNotExactMatchedLabels(self):
# Label "nebula award for best script" assigned to "Nebula Award"
self.auxTestForceFill(
query_template="ask where { <sbj_1> wdt:P1411 <obj_1> }",
slots={
"<NAME>": "<sbj_1>",
"nebula award for best script": "<obj_1>"
},
entities=[
{
"ini": 4,
"fin": 14,
"label": "<NAME>",
"url": "wd:Q103646",
"score_list": [
{
"value": 1.0,
"field_name": "disambiguationScore"
}
]
},
{
"ini": 45,
"fin": 57,
"label": "Nebula Award",
"url": "wd:Q194285",
"score_list": [
{
"value": 0.561386227607727,
"field_name": "rho"
}
]
}
],
expected_answer="ask where { wd:Q103646 wdt:P1411 wd:Q194285 }",
expected_slot_entity_map=[
dict(slot='<sbj_1>', entity='wd:Q103646'),
dict(slot='<obj_1>', entity='wd:Q194285'),
],
)
```
#### File: templates/test/test_templates_lcquad_2.py
```python
import unittest
from templates.templates_lcquad_2 import TemplateLCQUAD2
################################### TEMPLATE TEST CASES ################################################################
class TestTemplate(unittest.TestCase):
def aux_testTemplate(self, temp_string, nnqt, query, empty_query, expected_resources, expected_label_entity, expected_slot):
template=TemplateLCQUAD2.create_template(temp_string)
self.assertEqual(template.replace_entities(query), empty_query, "replace: " + str(template))
self.assertEqual(template.extract_resources(nnqt, query), expected_resources, "extract: " + str(template))
self.assertEqual(template.get_label_entity_list(nnqt, query), expected_label_entity, "label_entity: " + str(template))
self.assertEqual(template.get_slot_list(nnqt, query), expected_slot, "slot: " + str(template))
def testSelectSubjectInstanceOfType(self):
self.aux_testTemplate(
temp_string="<?S P O ; ?S InstanceOf Type>",
nnqt="What is the {city} for {capital of} of {Meghalaya}",
query=" select distinct ?sbj where { ?sbj wdt:P1376 wd:Q1195 . ?sbj wdt:P31 wd:Q515 } ",
empty_query=" select distinct ?sbj where { ?sbj wdt:P1376 <obj> . ?sbj wdt:P31 <type> } ",
expected_resources={
"Meghalaya": "wd:Q1195",
"city": "wd:Q515"
},
expected_label_entity=[
{
"label": "city",
"entity": "wd:Q515"
},
{
"label": "Meghalaya",
"entity": "wd:Q1195"
}
],
expected_slot=[
{
"label": "city",
"slot": "<type>"
},
{
"label": "Meghalaya",
"slot": "<obj>"
}
]
)
def testSelectSubjectInstanceOfTypeContainsWord(self):
self.aux_testTemplate(
temp_string="<?S P O ; ?S instanceOf Type ; contains word >",
nnqt="Give me {human} that contains the word {vitellius} in their name",
query="SELECT DISTINCT ?sbj ?sbj_label WHERE { ?sbj wdt:P31 wd:Q5 . ?sbj rdfs:label ?sbj_label . FILTER(CONTAINS(lcase(?sbj_label), 'vitellius')) . FILTER (lang(?sbj_label)='en') } LIMIT 25 ",
empty_query="SELECT DISTINCT ?sbj ?sbj_label WHERE { ?sbj wdt:P31 <type> . ?sbj rdfs:label ?sbj_label . FILTER(CONTAINS(lcase(?sbj_label), <label>)) . FILTER (lang(?sbj_label)='en') } LIMIT 25 ",
expected_resources={
"human": "wd:Q5",
"value_vitellius": 'vitellius'
},
expected_label_entity=[
{
"label": "human",
"entity": "wd:Q5"
}
],
expected_slot=[
{
"label": "human",
"slot": "<type>"
},
{
"label": "vitellius",
"slot": "<label>"
}
]
)
def testSelectSubjectInstanceOfTypeStartsWith(self):
self.aux_testTemplate(
temp_string="<?S P O ; ?S instanceOf Type ; starts with character >",
nnqt="Give me {city} that starts with {'w'}",
query="SELECT DISTINCT ?sbj ?sbj_label WHERE { ?sbj wdt:P31 wd:Q515 . ?sbj rdfs:label ?sbj_label . FILTER(STRSTARTS(lcase(?sbj_label), 'w')) . FILTER (lang(?sbj_label)='en') } LIMIT 25 ",
empty_query="SELECT DISTINCT ?sbj ?sbj_label WHERE { ?sbj wdt:P31 <type> . ?sbj rdfs:label ?sbj_label . FILTER(STRSTARTS(lcase(?sbj_label), <letter>)) . FILTER (lang(?sbj_label)='en') } LIMIT 25 ",
expected_resources={
"city": "wd:Q515",
"value_w": "w"
},
expected_label_entity=[
{
"label": "city",
"entity": "wd:Q515"
}
],
expected_slot=[
{
"label": "city",
"slot": "<type>"
},
{
"label": "w",
"slot": "<letter>"
}
]
)
def testSelectObjectInstanceOfType(self):
self.aux_testTemplate(
temp_string="<S P ?O ; ?O instanceOf Type>",
nnqt="What is the {city/town} for {twinned administrative body} of {Hamburg}",
query="select distinct ?obj where { wd:Q1055 wdt:P710 ?obj . ?obj wdt:P31 wd:Q7930989 }",
empty_query="select distinct ?obj where { <sbj> wdt:P710 ?obj . ?obj wdt:P31 <type> }",
expected_resources={
"city/town": "wd:Q7930989",
"Hamburg": "wd:Q1055"
},
expected_label_entity=[
{
"label": "city/town",
"entity": "wd:Q7930989"
},
{
"label": "Hamburg",
"entity": "wd:Q1055"
}
],
expected_slot=[
{
"label": "city/town",
"slot": "<type>"
},
{
"label": "Hamburg",
"slot": "<sbj>"
}
]
)
def testAskOneFact(self):
self.aux_testTemplate(
temp_string="Ask (ent-pred-obj)",
nnqt="Did {Rihanna} {record label} {Motown}?",
query="ASK WHERE { wd:Q36844 wdt:P264 wd:Q43327 }",
empty_query="ASK WHERE { <sbj> wdt:P264 <obj> }",
expected_resources={
"Rihanna": "wd:Q36844",
"Motown": "wd:Q43327"
},
expected_label_entity=[
{
"label": "Rihanna",
"entity": "wd:Q36844"
},
{
"label": "Motown",
"entity": "wd:Q43327"
}
],
expected_slot=[
{
"label": "Rihanna",
"slot": "<sbj>"
},
{
"label": "Motown",
"slot": "<obj>"
}
]
)
def testAskOneFactWithFilter(self):
self.aux_testTemplate(
temp_string="ASK ?sbj ?pred ?obj filter ?obj = num",
nnqt="Does the {family relationship degree} of the {paternal grandmother} {equals} {2}",
query="ASK WHERE { wd:Q20776714 wdt:P4500 ?obj filter(?obj=2) } ",
empty_query="ASK WHERE { <sbj> wdt:P4500 ?obj filter(?obj = <num>) } ",
expected_resources={
"paternal grandmother": "wd:Q20776714",
"number_2": "2"
},
expected_label_entity=[
{
"label": "paternal grandmother",
"entity": "wd:Q20776714"
}
],
expected_slot=[
{
"label": "paternal grandmother",
"slot": "<sbj>"
},
{
"label": "2",
"slot": "<num>"
}
]
)
self.aux_testTemplate(
temp_string="ASK ?sbj ?pred ?obj filter ?obj = num",
nnqt="Does the {heat capacity} of the {water} {equals} {75.375}",
query="ASK WHERE { wd:Q283 wdt:P2056 ?obj filter(?obj=75.375) } ",
empty_query="ASK WHERE { <sbj> wdt:P2056 ?obj filter(?obj = <num>) } ",
expected_resources={
"water": "wd:Q283",
"number_75.375": "75.375"
},
expected_label_entity=[
{
"label": "water",
"entity": "wd:Q283"
}
],
expected_slot=[
{
"label": "water",
"slot": "<sbj>"
},
{
"label": "75.375",
"slot": "<num>"
}
]
)
def testAskTwoFacts(self):
self.aux_testTemplate(
temp_string="Ask (ent-pred-obj1 . ent-pred-obj2)",
nnqt="Did {Beijing} {twinned administrative body} {Nur-Sultan} and {Salo} ?",
query="ASK WHERE { wd:Q956 wdt:P190 wd:Q1520 . wd:Q956 wdt:P190 wd:Q210987 }",
empty_query="ASK WHERE { <sbj> wdt:P190 <obj_1> . <sbj> wdt:P190 <obj_2> }",
expected_resources={
"Beijing": "wd:Q956",
"Nur-Sultan": "wd:Q1520",
"Salo": "wd:Q210987"
},
expected_label_entity=[
{
"label": "Beijing",
"entity": "wd:Q956"
},
{
"label": "Nur-Sultan",
"entity": "wd:Q1520"
},
{
"label": "Salo",
"entity": "wd:Q210987"
}
],
expected_slot=[
{
"label": "Beijing",
"slot": "<sbj>"
},
{
"label": "Nur-Sultan",
"slot": "<obj_1>"
},
{
"label": "Salo",
"slot": "<obj_2>"
}
]
)
def testSelectOneFactSubject(self):
self.aux_testTemplate(
temp_string="E REF ?F",
nnqt="What is <SANU member ID> of <<NAME>> ?",
query="select distinct ?answer where { wd:Q156201 wdt:P3475 ?answer}",
empty_query="select distinct ?answer where { <sbj> wdt:P3475 ?answer}",
expected_resources={
"<NAME>": "wd:Q156201"
},
expected_label_entity=[
{
"label": "<NAME>",
"entity": "wd:Q156201"
}
],
expected_slot=[
{
"label": "<NAME>",
"slot": "<sbj>"
}
]
)
def testSelectOneFactObject(self):
self.aux_testTemplate(
temp_string="?D RDE E",
nnqt="What is <this zoological name is coordinate with> of <Papilionidae> ?",
query="select distinct ?answer where { ?answer wdt:P2743 wd:Q59905}",
empty_query="select distinct ?answer where { ?answer wdt:P2743 <obj>}",
expected_resources={
"Papilionidae": "wd:Q59905"
},
expected_label_entity=[
{
"label": "Papilionidae",
"entity": "wd:Q59905"
}
],
expected_slot=[
{
"label": "Papilionidae",
"slot": "<obj>"
}
]
)
def testSelectTwoAnswers(self):
self.aux_testTemplate(
temp_string="select where (ent-pred-obj1 . ent-pred-obj2)",
nnqt="What is the {child} and the {place of birth} of {Ashton_Kutcher} ?",
query="SELECT ?ans_1 ?ans_2 WHERE { wd:Q164782 wdt:P40 ?ans_1 . wd:Q164782 wdt:P19 ?ans_2 }",
empty_query="SELECT ?ans_1 ?ans_2 WHERE { <sbj> wdt:P40 ?ans_1 . <sbj> wdt:P19 ?ans_2 }",
expected_resources={
"Ashton_Kutcher": "wd:Q164782"
},
expected_label_entity=[
{
"label": "Ashton_Kutcher",
"entity": "wd:Q164782"
}
],
expected_slot=[
{
"label": "Ashton_Kutcher",
"slot": "<sbj>"
}
]
)
def testSelectTwoFactsSubjectObject(self):
self.aux_testTemplate(
temp_string="E REF ?F . ?F RFG G",
nnqt="What is {capital town} of {Kingdom of Wessex}, that has {notable event} is {7 July 2005 London bombings} ?",
query="SELECT ?answer WHERE { wd:Q105313 wdt:P36 ?answer . ?answer wdt:P793 wd:Q10818}",
empty_query="SELECT ?answer WHERE { <sbj_1> wdt:P36 ?answer . ?answer wdt:P793 <obj_2>}",
expected_resources={
"Kingdom of Wessex": "wd:Q105313",
"7 July 2005 London bombings": "wd:Q10818"
},
expected_label_entity=[
{
"label": "Kingdom of Wessex",
"entity": "wd:Q105313"
},
{
"label": "7 July 2005 London bombings",
"entity": "wd:Q10818"
}
],
expected_slot=[
{
"label": "Kingdom of Wessex",
"slot": "<sbj_1>"
},
{
"label": "7 July 2005 London bombings",
"slot": "<obj_2>"
}
]
)
def testSelectTwoFactsRightSubject(self):
self.aux_testTemplate(
temp_string="E REF xF . xF RFG ?G",
nnqt="What is {safety classification and labelling} of {polymer of} of {polyvinyl chloride} ?",
query="SELECT ?answer WHERE { wd:Q146368 wdt:P4600 ?X . ?X wdt:P4952 ?answer}",
empty_query="SELECT ?answer WHERE { <sbj_1> wdt:P4600 ?X . ?X wdt:P4952 ?answer}",
expected_resources={
"polyvinyl chloride": "wd:Q146368"
},
expected_label_entity=[
{
"label": "polyvinyl chloride",
"entity": "wd:Q146368"
}
],
expected_slot=[
{
"label": "polyvinyl chloride",
"slot": "<sbj_1>"
}
]
)
def testSelectTwoFactsLeftSubject(self):
self.aux_testTemplate(
temp_string="C RCD xD . xD RDE ?E",
nnqt="What is {has influence} of {brother or sister} of {<NAME>} ?",
query="SELECT ?answer WHERE { wd:Q170348 wdt:P3373 ?X . ?X wdt:P737 ?answer}",
empty_query="SELECT ?answer WHERE { <sbj_1> wdt:P3373 ?X . ?X wdt:P737 ?answer}",
expected_resources={
"<NAME>": "wd:Q170348",
},
expected_label_entity=[
{
"label": "<NAME>",
"entity": "wd:Q170348"
}
],
expected_slot=[
{
"label": "<NAME>",
"slot": "<sbj_1>"
}
]
)
def testCountOneFactSubject(self):
self.aux_testTemplate(
temp_string="Count ent (ent-pred-obj)",
nnqt="How many {manner of death} are to/by {battle} ?",
query="SELECT (COUNT(?sub) AS ?value ) { ?sub wdt:P1196 wd:Q178561 }",
empty_query="SELECT (COUNT(?sub) AS ?value ) { ?sub wdt:P1196 <obj> }",
expected_resources={
"battle": "wd:Q178561",
},
expected_label_entity=[
{
"label": "battle",
"entity": "wd:Q178561"
}
],
expected_slot=[
{
"label": "battle",
"slot": "<obj>"
}
]
)
def testCountOneFactObject(self):
self.aux_testTemplate(
temp_string="Count Obj (ent-pred-obj)",
nnqt="How many {location of final assembly} are for {Airbus A320} ?",
query="SELECT (COUNT(?obj) AS ?value ) { wd:Q6475 wdt:P1071 ?obj }",
empty_query="SELECT (COUNT(?obj) AS ?value ) { <sbj> wdt:P1071 ?obj }",
expected_resources={
"Airbus A320": "wd:Q6475",
},
expected_label_entity=[
{
"label": "Airbus A320",
"entity": "wd:Q6475"
}
],
expected_slot=[
{
"label": "Airbus A320",
"slot": "<sbj>"
}
]
)
def testSelectOneQualifierValueUsingOneStatementProperty(self):
self.aux_testTemplate(
temp_string="(E pred ?Obj ) prop value",
nnqt="what is the {criterion used} for {Eros} has {mother} as {Nyx} ?",
query="SELECT ?value WHERE { wd:Q121973 p:P25 ?s . ?s ps:P25 wd:Q131203 . ?s pq:P1013 ?value}",
empty_query="SELECT ?value WHERE { <sbj_1> p:P25 ?s . ?s ps:P25 <obj_2> . ?s pq:P1013 ?value}",
expected_resources={
"Eros": "wd:Q121973",
"Nyx": "wd:Q131203",
},
expected_label_entity=[
{
"label": "Eros",
"entity": "wd:Q121973"
},
{
"label": "Nyx",
"entity": "wd:Q131203"
}
],
expected_slot=[
{
"label": "Eros",
"slot": "<sbj_1>"
},
{
"label": "Nyx",
"slot": "<obj_2>"
}
]
)
self.aux_testTemplate(
temp_string="(E pred ?Obj ) prop value",
nnqt="what is the {has quality} for {Heidelberg University} has {IPv4 routing prefix} as {192.168.3.11/16} ?",
query="SELECT ?value WHERE { wd:Q151510 p:P3761 ?s . ?s ps:P3761 ?x filter(contains(?x,'192.168.3.11/16')) . ?s pq:P1552 ?value}",
empty_query="SELECT ?value WHERE { <sbj_1> p:P3761 ?s . ?s ps:P3761 ?x filter(contains(?x,<str_value>)) . ?s pq:P1552 ?value}",
expected_resources={
"Heidelberg University": "wd:Q151510",
"value_192.168.3.11/16": "192.168.3.11/16"
},
expected_label_entity=[
{
"label": "Heidelberg University",
"entity": "wd:Q151510"
}
],
expected_slot=[
{
"label": "Heidelberg University",
"slot": "<sbj_1>"
},
{
"label": "129.206.0.0/16",
"slot": "<str_value>"
}
]
)
def testSelectObjectUsingOneStatementProperty(self):
self.aux_testTemplate(
temp_string="(E pred F) prop ?value",
nnqt="What is {position held} of {Mieszko I} that is {replaced by} is {Boles\u0142aw I Chrobry} ?",
query="SELECT ?obj WHERE { wd:Q53435 p:P39 ?s . ?s ps:P39 ?obj . ?s pq:P1366 wd:Q53436 }",
empty_query="SELECT ?obj WHERE { <sbj_1> p:P39 ?s . ?s ps:P39 ?obj . ?s pq:P1366 <obj_3> }",
expected_resources={
"Mieszko I": "wd:Q53435",
"Boles\u0142aw I Chrobry": "wd:Q53436",
},
expected_label_entity=[
{
"label": "<NAME>",
"entity": "wd:Q53435"
},
{
"label": "Boles\u0142aw I Chrobry",
"entity": "wd:Q53436"
}
],
expected_slot=[
{
"label": "<NAME>",
"slot": "<sbj_1>"
},
{
"label": "Boles\u0142aw I Chrobry",
"slot": "<obj_3>"
}
]
)
self.aux_testTemplate(
temp_string="(E pred F) prop ?value",
nnqt="What is {spouse} of {Nero} that is {end time} is {68-6-9} ?",
query="SELECT ?obj WHERE { wd:Q1413 p:P26 ?s . ?s ps:P26 ?obj . ?s pq:P582 ?x filter(contains(?x,'68-6-9')) }",
empty_query="SELECT ?obj WHERE { <sbj_1> p:P26 ?s . ?s ps:P26 ?obj . ?s pq:P582 ?x filter(contains(?x,<str_value>)) }",
expected_resources={
"Nero": "wd:Q1413",
"value_68-6-9": "68-6-9"
},
expected_label_entity=[
{
"label": "Nero",
"entity": "wd:Q1413"
}
],
expected_slot=[
{
"label": "Nero",
"slot": "<sbj_1>"
},
{
"label": "68-6-9",
"slot": "<str_value>"
}
]
)
def testRankInstanceOfTypeOneFact(self):
self.aux_testTemplate(
temp_string="?E is_a Type, ?E pred Obj value. MAX/MIN (value)",
nnqt="What is the {independent city} with the {MAX(vehicles per capita (1000))} ?",
query="select ?ent where { ?ent wdt:P31 wd:Q22865 . ?ent wdt:P5167 ?obj } ORDER BY DESC(?obj)LIMIT 5 ",
empty_query="select ?ent where { ?ent wdt:P31 <type> . ?ent wdt:P5167 ?obj } ORDER BY DESC(?obj)LIMIT 5 ",
expected_resources={
"independent city": "wd:Q22865"
},
expected_label_entity=[
{
"label": "independent city",
"entity": "wd:Q22865"
}
],
expected_slot=[
{
"label": "independent city",
"slot": "<type>"
}
]
)
self.aux_testTemplate(
temp_string="?E is_a Type, ?E pred Obj value. MAX/MIN (value)",
nnqt="What is the {weapon model} with the {MIN(rate of fire)} ?",
query="select ?ent where { ?ent wdt:P31 wd:Q15142894 . ?ent wdt:P3792 ?obj } ORDER BY ASC(?obj)LIMIT 5 ",
empty_query="select ?ent where { ?ent wdt:P31 <type> . ?ent wdt:P3792 ?obj } ORDER BY ASC(?obj)LIMIT 5 ",
expected_resources={
"weapon model": "wd:Q15142894"
},
expected_label_entity=[
{
"label": "weapon model",
"entity": "wd:Q15142894"
}
],
expected_slot=[
{
"label": "weapon model",
"slot": "<type>"
}
]
)
def testRankMaxInstanceOfTypeTwoFacts(self):
self.aux_testTemplate(
temp_string="?E is_a Type. ?E pred Obj. ?E-secondClause value. MAX (value)",
nnqt="What is the {smartphone model} with the {MAX(energy storage capacity)} whose {manufacturer} is {Microsoft Mobile} ?",
query="select ?ent where { ?ent wdt:P31 wd:Q19723451 . ?ent wdt:P4140 ?obj } ?ent wdt:P176 wd:Q16538568 ORDER BY DESC(?obj)LIMIT 5 ",
empty_query="select ?ent where { ?ent wdt:P31 <type> . ?ent wdt:P4140 ?obj . ?ent wdt:P176 <obj_3> } ORDER BY DESC(?obj)LIMIT 5 ",
expected_resources={
"smartphone model": "wd:Q19723451",
"Microsoft Mobile": "wd:Q16538568",
},
expected_label_entity=[
{
"label": "smartphone model",
"entity": "wd:Q19723451"
},
{
"label": "Microsoft Mobile",
"entity": "wd:Q16538568"
}
],
expected_slot=[
{
"label": "smartphone model",
"slot": "<type>"
},
{
"label": "Microsoft Mobile",
"slot": "<obj_3>"
}
]
)
def testRankMinInstanceOfTypeTwoFacts(self):
self.aux_testTemplate(
temp_string="?E is_a Type. ?E pred Obj. ?E-secondClause value. MIN (value)",
nnqt="What is the {state of Germany} with the {MIN(vehicles per capita (1000))} whose {contains administrative territorial entity} is {ERROR1} ?",
query="select ?ent where { ?ent wdt:P31 wd:Q1221156 . ?ent wdt:P5167 ?obj } ?ent wdt:P150 wd:Q30127558 ORDER BY ASC(?obj)LIMIT 5 ",
empty_query="select ?ent where { ?ent wdt:P31 <type> . ?ent wdt:P5167 ?obj . ?ent wdt:P150 <obj_3> } ORDER BY ASC(?obj)LIMIT 5 ",
expected_resources={
"state of Germany": "wd:Q1221156",
"ERROR1": "wd:Q30127558",
},
expected_label_entity=[
{
"label": "state of Germany",
"entity": "wd:Q1221156"
},
{
"label": "ERROR1",
"entity": "wd:Q30127558"
}
],
expected_slot=[
{
"label": "state of Germany",
"slot": "<type>"
},
{
"label": "ERROR1",
"slot": "<obj_3>"
}
]
)
def testSelectOneQualifierValueAndObjectUsingOneStatementProperty(self):
self.aux_testTemplate(
temp_string=3,
nnqt="What is {award received} of {Konrad Lorenz} and {together with}",
query="SELECT ?value1 ?obj WHERE { wd:Q78496 p:P166 ?s . ?s ps:P166 ?obj . ?s pq:P1706 ?value1 . }",
empty_query="SELECT ?value1 ?obj WHERE { <sbj_1> p:P166 ?s . ?s ps:P166 ?obj . ?s pq:P1706 ?value1 . }",
expected_resources={
"<NAME>": "wd:Q78496"
},
expected_label_entity=[
{
"label": "<NAME>",
"entity": "wd:Q78496"
}
],
expected_slot=[
{
"label": "<NAME>",
"slot": "<sbj_1>"
}
]
)
def testSelectTwoQualifierValuesUsingOneStatementProperty(self):
self.aux_testTemplate(
temp_string=2,
nnqt="What is {point in time} and {together with} of {{<NAME>} has {award received} as {MTV Movie Award for Best Fight}}",
query="SELECT ?value1 ?value2 WHERE { wd:Q381178 p:P166 ?s . ?s ps:P166 wd:Q734036 . ?s pq:P585 ?value1 . ?s pq:P1706 ?value2 }",
empty_query="SELECT ?value1 ?value2 WHERE { <sbj_1> p:P166 ?s . ?s ps:P166 <obj_2> . ?s pq:P585 ?value1 . ?s pq:P1706 ?value2 }",
expected_resources={
"<NAME>": "wd:Q381178",
"MTV Movie Award for Best Fight": "wd:Q734036"
},
expected_label_entity=[
{
"label": "<NAME>",
"entity": "wd:Q381178"
},
{
"label": "MTV Movie Award for Best Fight",
"entity": "wd:Q734036"
}
],
expected_slot=[
{
"label": "<NAME>",
"slot": "<sbj_1>"
},
{
"label": "MTV Movie Award for Best Fight",
"slot": "<obj_2>"
}
]
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpraveenkanna/Computer_Pointer_Controller",
"score": 2
} |
#### File: Computer_Pointer_Controller/src/main.py
```python
from input_feeder import InputFeeder
from mouse_controller import MouseController
import numpy as np
from openvino.inference_engine import IENetwork,IECore
import cv2
from matplotlib import pyplot as plt
from argparse import ArgumentParser
import face_detection
import landmark_detection
import head_pose_estimation
import gaze_estimation
import logging
def visualize_frame(frame,face,x_coord,y_coord,gaze_vec,boxes,result):
gaze_x = int(gaze_vec[0]*100)
gaze_y = int(gaze_vec[1]*100)
cv2.arrowedLine(face, (x_coord[0], y_coord[0]),
(x_coord[0] + gaze_x, y_coord[0] - gaze_y),
(255,0,255), 5)
cv2.arrowedLine(face, (x_coord[1], y_coord[1]),
(x_coord[1] + gaze_x, y_coord[1] - gaze_y),
(255,0,255), 5)
frame[boxes[0][1]:boxes[0][3], boxes[0][0]:boxes[0][2]] = face
for box in boxes:
cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (232, 255, 244), 2)
cv2.imshow("Preview",frame)
cv2.waitKey(10)
return frame
def process_frame(frame,visualize):
#calling face detection
input_img = fd.pre_process_input(frame)
result = fd.predict(input_img)
_,boxes = fd.preprocess_output(result)
face_location = fd.get_face_location()
if (len(boxes) < 1):
return "No face detected", frame
# calling landmark detection
crop_percentage = 0.5
pre_processed_img = ld.pre_process_input(face_location[0])
result = ld.predict(pre_processed_img)
output_image,x_axis,y_axis = ld.preprocess_output(result)
left_eye_crop = ld.crop_left_eye(crop_percentage)
right_eye_crop = ld.crop_right_eye(crop_percentage)
# Calling head pose estimation
pre_processed_img = hd.pre_process_input(face_location[0])
result = hd.predict(pre_processed_img)
headpose = hd.preprocess_output(result)
# calling gaze model
res_left = ge.pre_process_input(left_eye_crop)
res_right = ge.pre_process_input(right_eye_crop)
result_ge = ge.predict(headpose,res_left,res_right)
gaze_vec = result_ge['gaze_vector'][0, :]
#for visualizing
if(visualize == 'True'):
frame = visualize_frame(frame,face_location[0],x_axis,y_axis,gaze_vec,boxes,result)
return gaze_vec,frame
def process_video(input_video, video_output,visualize):
if input_video is None:
feed = InputFeeder(input_type='cam')
else:
feed = InputFeeder(input_type='video', input_file=input_video)
feed.load_data()
w = int(feed.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(feed.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(feed.cap.get(cv2.CAP_PROP_FPS))
fps=int(fps/4)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(video_output, fourcc, fps, (w, h), True)
frame_counter = 0
for frame in feed.next_batch():
if frame is not None:
frame_counter += 1
key = cv2.waitKey(10)
result, output_frame = process_frame(frame,visualize)
out.write(output_frame)
print("Frame: {} result: {}".format(frame_counter,result))
logger.info("Frame: {} result: {}".format(frame_counter,result))
esc_code = 27
if key == esc_code:
break
if mouse_controller is not None:
try:
mouse_controller.move(result[0], result[1])
except Exception as e:
print("Mouse controller exception:\n",e)
logger.info("Mouse controller exception:{}".format(e))
else:
break
cv2.destroyAllWindows()
out.release()
feed.close()
print("Saved the video")
logger.info("Saved the video")
def build_argparser():
parser = ArgumentParser()
parser.add_argument('-i', '--input_path', default= None)
parser.add_argument("-o", "--output_path",
type=str,required=True)
parser.add_argument("-t", "--inputType", default= 'video',
type=str,help='Options - [video,cam,image]')
parser.add_argument('--mouse_precision', default='medium',
help='Mouse movement precision. Options - [low, medium, high]')
parser.add_argument('--mouse_speed', default='medium',
help='Mouse movement speed. Options -[slow, medium, fast]')
parser.add_argument('--face_detection_model', default='models/face-detection-adas-binary-0001/FP32-INT1/face-detection-adas-binary-0001',
help='Path of face detection model without file extension')
parser.add_argument('--landmark_detection_model', default='models/landmarks-regression-retail-0009/FP16/landmarks-regression-retail-0009',
help='Path of landmark detection model without file extension')
parser.add_argument('--head_pose_estimation_model', default='models/head-pose-estimation-adas-0001/FP16/head-pose-estimation-adas-0001',
help='Path of headpose estimation model without file extension')
parser.add_argument('--gaze_estimation_model', default='models/gaze-estimation-adas-0002/FP16/gaze-estimation-adas-0002',
help='Path of Gaze estimation model without file extension')
parser.add_argument('--device', default='CPU',
help='Target hardware type to run inference on. Options - [CPU, GPU, FPGA, VPU]')
parser.add_argument('--visualize', default='True',
help='To visualize model intermediate output. Options - [True,False]')
return parser
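# Example invocation sketch (the input/output paths are placeholders, not files shipped with
# this repository):
#   python main.py -i bin/demo.mp4 -o bin/output.mp4 -t video \
#       --mouse_precision medium --mouse_speed medium --device CPU --visualize True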
if __name__ == '__main__':
args = build_argparser().parse_args()
#Logging
logging.basicConfig(filename="bin/mouse_controller.log",
format='%(asctime)s %(message)s',
filemode='w')
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
#initialize models
face_detection_model = args.face_detection_model
fd = face_detection.face_detection(face_detection_model,device=args.device)
fd.load_model()
fd.check_model()
fd.get_input_name()
landmark_detection_model = args.landmark_detection_model
ld = landmark_detection.landmark_detection(landmark_detection_model,args.device)
ld.load_model()
ld.check_model()
ld.get_input_name()
head_pose_estimation_model = args.head_pose_estimation_model
hd = head_pose_estimation.head_pose_estimation(head_pose_estimation_model,args.device)
hd.load_model()
hd.check_model()
hd.get_input_name()
gaze_estimation_model = args.gaze_estimation_model
ge = gaze_estimation.gaze_estimation(gaze_estimation_model,args.device)
ge.load_model()
ge.check_model()
ge.get_input_name()
#initialize mouse controller
mouse_controller = MouseController(args.mouse_precision,args.mouse_speed)
if(args.inputType == 'image'):
input_image = args.input_path
feed = InputFeeder(input_type='image', input_file=input_image)
feed.load_data()
frame = feed.cap
_,output_img = process_frame(frame,args.visualize)
cv2.imshow("Preview",output_img)
cv2.imwrite(args.output_path,output_img)
elif(args.inputType == 'video'):
process_video(args.input_path, args.output_path,args.visualize)
elif(args.inputType == 'cam'):
process_video(None, args.output_path,args.visualize)
else:
print("Invalid input type")
``` |
{
"source": "jpraveenkanna/People-Counter-App-at-the-Edge",
"score": 2
} |
#### File: jpraveenkanna/People-Counter-App-at-the-Edge/main.py
```python
import os
import sys
from openvino.inference_engine import IENetwork, IECore
import cv2
import numpy as np
import time
import socket
import json
import logging as log
import paho.mqtt.client as mqtt
import time
import logging
from argparse import ArgumentParser
from inference import Network
#Logging
logging.basicConfig(filename="person_counter.log",
format='%(asctime)s %(message)s',
filemode='w')
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
# MQTT server environment variables
HOSTNAME = socket.gethostname()
IPADDRESS = socket.gethostbyname(HOSTNAME)
MQTT_HOST = IPADDRESS
MQTT_PORT = 3001
MQTT_KEEPALIVE_INTERVAL = 60
def build_argparser():
"""
Parse command line arguments.
:return: command line arguments
"""
parser = ArgumentParser()
parser.add_argument("-m", "--model", required=False, type=str,
help="Path to an xml file with a trained model.")
parser.add_argument("-i", "--input", required=True, type=str,
help="Path to image or video file")
parser.add_argument("-l", "--cpu_extension", required=False, type=str,
default=None,
help="MKLDNN (CPU)-targeted custom layers."
"Absolute path to a shared library with the"
"kernels impl.")
parser.add_argument("-d", "--Device", type=str, default="CPU",
help="Specify the target device to infer on: "
"CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device "
"specified (CPU by default)")
    parser.add_argument("-pt", "--prob_threshold", type=float, default=0.1,
                        help="Probability threshold for detections filtering "
                        "(0.1 by default)")
    parser.add_argument("-t", "--Type_of_media", type=str, default="video",
                        help="Type of input media: image or video "
                        "(video by default)")
return parser
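# Example invocation sketch (model and video paths are placeholders). handle_video/handle_image
# write raw frames to stdout, so the command is normally piped into an ffmpeg process that
# serves the stream:
#   python main.py -i input.mp4 -m frozen_inference_graph.xml -d CPU -pt 0.4 -t video | ffmpeg <options>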
def connect_mqtt():
client = mqtt.Client()
client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
return client
def handle_video(self,input_source,prob_threshold,net_shape,client):
client_mqtt = client
start_signal = False
in_sec = 0
x_gradient = 0
timer = 0
prev_count = 0
x_prev = 0
count = 0
counter = 0
center_info =[]
duration = np.array([])
avg_duration = 0.0
total_count_copy = 0
n, c, h, w = self.network.inputs[self.input_blob].shape
cap = cv2.VideoCapture(input_source)
res_width = int(cap.get(3))
res_height = int(cap.get(4))
no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fourcc = cv2.VideoWriter_fourcc(*'XVID') #saving format
out = cv2.VideoWriter('out20.mp4', fourcc, 10, (res_width,res_height)) #To save video
n, c, h, w = self.network.inputs[self.input_blob].shape
while cap.isOpened():
flag, frame = cap.read()
if not flag:
break
image_copy = frame
image = frame
image_shape = (image.shape[1], image.shape[0])
frame , center= self.infer_on_track_image(image,image_shape,net_shape,prob_threshold) #Running inference
counter+=1
#Finding out new person entry
if((prev_count==count) and (prev_count != 0) and start_signal ):
timer +=1
if len(center)==1:
if(center[0][1] > 690):
#690 ---> x axis exit value
start_signal = False
duration = np.append(duration,round(in_sec, 1))
avg_duration = np.average(duration)
client_mqtt.publish('person/duration',payload=json.dumps({'duration': round(duration[-1])}),qos=0, retain=False)
timer = 0
in_sec = timer * 0.09971 #Calculating time for each frame in seconds
if len(center)==1: #Tracking person if person exist
try:
if(center[0][1]):
if(x_prev == 0):
x_prev = center[0][1]
count+=1
else:
x_gradient = abs(x_prev - center[0][1])
x_prev = center[0][1]
if(x_gradient>150):
count+=1
except:
logger.info("Exception in tracking person")
center_info.append(x_gradient)
cv2.putText(frame, "Total Person Counted:"+str(count), (15, 25), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
cv2.putText(frame, "Persons on Screen:"+str(int(start_signal)), (15, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
cv2.putText(frame, "Duration :"+str(round(in_sec, 1))+" Second", (15, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
cv2.putText(frame, "Avg Duration :"+str(round(avg_duration,1))+" Second", (15, 100), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
total_count_copy = int(count)
out.write(frame)
try:
client_mqtt.publish('person',payload=json.dumps({'count': int(start_signal), 'total': total_count_copy}),qos=0, retain=False)
frame_stream = cv2.resize(frame, (668, 432))
sys.stdout.buffer.write(frame_stream)
sys.stdout.flush()
except:
logger.info("Exception in MQTT of ffmpeg server")
else:
cv2.putText(image_copy, "Total Person Counted:"+str(count), (15, 25), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
cv2.putText(image_copy, "Persons on Screen:"+str(int(start_signal)), (15, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
cv2.putText(image_copy, "Duration :"+str(round(in_sec, 1))+" Second", (15, 75), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
cv2.putText(image_copy, "Avg Duration :"+str(round(avg_duration,1))+" Second", (15, 100), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
total_count_copy = int(count)
out.write(image_copy)
try:
client_mqtt.publish('person',payload=json.dumps({'count': int(start_signal), 'total': total_count_copy}),qos=0, retain=False)
frame_stream = cv2.resize(image_copy, (668, 432))
sys.stdout.buffer.write(frame_stream)
sys.stdout.flush()
except:
logger.info("Exception in MQTT of ffmpeg server")
#Tracking change in person count
if((prev_count < count) and (prev_count != count)):
prev_count = count
start_signal = True
key = cv2.waitKey(30)
esc_code = 27
if key == esc_code:
break
cap.release()
out.release()
cv2.destroyAllWindows()
client_mqtt.disconnect()
def handle_image(self,input_source,prob_threshold,net_shape,client):
client_mqtt=client
image = cv2.imread(input_source)
image_shape = (image.shape[1], image.shape[0])
result, center = self.infer_on_track_image(image,image_shape,net_shape,prob_threshold)
frame_stream = cv2.resize(result, (768, 432))
#cv2.imwrite("output1.jpeg", result)
sys.stdout.buffer.write(frame_stream)
client_mqtt.publish('person',payload=json.dumps({'count': len(center), 'total': len(center)}),qos=0, retain=False)
client_mqtt.publish('person/duration',payload=json.dumps({'duration': 0}),qos=0, retain=False)
client_mqtt.disconnect()
sys.stdout.flush()
return True
def infer_on_stream(net,args, client,net_shape):
"""
Initialize the inference network, stream video to network,
and output stats and video.
:param args: Command line arguments parsed by `build_argparser()`
:param client: MQTT client
:return: None
"""
# Initialise the class
infer_network = net
# Set Probability threshold for detections
prob_threshold = args.prob_threshold
media_type =args.Type_of_media
input_source = args.input
if(media_type == "image"):
handle_image(infer_network,input_source,prob_threshold,net_shape,client)
elif(media_type == "video"):
handle_video(infer_network,input_source,prob_threshold,net_shape,client)
else:
print("Type video or image")
def main():
args = build_argparser().parse_args()
# Connect to the MQTT server
client = connect_mqtt()
# Perform inference on the input stream
model = args.model
Device = args.Device
CPU_extension = args.cpu_extension
net = Network()
net.load_model(model,Device,CPU_extension)
net_input_shape = net.get_input_shape()['image_tensor']
net_shape = (net_input_shape[3], net_input_shape[2])
infer_on_stream(net,args, client,net_shape)
if __name__ == '__main__':
main()
``` |
{
"source": "jprchlik/aia_mkmovie",
"score": 3
} |
#### File: jprchlik/aia_mkmovie/grab_goes_xray_flux.py
```python
import ftplib
import numpy as np
import os
import sys
from datetime import timedelta,datetime
#Funtion to retrieve events from given day on noao ftp server
def getfiles(day,ftp,sdir):
"""
Downloads GOES X-ray fluxes from SWPC archive
Parameters
----------
day : datetime object
Day to download from the swpc archive.
ftp : ftplib FTP object
ftp connection to the swpc archive.
sdir : string
Output directory
"""
files = '{0:%Y%m%d}_Gs_xr_1m.txt'.format(day)
#create file to write ftp data
fhandle = open(sdir+'/goes/'+files,'wb')
try:
ftp.retrbinary('RETR {0}'.format(files),fhandle.write)
except:
print('{0} not in archive'.format(files))
fhandle.close()
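# Filename convention used above (illustrative, doctest-style):
#   >>> from datetime import datetime
#   >>> '{0:%Y%m%d}_Gs_xr_1m.txt'.format(datetime(2017, 9, 6))
#   '20170906_Gs_xr_1m.txt'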
#Function to retrieve ACE data files for a given day from the NOAA SWPC ftp server
def getfiles_ace(day,ftp,sdir,swepam=False,mag=False):
"""
Downloads ACE text files from NOAA SWPC archives for a given day.
Parameters
----------
day : datetime object
Day to download from the swpc archive.
ftp : ftplib FTP object
ftp connection to the swpc archive.
sdir : string
Output directory
swepam: boolean
Download ACE solar wind parameters
mag : boolean
Download ACE magnetometer
"""
if swepam: files = '{0:%Y%m%d}_ace_swepam_1m.txt'.format(day)
if mag: files = '{0:%Y%m%d}_ace_mag_1m.txt'.format(day)
#create file to write ftp data
fhandle = open(sdir+'/ace/'+files,'wb')
try:
ftp.retrbinary('RETR {0}'.format(files),fhandle.write)
except:
print('{0} not in archive'.format(files))
fhandle.close()
def look_xrays(start,end,sdir,email=None):
"""
Function for downloading GOES flux data
from the NOAA archives for a given date range.
Parameters
----------
start : datetime object
The start time for downloading data from the NOAA swpc archives
end : datetime object
The end time for downloading data from the NOAA swpc archives
sdir : string
String to path of output directory
email : string optional
String of email address for connecting to the swpc archives
"""
#initialize variables
ftp = ftplib.FTP('ftp.swpc.noaa.gov','anonymous',email)
#change ftp directory to events directory for a given year
ftp.cwd('/pub/lists/xray/')
days = np.arange(start,end,timedelta(days=1)).astype(datetime)
#loop through days
for day in days:
#request file from ftp server to add to local directory
getfiles(day,ftp,sdir)
#change ftp directory to events directory for a given year
ftp.cwd('../ace/')
for day in days:
getfiles_ace(day,ftp,sdir,swepam=True)
getfiles_ace(day,ftp,sdir,mag=True)
#nicely leave the ftp server
ftp.quit()
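# Illustrative driver (dates, output directory, and email address are assumptions). Note that
# getfiles/getfiles_ace write into sdir+'/goes/' and sdir+'/ace/', so those subdirectories
# must already exist:
#   look_xrays(datetime(2017, 9, 6), datetime(2017, 9, 8), 'swpc_data', email='user@example.com')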
``` |
{
"source": "jprchlik/cms2_python_helpers",
"score": 2
} |
#### File: jprchlik/cms2_python_helpers/create_fit_plots.py
```python
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.size'] = 18.0
mpl.rcParams['font.weight'] = 'bold'
from astropy.io import ascii
from astropy.table import join
import glob
import os,shutil
from datetime import datetime,timedelta
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
try:
from fancy_plot import fancy_plot
fp = True
except:
print 'Fancy plot not in your python path please add'
fp = False
class cms2_plot:
def __init__(self,time,cmsdir='',outdir='%Y/%m/%d/%H%M/',fit_file='fit_information.dat',lop_file='fit_loops.dat'):
"""Creates plots of helicity vs free energy and Poloidal vs Axial flux"""
if cmsdir == '': cmsdir = open('cms2_dir','r').readlines()[0][:-1]#read in first line of cms2_dir and format to send to script
if cmsdir[-1] != '/': cmsdir=cmsdir+'/'
self.time = time
self.form = "%Y/%m/%d %H:%M:%S"
self.time = datetime.strptime(self.time,self.form)
if outdir[-1] != '/': outdir = outdir+'/'
self.bdir = datetime.strftime(self.time,outdir)
self.cdir = cmsdir
self.ifit = fit_file
self.loop = lop_file
def read_model(self):
"""Reads the formatted model file to extract axial and poloidal fluxes"""
axi = []
pol = []
#read the file name for each model
for i in self.tdat['mod_nam']:
mod_num = i.split('_')[0] #get modelN
mod_pat = mod_num+'_path' #path file
fil = open(self.cdir+self.bdir+mod_pat,'r').readlines()
            pol.append(fil[1].split()[-1]) #get poloidal flux
axi.append(fil[-3].split()[-1]) #get axial flux
#add axial and poloidal flux to main table
self.tdat['axi'] = np.array(axi).astype('float')
self.tdat['pol'] = np.array(pol).astype('float')
        #sort in order of poloidal then axial fluxes
self.tdat.sort(['pol','axi'])
#set up plotting order based on pol and axi fluxes
self.tdat['plot_id'] = np.arange(self.tdat['pol'].size)
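    # Assumed layout of each 'modelN_path' file (inferred from the indexing above): the second
    # line ends with the poloidal flux value and the third-from-last line ends with the axial
    # flux value; only those trailing whitespace-separated tokens are read.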
def read_infit(self):
"""Read the best fit model file"""
from astropy.io import ascii
self.fdat = ascii.read(self.cdir+self.bdir+self.ifit)
self.ldat = ascii.read(self.cdir+self.bdir+self.loop)
self.tdat = join(self.fdat,self.ldat,keys=['mod_nam'])
self.comp_tsig()
self.read_model()
#remove some bad coronal loops
#use model id for abbicus
self.tdat['mod_id'] = [int(i.split('_')[0].replace('model','')) for i in self.tdat['mod_nam']]
def comp_tsig(self):
"""Add total sigma to table"""
cols = self.tdat.columns.keys() #list of columns in table
bfit = [i for i in cols if "bfit" in i] #All the bfit columns
self.sind = int(bfit[0].split('_')[1])#start index for loops
self.eind = int(bfit[-1].split('_')[1])#end index for loops
self.tind = self.eind-self.sind+1 #total number of coronal loops
#fitting stat values
fsta = [i for i in bfit if ((("_5" in i) & ("_5_" not in i)) | ("_5_5" in i))]
#normalize minimums
for i in fsta: self.tdat[i] = self.tdat[i]/self.tdat[i][self.tdat[i] > 0.].min()
#init new column
self.tdat['bfit_t_5'] = 0.0
self.tdat['bfit_s_5'] = 0.0
for i in fsta: self.tdat['bfit_t_5'] = self.tdat[i]+self.tdat['bfit_t_5']#total uncertainty
        for i in fsta: self.tdat['bfit_s_5'] = self.tdat[i]**2.+self.tdat['bfit_s_5']#sq. total uncertainty
self.tdat['bfit_a_5'] = self.tdat['bfit_t_5']/self.tind #average
self.tdat['bfit_s_5'] = (self.tdat['bfit_s_5'])**.5/self.tind #sum squared average
#uncertainty in average
self.tdat['bfit_u_5'] = 0.0
for i in fsta: self.tdat['bfit_u_5'] = (self.tdat[i]-self.tdat['bfit_a_5'])**2.+self.tdat['bfit_u_5']
self.tdat['bfit_u_5'] = np.sqrt(self.tdat['bfit_u_5'])/float(self.tind)
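    # Summary of the fit-statistic columns built above (N = self.tind coronal loops, each
    # bfit_i_5 column already normalized by its minimum):
    #   bfit_t_5 : sum_i bfit_i_5                       (total misfit)
    #   bfit_a_5 : bfit_t_5 / N                         (mean misfit)
    #   bfit_s_5 : sqrt(sum_i bfit_i_5**2) / N          (quadratic-mean misfit)
    #   bfit_u_5 : sqrt(sum_i (bfit_i_5 - mean)**2) / N (scatter about the mean)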
def create_plots(self):
"""Create plots using the best fit data"""
self.read_infit()
self.figt,self.axt = plt.subplots(ncols=3,nrows=1,gridspec_kw={'width_ratios':[50,50,10]},figsize=(8,4))
#self.figt.subplots_adjust(wspace=0.6)
self.figi,self.axi = plt.subplots(nrows=2,ncols=2,sharey=True)
#model number plots
self.figm,self.axm = plt.subplots()
# self.grid = gridspec.GridSpec(1,3,width_ratios=[10,10,1],height_ratios=[1])
self.axt = [i for i in self.axt.ravel()]
self.axi = [i for i in self.axi.ravel()]
#xvalues for 4 figure plot
xfigs = ['axi','pol','free_energy','helicity']
names = {'axi': ['Axial','Mx'],'pol': ['Poloidal','Mx/cm'], 'free_energy' : ['Free Energy','ergs'], 'helicity': ['Helicity','Mx$^2$']}
#set color for eruptions and non eruptive states
self.tdat['color'] = 'black'
self.tdat['color'][self.tdat['eruption'] == 'Yes'] = 'red'
use = self.tdat['bfit_a_5'] > 0 #reject filled values
print self.tdat['bfit_a_5'][use].max(),self.tdat['bfit_a_5'][use].min()
#plot 4 panel plot
ccolor = plt.cm.jet #colors for cycling
ocolor = ccolor(np.linspace(0,1,self.tind))
for i in np.arange(self.tind)+self.sind:
#plot as a function of model number
self.axm.scatter(self.tdat['plot_id'][use].astype('float'),self.tdat['bfit_{0:1d}_5'.format(i)][use].astype('float'),c=ocolor[i],label='{0:1d}'.format(i))
for k,j in enumerate(self.axi): j.scatter(self.tdat[xfigs[k]][use].astype('float'),self.tdat['bfit_{0:1d}_5'.format(i)][use].astype('float'),c=ocolor[i],label='{0:1d}'.format(i))
for k,j in enumerate(self.axi):
j.set_xlabel('{0} [{1}]'.format(*names[xfigs[k]]))
j.set_ylabel('Normed Quad. Mean Dis.')
self.axi[0].legend(loc='upper left',frameon=False,fontsize=4)
#self.pol[0].set_ylbot[-3.5,-2.])
self.axi[0].set_yscale('log')
self.axi[0].set_xscale('log')
self.axi[1].set_xscale('log')
#self.axi[0].set_ylim([0.0001,0.01])
#model fit parameter fit labels
self.axm.scatter(self.tdat['plot_id'][use],self.tdat['bfit_a_5'][use].astype('float'),marker='x',c='black',label='Ave.')
self.axm.errorbar(self.tdat['plot_id'][use],self.tdat['bfit_a_5'][use].astype('float'),yerr=self.tdat['bfit_u_5'][use],fmt='x',c='black',label=None,markersize=1)
self.axm.set_ylabel('Normed Quad. Mean Dis.')
self.axm.set_xlabel('Axial Flux [Mx]/Poloidal Flux [Mx/cm]')
self.axm.set_yscale('log')
#set scale of tick labels so they fit nicely on the plot 2018/08/27 <NAME>
self.axm.tick_params(axis='both', which='both', labelsize=10)
#set up nested plot labels
#for the value closer to the axis (axial flux)
xticks_top = [i for i in self.tdat['plot_id'][use]]
xticks_val = ['{0:3.1e}'.format(float(i)) for i in self.tdat['axi'][use]]
#create vertical alignment array
xticks_vrt = [0]*len(xticks_top)
#for the values farther from the axis (poloidal flux)
#unique poloidal fluxes
u_pol = np.unique(self.tdat['pol'][use])
#save the starting and ending points (used below to build the tick midpoints)
xticks_bot_st = []
xticks_bot_nd = []
#find first instance of axial flux value
#store values in lists
for i in u_pol:
tix_p, = np.where(self.tdat['pol'] == i)
xticks_bot_st.append(np.min(self.tdat['plot_id'][tix_p])-0.75)
xticks_bot_nd.append(np.max(self.tdat['plot_id'][tix_p])+0.75)
#use the starting and end points to create a mid point for the ticks
xticks_top.append(round((int(xticks_bot_nd[-1])-int(xticks_bot_st[-1]))/2.+int(xticks_bot_st[-1])+.5))
xticks_val.append('{0:3.1e}'.format(float(i)))
#offset vertical plotting point
xticks_vrt.append(-.15)
#sort by tick value
sorter = np.argsort(xticks_top)
xticks_top = np.array(xticks_top)[sorter]
xticks_val = np.array(xticks_val)[sorter]
xticks_vrt = np.array(xticks_vrt)[sorter]
#create xticks with new labels
self.axm.set_xticks(xticks_top)
self.axm.set_xticks(xticks_bot_st,minor=True)
self.axm.set_xticklabels(xticks_val)
#adjust vertical offset
for j,i in zip(self.axm.get_xticklabels(),xticks_vrt):
j.set_y(i)
if i > -.02: j.set_rotation(90)
#make long bars for minor ticks
self.axm.tick_params(axis='x',which='minor',direction='out',length=55.001)
#3D plot
vmin = 0.0015
vmax = 0.0025
vmin = 1.00
vmax = 2.0
cmap = plt.cm.Greys
cmap = plt.cm.Blues
cax =self.axt[0].scatter(self.tdat['axi'][use].astype('float'),self.tdat['pol'][use].astype('float'),c=self.tdat['bfit_a_5'][use],cmap=cmap,vmin=vmin,vmax=vmax,edgecolor=self.tdat['color'][use])
self.axt[1].scatter(self.tdat['free_energy'][use],self.tdat['helicity'][use],c=self.tdat['bfit_a_5'][use],cmap=cmap,vmin=vmin,vmax=vmax,edgecolor=self.tdat['color'][use])
#set labels
self.axt[0].set_xlabel('Axial Flux [Mx]')
self.axt[0].set_ylabel('Poloidal Flux [Mx/cm]')
self.axt[0].set_yscale('log')
self.axt[0].set_xscale('log')
self.axt[1].set_xlabel('Free Energy [erg]')
self.axt[1].set_ylabel('Helicity [Mx$^2$]')
cbar = self.figt.colorbar(cax,cax=self.axt[2])
cbar.set_label('Normed Quad. Mean Dis.')
# self.axt[1,1].scatter(self.tdat['free_energy'][use],self.tdat['helicity'][use],c=self.tdat['bfit_s_5'][use],cmap=plt.cm.magma,vmin= 0.00015,vmax=0.000600)
#Use fancy plot if module exists
if fp:
#fancy_plot(self.axm)
self.axm.grid(which='minor',color='black',linestyle='--',linewidth=1,axis='x')
self.axm.yaxis.set_tick_params(which='both',direction='in')
self.axm.yaxis.set_tick_params(which='major',length=7)
self.axm.yaxis.set_tick_params(which='minor',length=3)
for i in self.axt: fancy_plot(i)
for i in self.axi: fancy_plot(i)
self.figt.tight_layout()
self.figt.savefig(self.cdir+self.bdir+'total_fit_{0:%Y%m%d%H%M%S}.png'.format(self.time),bbox_pad=.1,bbox_inches='tight')
self.figt.savefig(self.cdir+self.bdir+'total_fit_{0:%Y%m%d%H%M%S}.eps'.format(self.time),bbox_pad=.1,bbox_inches='tight')
self.figi.savefig(self.cdir+self.bdir+'indvd_fit_{0:%Y%m%d%H%M%S}.png'.format(self.time),bbox_pad=.1,bbox_inches='tight')
self.figm.savefig(self.cdir+self.bdir+'model_fit_{0:%Y%m%d%H%M%S}.eps'.format(self.time),bbox_pad=.1,bbox_inches='tight')
self.figm.savefig(self.cdir+self.bdir+'model_fit_{0:%Y%m%d%H%M%S}.png'.format(self.time),bbox_pad=.1,bbox_inches='tight')
self.figi.savefig(self.cdir+self.bdir+'indvd_fit_{0:%Y%m%d%H%M%S}.eps'.format(self.time),bbox_pad=.1,bbox_inches='tight')
```
#### File: jprchlik/cms2_python_helpers/create_model_files_wrapper.py
```python
import sys, getopt
import create_model_files as cmf
def main(argv):
inargs1 = 'ht:c:o:m:a:p:s:'
snargs1 = inargs1[1:].split(':')
inargs2 = ['time','cmsdir','outdir','modeltemp','axial','poloidal','start']
helpinfo = "create_model_files_wrapper.py is a command line utility which calls the class create_model_files\n"
helpinfo = helpinfo+"The command takes only a few arguments and if you stick to a common theme you should only have to change the time between run\n"
helpinfo = helpinfo+"python create_model_files_wrapper.py"
for i in range(len(inargs2)): helpinfo=helpinfo+' -'+snargs1[i]+' <--'+inargs2[i]+'>'
helpinfo=helpinfo+'\n'
#Descriptive information about each keyword
argsdes=["A string time in the format of YYYY/MM/DD HH:MM:SS",
"The directory containing the CMS2 (default = read 'cms2_dir' file)",
"The directory format for the sigmoid (assumes a subdirectory of cmsdir (default = YYYY/MM/DD/HHMM/)",
"The initial model template already ran through CMS2. The model must end in 1 to work properly (default = model1)",
"Axial Fluxes to use (Default = 1.00000e19,3.00000e19,5.00000e19,7.00000e19,9.00000e19,1.00000e20,3.00000e20,5.00000e20,7.00000e20,9.00000e20,1.00000e21,1.50000e21)",
"Polodial Fluxes to use (Default = 1.00000E9,5.00000E9,1.00000E10,5.00000E10,1.00000E11)",
"Index to start grid (Default = 1)"]
for i in range(len(inargs2)): helpinfo = helpinfo+' -'+snargs1[i]+' <--'+inargs2[i]+'> : '+argsdes[i]+'\n'
#Add user example
helpinfo = helpinfo+"""\n Example: \n python create_model_files_wrapper.py -t "2017/12/31 23:59:00" -p "1.00000E9,5.00000E9" -a '7.00000e19,9.00000e19,1.00000e20,3.00000e20' -s 1"""
#load user values
try:
opts,args = getopt.getopt(argv,inargs1,inargs2)
except getopt.GetoptError:
print(helpinfo)
sys.exit(2)
#default for directory structure
sigd = '%Y/%m/%d/%H%M/'
#default for the model template file (must end in a 1)
temp = 'model1'
#default for cms2 directory
cmsd = ''
#list of axial and poloidal fluxes
axi = []
pol = []
#index to start creating files
start = 1
for opt, arg in opts:
if opt == '-h':
print(helpinfo)
sys.exit(0)
elif opt in ("-t","--time"):
time = arg
elif opt in ("-c","--cmsdir"):
cmsd = arg
elif opt in ("-o","--outdir"):
sigd = arg
elif opt in ("-m","--modeltemp"):
temp = arg
elif opt in ("-a","--axial"):
axi = arg.split(',')
elif opt in ("-p","--poloidal"):
pol = arg.split(',')
elif opt in ("-s","--start"):
start = int(arg)
mod = cmf.create_cms2_files(time,cmsdir=cmsd,outdir=sigd,tempmodel=temp,axi=axi,pol=pol,start=start)
mod.create_all_files()
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jprchlik/cms2_python_helpers/fff2_input_models_wrapper.py
```python
import sys, getopt
import fff2_input_models as fim
def main(argv):
inargs1 = 'ht:c:o:i:n:s:l:'
snargs1 = inargs1[1:].split(':')
inargs2 = ['time','cmsdir','outdir','ifile',"nproc","minmod","maxmod"]
helpinfo = "create_model_files_wrapper.py is a command line utility which calls the class create_model_files\n"
helpinfo = helpinfo+"The command takes only a few arguments and if you stick to a common theme you should only have to change the time between run\n"
helpinfo = helpinfo+"python create_model_files_wrapper.py"
for i in range(len(inargs2)): helpinfo=helpinfo+' -'+snargs1[i]+' <--'+inargs2[i]+'>'
helpinfo=helpinfo+'\n'
#Descriptive information about each keyword
argsdes=["A string time in the format of YYYY/MM/DD HH:MM:SS",
"The directory containing the CMS2 (default = read 'cms2_dir' file)",
"The directory format for the sigmoid (assumes a subdirectory of cmsdir (default = YYYY/MM/DD/HHMM/",
"The input file format (default = inputYYYYMMDDHHMMSS_mod)",
"The number of processors to run on (default = 6)",
"The first model input number to look for (default = 1)",
"The last model input number to look for (default = 48)"
]
for i in range(len(inargs2)): helpinfo = helpinfo+' -'+snargs1[i]+' <--'+inargs2[i]+'> : '+argsdes[i]+'\n'
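#Illustrative example invocation appended to the help text, mirroring the sibling wrapper above (the flag values shown are hypothetical)
helpinfo = helpinfo+"""\n Example: \n python fff2_input_models_wrapper.py -t "2017/12/31 23:59:00" -n 6 -s 1 -l 48"""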
#load user values
try:
opts,args = getopt.getopt(argv,inargs1,inargs2)
except getopt.GetoptError:
print(helpinfo)
sys.exit(2)
#default for cms2 directory
cmsd = ''
#default for directory structure
sigd = '%Y/%m/%d/%H%M/'
#default for the input model template file (must end in a 1)
fstart = 'input%y%m%d%H%M%S_mod'
#default number of processors
nproc = 6
#model input number to start with
modmin = 1
#model input number to end with
modmax = 48
for opt, arg in opts:
if opt == '-h':
print(helpinfo)
sys.exit(0)
elif opt in ("-t","--time"):
time = arg
elif opt in ("-c","--cmsdir"):
cmsd = arg
elif opt in ("-o","--outdir"):
sigd = arg
elif opt in ("-i","--ifile"):
fstart = arg
elif opt in ("-n","--nproc"):
nproc = int(arg)
elif opt in ("-s","--minmod"):
modmin = int(arg)
elif opt in ("-l","--maxmod"):
print(arg)
modmax = int(arg)
inp = fim.fff2_input_models(time,cmsdir=cmsd,mdir=sigd,fstart=fstart,nproc=nproc,modmin=modmin,modmax=modmax)
if nproc == 1:
inp.run_loop()
elif nproc > 1:
inp.run_par()
elif nproc < 1:
print "Number of processors used must be greater than 0"
sys.exit(2)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: jprchlik/cms2_python_helpers/grab_sigmoid_fits_files.py
```python
from sunpy.net import vso, Fido
from numpy import unique
import glob
from sunpy.sun import carrington_rotation_number
import astropy.units as u
from astropy.io import fits
from datetime import datetime,timedelta
import ftplib
import sys,os
import gzip
import shutil
class download_cms_files:
# initial information about the directories and time for the sigmoid
def __init__(self,time='2009/02/17 11:44:00',nproc=4,cmsdir='',outdir='%Y/%m/%d/%H%M/',x=None,y=None):
"""Sets up inital variables to pass to rest of download_cms_file functions.
Really only need to set the input time string "YYYY/MM/DD HH:MM:SS" and full path to the CMS2 directory.
Then assuming you set up the sigmoid directory to be YYYY/MM/DD/HHMM (can change with outdir variable if needed) you are set."""
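#Example usage (illustrative only; the time, coordinates, and CMS2 path are hypothetical):
# dl = download_cms_files(time='2009/02/17 11:44:00', cmsdir='/path/to/cms2/', x=-350., y=-410.)
# dl.build_subtree()
# dl.download_all()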
if cmsdir == '': cmsdir = open('cms2_dir','r').readlines()[0][:-1]#read in first line of cms2_dir and format to send to script
if cmsdir[-1] != '/': cmsdir=cmsdir+'/'
self.time = time
self.nproc = nproc
self.sform = '%Y/%m/%d %H:%M:%S'
self.dttime = datetime.strptime(self.time,self.sform)
self.basedir = datetime.strftime(self.dttime,outdir)
self.cmsdir = cmsdir
self.x = x
self.y = y
#start date from using sdo data
self.sdo_start = datetime(2010,5,1,0,0,0)
#copy hinode synoptic files
def get_hinode(self):
#Hinode archive
self.hinode_date = '%Y/%m/%d/'
self.hinode_arch = '/archive/hinode/xrt/level1/'+datetime.strftime(self.dttime,self.hinode_date)
#look for timeline to get synoptic timelines
self.find_synoptic_times()
#find times for synoptics
def find_synoptic_times(self):
self.hinode_tfmt = self.hinode_date+'%Y%m%d_exported/'#location of hinode timelines
m = 0 # counter for number of days looking back
for p in range(2):#dirty way to prevent timeline start day from only returning one set of synoptics
lookt = True # found the proper timeline
while lookt:
self.hinode_time = self.dttime-timedelta(days=m)
self.hinode_tldr = '/archive/hinode/xrt/timelines/'+datetime.strftime(self.hinode_time,self.hinode_tfmt)
foundt = os.path.exists(self.hinode_tldr)
m += 1 #increment by 1 day
if m >= 14: return #exit downloading hinode data if no timeline found for 14 days
if foundt: lookt = False #if find hinode directory exit loop
self.copy_synoptics()
#copy xrt synoptics to local directory
def copy_synoptics(self):
self.read_xrt_timeline()
self.xrt_file_list()
for i in self.xrt_files: shutil.copy(i,self.cmsdir+self.basedir)
#get list of files in the time range
def xrt_file_list(self):
#get formatted list of hours
self.xrt_hours = []
for i in self.xrt_beg: self.xrt_hours.append('H{0:%H}00'.format(i))
for i in self.xrt_end: self.xrt_hours.append('H{0:%H}00'.format(i))
#get unique hours
self.xrt_hours = unique(self.xrt_hours)
#get all files in hours and their associated times
self.xrt_files = []
#loop over unique hours
for i in self.xrt_hours:
tempfiles = glob.glob('{0}/{1}/*fits'.format(self.hinode_arch,i))
for j in tempfiles:
temptime = datetime.strptime(j.split('/')[-1].split('.')[0],'L1_XRT%Y%m%d_%H%M%S')
#check if time is in range
for k in range(len(self.xrt_beg)):
if ((temptime >= self.xrt_beg[k]) & (temptime <= self.xrt_end[k])):
#check if header information is compatible with a synoptic
dat = fits.open(j)
hdr = dat[0].header
#check for acceptable filters
fil_check = (((hdr['EC_FW2_'] == 'Ti_poly') | (hdr['EC_FW1_'] == 'Al_poly') | (hdr['EC_FW2_'] == 'Al_mesh') | (hdr['EC_FW1_'] == 'Be_thin') | (hdr['EC_FW2_'] != 'Gband')))
#check header information on fits files to get just synoptics
if ((hdr['NAXIS1'] == 1024) & (hdr['NAXIS2'] == 1024) & (fil_check)):
self.xrt_files.append(j)
#check to make sure self.x and self.y are defined
if ((self.x != None) & (self.y != None)):
#Also check to see if there are any small FOV XRT files within 100'' of y and x
dist = (((hdr['CRVAL1']-self.x)**2.+(hdr['CRVAL2']-self.y)**2.))**.5
if ((dist <= 100) & (fil_check)):
self.xrt_files.append(j)
#read timeline and return synoptic times
def read_xrt_timeline(self):
fmtrept = self.hinode_tldr+'re-point_{0:%Y%m%d}*.txt'.format(self.hinode_time)
repfile = glob.glob(fmtrept)
repoint = open(repfile[-1],'r') # read the repoint file
#list of beginning and end time for synoptics
self.xrt_beg = []
self.xrt_end = []
ender = False #gets the end time for synoptic
timefmt = '%Y/%m/%d %H:%M:%S' #format of time in pointing file
#do not start with an end
end = False
#get the beginning and end times of xrt synoptics
#loop through repoint file lines
for i in repoint:
if end:
end = False
try:
self.xrt_end.append(datetime.strptime(i[20:39],timefmt))
except:
self.xrt_end.append(self.xrt_beg[-1]+timedelta(minutes=20)) #if synoptic is last pointing just add 20 minutes
#look for lines containing the word synoptic
if 'synoptic' in i.lower():
end = True
self.xrt_beg.append(datetime.strptime(i[20:39],timefmt))
#Add continue to prevent errors when synoptic pointing is close to observed AR
continue
#if you want to look for local AR files
if ((self.x != None) & (self.y != None)):
#check for nearby pointings with small FoV <NAME> 2018/01/24
try:
p_x = float(i[72:78])
p_y = float(i[79:87])
#if values are not floats continue
except:
continue
#distance from pointing to planned AR
dist = (((p_x-self.x)**2.+(p_y-self.y)**2.))**.5
#if distance less than 100'' from AR add to list to look for XRT files
if dist < 100:
end = True
self.xrt_beg.append(datetime.strptime(i[20:39],timefmt))
#find carrington rotation number and ask politely for the files
def get_carrington(self):
self.rotnum = carrington_rotation_number(self.time)
#connect to ftp directory
self.ftp = ftplib.FTP('solis.nso.<EMAIL>','anonymous')
#change to carrington rotation number directory
self.ftp.cwd('synoptic/level3/vsm/merged/carr-rot/')
#format of the fits file
self.forfil = "svsm_m21lr_B3_cr{0:4d}.fts.gz"
try:
self.grab_car()
self.ftp.close()
except:
print('Failed unexpectedly, closing ftp access',sys.exc_info()[0])
self.ftp.close()
raise
#get carrington rotation files
def grab_car(self):
#primary rotation number
prot = int(self.rotnum)
#rotation number +/- 1
brot = prot-1
arot = prot+1
#only get 3 carrington rotations
rot_list = [brot,prot,arot]
#only get exact carrington rotation number
#rot_list = [prot]
#NSO synoptic maps only go until 2166
if self.rotnum > 2166:
#Use HMI synoptics
import urllib2
fname = 'hmi.Synoptic_Mr_small.{0:1.0f}.fits'.format(self.rotnum)
hmi_url = 'http://jsoc.stanford.edu/data/hmi/synoptic/'+fname
res = urllib2.urlopen(hmi_url)
#read binary fits file
f_carrot = res.read()
#write fits file locally
with open(self.cmsdir+self.basedir+fname,'wb') as f:
f.write(f_carrot)
#print("Carrington rotation {0:1.0f} is beyond NSO archive".format(self.rotnum))
else:
for rot in rot_list:
fname = self.forfil.format(rot)
#see if file exists with or without .gz
testfile = ((os.path.isfile(self.cmsdir+self.basedir+fname)) | (os.path.isfile(self.cmsdir+self.basedir+fname[:-3])))
#if file does not exist download new file
if testfile == False:
try:
fhandle = open(self.cmsdir+self.basedir+fname,'wb')
self.ftp.retrbinary('RETR {0}'.format(fname),fhandle.write)
fhandle.close()
self.unzip_fil(self.cmsdir+self.basedir+fname)
except:
print("Unable to download carrington rotation map at {0}".format(fname))
#unzip carrington file
def unzip_fil(self,fname):
oname = fname[:-3]
with gzip.open(fname,'rb') as infile:
with open(oname,'wb') as outfile:
for line in infile:
outfile.write(line)
#Get AIA 1024x1024 synoptics from Stanford, since CMS2 cannot handle 4096x4096 files
def get_aia_syn(self):
import urllib
#synoptic archive location
syn_arch = 'http://jsoc.stanford.edu/data/aia/synoptic/'
#check if current minute is even, since synoptics are every 2 minutes
if self.dttime.minute % 2 == 0:
inp_time = self.dttime
#otherwise add 1 minute to current time
else:
inp_time = self.dttime+timedelta(minutes=1)
#use a 2 minute window for the AIA synoptic download <NAME> 2018/01/24
dt = timedelta(minutes=2)
start = inp_time-dt
end = inp_time
#wavelengths to download
d_wav = [131,171,193,211,304]
#create directory path minus the wavelength
f_dir = '{0:%Y/%m/%d/H%H00/AIA%Y%m%d_%H%M_}'
s_dir = f_dir.format(start)
e_dir = f_dir.format(end)
#wavelength format
w_fmt = '{0:04d}.fits'
#download files from archive for each wavelength
for i in d_wav:
#format wavelength
w_fil = w_fmt.format(i)
urllib.urlretrieve(syn_arch+s_dir+w_fil,self.cmsdir+self.basedir+s_dir.split('/')[-1]+w_fil)
urllib.urlretrieve(syn_arch+e_dir+w_fil,self.cmsdir+self.basedir+e_dir.split('/')[-1]+w_fil)
#Get aia files from VSO
def get_aia(self):
#Get Stereo observations
client = vso.VSOClient()
#reduced time to 12 seconds for AIA observation download J. Prchlik 2018/01/18
dt = timedelta(seconds=12)
start = datetime.strftime(self.dttime-dt,self.sform)
end = datetime.strftime(self.dttime+dt,self.sform)
#set time span
time = vso.attrs.Time(start,end)
#grabs both stereo a and b
ins = vso.attrs.Instrument('aia')
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(171*u.AA,171*u.AA)
qr1 = client.query(time,ins,wave)
res1 = client.get(qr1,path=self.cmsdir+self.basedir+'{file}').wait()
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(193*u.AA,193*u.AA)
qr2 = client.query(time,ins,wave)
res2 = client.get(qr2,path=self.cmsdir+self.basedir+'{file}').wait()
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(304*u.AA,304*u.AA)
qr3 = client.query(time,ins,wave)
res3 = client.get(qr3,path=self.cmsdir+self.basedir+'{file}').wait()
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(211*u.AA,211*u.AA)
qr4 = client.query(time,ins,wave)
res4 = client.get(qr4,path=self.cmsdir+self.basedir+'{file}').wait()
#grab stereo files from stereo archive
def grab_stereo(self):
#look in both stereo ahead and behind
beacons = ['ahead','behind']
#set time range around to look for stereo files
dt = timedelta(minutes=30)
start = self.dttime-dt
end = self.dttime+dt
#base directory for start and end directory
f_bdir = '{0:%Y%m%d}/*fts'
s_bdir = f_bdir.format(start)
e_bdir = f_bdir.format(end)
#loop over subdirectories if start and end time cross days
if s_bdir == e_bdir:
l_dir = [s_bdir]
else:
l_dir = [s_bdir,e_bdir]
#loop over stereo ahead and behind
for bea in beacons:
#change to stereo ahead and behind directory continue if directories do not exist
try:
self.s_ftp.cwd('/pub/beacon/{0}/secchi/img/euvi/'.format(bea))
except:
print('No STEREO {0} OBS'.format(bea))
continue
#get list of files in subdirectory
fit_list = []
try:
for days in l_dir: fit_list.append(self.s_ftp.nlst(days))
except:
print('No STEREO {0} OBS at {1}'.format(bea,days))
continue
#flatten the list
flat_list = [item for sublist in fit_list for item in sublist]
#list of files to download
d_list = []
#time range for looping
t_r = 1
#try expanding the time search range
loop = True
#make sure you get at least 4 files
while ((len(d_list) <= 4) & (loop)):
#check datetime in list is between start and end
for fil in flat_list:
#get datetime from list
obs_time = datetime.strptime(fil.split('/')[-1][:15],'%Y%m%d_%H%M%S')
#if in time range add to download list
if ((obs_time >= self.dttime-(dt*t_r)) & (obs_time <= self.dttime+(dt*t_r))): d_list.append(fil)
#increment index
t_r += 1
#don't loop more than 5 times
if t_r > 6: loop = False
#finally download stereo files
for fil in d_list:
fname = fil.split('/')[-1]
testfile = os.path.isfile(self.cmsdir+self.basedir+fname)
#if file does not exist download new file
if testfile == False:
try:
fhandle = open(self.cmsdir+self.basedir+fname,'wb')
self.s_ftp.retrbinary('RETR {0}'.format(fil),fhandle.write)
fhandle.close()
except:
print("Unable to download STEREO observation at {0}".format(fil))
continue
#get stereo files directly from archive
def get_stereo(self):
#connect to ftp directory
self.s_ftp = ftplib.FTP('stereoftp.nascom.nasa.gov','anonymous')
try:
self.grab_stereo()
except:
print('Unable to download STEREO files')
#close ftp connection
self.s_ftp.close()
def get_stereo_vso(self):
#Get Stereo observations
client = vso.VSOClient()
dt = timedelta(minutes=6)
start = datetime.strftime(self.dttime-dt,self.sform)
end = datetime.strftime(self.dttime+dt,self.sform)
#set time span
time = vso.attrs.Time(start,end)
#grabs both stereo a and b
ins = vso.attrs.Instrument('secchi')
#grab particular (UV) wavelengths
wave = vso.attrs.Wavelength(100*u.AA,3000*u.AA)
#qr = client.query(time,ins,wave)
qr = client.search(time,ins,wave)
#res = client.get(qr,path=self.cmsdir+self.basedir+'{file}')
res = client.fetch(qr,path=self.cmsdir+self.basedir+'{file}').wait()
#Move file to a file name with wavelength time included
for k in qr:
swave = k['wave']['wavemin']
sfile = k['fileid'].split('/')[-1].lower() #
shutil.move(self.cmsdir+self.basedir+sfile,self.cmsdir+self.basedir+sfile.replace('.fts','_'+swave+'.fits'))
#Download EUV images
def get_euv(self):
if self.dttime >= self.sdo_start:
#self.get_aia()
self.get_aia_syn()
#include get stereo on recent observations <NAME> (2018/01/18)
self.get_stereo_vso()
else:
self.get_stereo_vso()
#dowload magnetograms
def get_magnetogram(self):
if self.dttime >= self.sdo_start:
self.get_hmi()
else:
self.get_mdi()
#get mdi magnetogram
def get_mdi(self):
client = vso.VSOClient()
dt = timedelta(minutes=96)
start = datetime.strftime(self.dttime-dt,self.sform)
end = datetime.strftime(self.dttime+dt,self.sform)
#set time span
time = vso.attrs.Time(start,end)
#set instrument
ins = vso.attrs.Instrument('mdi')
#set provider which reduces to just 96m magnetograms
#prov = vso.attrs.Provider('SDAC')
#query vso
#qr = client.query(time,ins,prov)
qr = client.search(time,ins)#,prov)
self.qr = qr
#res = client.get(qr,path=self.cmsdir+self.basedir+'{file}').wait()
res = client.fetch(qr,path=self.cmsdir+self.basedir+'{file}').wait()
#Move file to a file name with start time included
for k in qr:
stime = k['time']['start']
stime = stime[:8]+'_'+stime[8:]
sfile = k['fileid'].split('/')[-1].lower().replace('.','_').replace('_fits','.fits')
shutil.move(self.cmsdir+self.basedir+sfile,self.cmsdir+self.basedir+stime+'_'+sfile)
#Get hourly HMI 720s magnetograms from the JSOC archive at Stanford
def get_hmi(self):
import urllib
#hmi archive location
hmi_arch = 'http://jsoc.stanford.edu/data/hmi/fits/'
#check if the current time is at the top of the hour, since the HMI 720s files are hourly
if self.dttime.minute == 0:
inp_time = self.dttime
#otherwise round up to the next hour
else:
inp_time = self.dttime+timedelta(minutes=60)
#use a 60 minute window for the HMI magnetogram download <NAME> 2018/01/24
dt = timedelta(minutes=60)
start = inp_time-dt
end = inp_time
#create full file path hmi
f_dir = '{0:%Y/%m/%d/hmi.M_720s.%Y%m%d_%H0000_TAI.fits}'
s_fil = f_dir.format(start)
e_fil = f_dir.format(end)
#see if files already exist
s_tst = os.path.isfile(self.cmsdir+self.basedir+s_fil.split('/')[-1]) == False
e_tst = os.path.isfile(self.cmsdir+self.basedir+e_fil.split('/')[-1]) == False
#download files from archive
if s_tst: urllib.urlretrieve(hmi_arch+s_fil,self.cmsdir+self.basedir+s_fil.split('/')[-1])
if e_tst: urllib.urlretrieve(hmi_arch+e_fil,self.cmsdir+self.basedir+e_fil.split('/')[-1])
#get hmi magnetogram
def get_hmi_vso(self):
client = vso.VSOClient()
dt = timedelta(minutes=1)
start = datetime.strftime(self.dttime-dt,self.sform)
end = datetime.strftime(self.dttime+dt,self.sform)
phys = vso.attrs.Physobs('LOS_magnetic_field')
#set time span
time = vso.attrs.Time(start,end)
#set instrument
ins = vso.attrs.Instrument('hmi')
#query vso
qr = client.query(time,ins,phys)
res = client.get(qr,path=self.cmsdir+self.basedir+'{file}',methods=("URL-FILE_Rice","URL-FILE")).wait()
#download all
def download_all(self):
self.get_hinode()
try:
self.get_euv()
except:
print('Could not retrieve EUV images')
try:
self.get_carrington()
except:
print('Could not retrieve Carrington Rotation Mag.')
try:
self.get_magnetogram()
except:
print('Could not retrieve High Resolution Mag.')
#create subdirectory tree
def build_subtree(self):
try:
os.makedirs(self.cmsdir+self.basedir)
except:
print('Directory {0} already exists. Proceeding'.format(self.cmsdir+self.basedir))
``` |
{
"source": "Jprebys/crypto_tracker",
"score": 4
} |
#### File: crypto_tracker/src/helpers.py
```python
def load_data():
"""Load data from raw text files into a DataFrame
"""
import pandas as pd
import json
from dateutil.parser import parse
# Load raw text
with open('../src/prices.txt', 'r') as file:
lines = [json.loads(line) for line in file.readlines()]
with open('../src/decoder.txt', 'r') as file:
decoder = json.load(file)
# Hacky formatting for DataFrame
dates = [parse(date) for line in lines for date in line]
data = [list(x.values())[0] for x in lines]
# Make Dataframe and convert coin codes to names
result = pd.DataFrame(data=data, index=dates)
result.columns = result.columns.map(decoder)
return result
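# Example usage (illustrative; assumes ../src/prices.txt and ../src/decoder.txt exist as described above):
# df = load_data()
# print(df.tail())  # most recent rows, one column per coin name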
``` |
{
"source": "Jprebys/food_prices",
"score": 4
} |
#### File: food_prices/src/functions.py
```python
def fao_global_averages(data, food):
'''
take the FAO food prices table and a specific food
return a Series representing the average global price per year
'''
prices = data[data.Item == food]
return prices.groupby('Year').Value.mean()
def fao_pivot(data, food):
'''
take the FAO data and the name of a food
return the pivot table where the rows are the countries,
the columns are the years, and the values are the prices (the Value column)
'''
foods = data[data.Item == food]
return foods.pivot(index='Area', columns='Year', values='Value')
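# Example usage (illustrative; assumes an FAO producer-price table with Item, Area, Year, and Value columns):
# import pandas as pd
# fao = pd.read_csv('fao_prices.csv')
# wheat_avg = fao_global_averages(fao, 'Wheat')  # Series of global mean price per Year
# wheat_tbl = fao_pivot(fao, 'Wheat')            # countries (rows) x years (columns)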
``` |
{
"source": "jpreece/saspy",
"score": 2
} |
#### File: saspy/saspy/sasbase.py
```python
try:
import pandas
except Exception as e:
pass
import os
import sys
import datetime
import getpass
import importlib
import re
import shutil
import tempfile
from saspy.sasiostdio import SASsessionSTDIO
from saspy.sasioiom import SASsessionIOM
from saspy.sasiohttp import SASsessionHTTP
from saspy.sasiocom import SASSessionCOM
from saspy.sasdata import SASdata
from saspy.sasml import SASml
from saspy.sasets import SASets
from saspy.sasqc import SASqc
from saspy.sasstat import SASstat
from saspy.sasutil import SASutil
from saspy.sasViyaML import SASViyaML
from saspy.sasexceptions import (SASIONotSupportedError, SASConfigNotValidError,
SASConfigNotFoundError)
_cfgfile_cnt = 0
try:
from IPython.display import HTML
from IPython.display import display as DISPLAY
except ImportError:
def DISPLAY(x): print(x)
def HTML(x): return "IPython didn't import. Can't render HTML"
def zepDISPLAY(x):
print(x)
def zepHTML(x):
return("%html "+x)
def dbDISPLAY(x):
displayHTML(x)
def dbHTML(x):
return(x)
def list_configs() -> list:
cfg = []
sp = []
sp[:] = sys.path
sp[0] = os.path.abspath(sp[0])
sp.insert(1, os.path.expanduser('~/.config/saspy'))
sp.insert(0, __file__.rsplit(os.sep+'sasbase.py')[0])
for dir in sp:
f1 = dir+os.sep+'sascfg_personal.py'
if os.path.isfile(f1):
cfg.append(f1)
if len(cfg) == 0:
f1 =__file__.rsplit('sasbase.py')[0]+'sascfg.py'
if os.path.isfile(f1):
cfg.append(f1)
return cfg
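# Example (illustrative): report which sascfg files saspy can see on this system,
# assuming list_configs is exported at the package level (else import it from saspy.sasbase)
# import saspy
# for cfg in saspy.list_configs():
#     print(cfg)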
class SASconfig(object):
"""
This object is not intended to be used directly. Instantiate a SASsession object instead
"""
DOTCONFIG = '~/.config/saspy/'
def __init__(self, **kwargs):
self._kernel = kwargs.get('kernel', None)
self.valid = True
self.mode = ''
self.origin = ''
configs = []
try:
import pandas
self.pandas = None
except Exception as e:
self.pandas = e
SAScfg = self._find_config(cfg_override=kwargs.get('cfgfile'))
self.SAScfg = SAScfg
# Get Config options. Fallback to empty dict.
self.cfgopts = getattr(SAScfg, "SAS_config_options", {})
# See if we don't want to allow prompting in this environment
prompt = self.cfgopts.get('prompt', True)
self.prompt = kwargs.get('prompt', prompt)
# In lock down mode, don't allow runtime overrides of option values from the config file.
lock = self.cfgopts.get('lock_down', True)
# Get Config names. Fallback to empty list.
configs = getattr(SAScfg, "SAS_config_names", [])
cfgname = kwargs.get('cfgname', '')
if len(cfgname) == 0:
if len(configs) == 0:
raise SASConfigNotValidError(cfgname, msg='No SAS_config_names found in saspy.sascfg')
else:
if len(configs) == 1:
cfgname = configs[0]
if self._kernel is None:
print("Using SAS Config named: " + cfgname)
else:
cfgname = self._prompt(
"Please enter the name of the SAS Config you wish to run. Available Configs are: " +
str(configs) + " ")
while cfgname not in configs:
cfgname = self._prompt(
"The SAS Config name specified was not found. Please enter the SAS Config you wish to use. Available Configs are: " +
str(configs) + " ")
if cfgname is None:
raise RuntimeError("No SAS Config name provided.")
self.name = cfgname
cfg = getattr(SAScfg, cfgname)
ip = cfg.get('ip')
url = cfg.get('url')
ssh = cfg.get('ssh')
path = cfg.get('saspath')
java = cfg.get('java')
provider = cfg.get('provider')
self.display = cfg.get('display', '')
self.results = cfg.get('results')
self.autoexec = cfg.get('autoexec')
self.m5dsbug = cfg.get('m5dsbug')
indisplay = kwargs.get('display', '')
if len(indisplay) > 0:
if lock and len(self.display):
print("Parameter 'display' passed to SAS_session was ignored due to configuration restriction.")
else:
self.display = indisplay
if self.display == '':
self.display = 'jupyter'
else:
if self.display.lower() not in ['zeppelin', 'jupyter', 'databricks']:
print("Invalid value specified for 'display'. Using the default of 'jupyter'")
self.display = 'jupyter'
if self.display.lower() == 'zeppelin':
self.DISPLAY = zepDISPLAY
self.HTML = zepHTML
elif self.display.lower() == 'databricks':
self.DISPLAY = dbDISPLAY
self.HTML = dbHTML
else:
self.DISPLAY = DISPLAY
self.HTML = HTML
inautoexec = kwargs.get('autoexec', None)
if inautoexec:
if lock and self.autoexec is not None:
print("Parameter 'autoexec' passed to SAS_session was ignored due to configuration restriction.")
else:
self.autoexec = inautoexec
inm5dsbug = kwargs.get('m5dsbug', None)
if inm5dsbug is not None:
self.m5dsbug = inm5dsbug
inurl = kwargs.get('url', None)
if inurl:
if lock and url is not None:
print("Parameter 'url' passed to SAS_session was ignored due to configuration restriction.")
else:
url = inurl
inip = kwargs.get('ip', None)
if inip:
if lock and ip is not None:
print("Parameter 'ip' passed to SAS_session was ignored due to configuration restriction.")
else:
ip = inip
inssh = kwargs.get('ssh', None)
if inssh:
if lock and ssh is not None:
print("Parameter 'ssh' passed to SAS_session was ignored due to configuration restriction.")
else:
ssh = inssh
insaspath = kwargs.get('saspath', None)
if insaspath:
if lock and path is not None:
print("Parameter 'saspath' passed to SAS_session was ignored due to configuration restriction.")
else:
path = insaspath
injava = kwargs.get('java', None)
if injava:
if lock and java is not None:
print("Parameter 'java' passed to SAS_session was ignored due to configuration restriction.")
else:
java = injava
inprov = kwargs.get('provider', None)
if inprov:
if lock and provider is not None:
print("Parameter 'provider' passed to SAS_session was ignored due to configuration restriction.")
else:
provider = inprov
if java is not None:
self.mode = 'IOM'
elif url is not None:
self.mode = 'HTTP'
elif ip is not None:
self.mode = 'HTTP'
elif ssh is not None:
self.mode = 'SSH'
elif provider is not None:
self.mode = 'COM'
elif path is not None:
self.mode = 'STDIO'
else:
raise SASConfigNotValidError(cfgname)
def _find_config(self, cfg_override: str=None):
"""
Locate the user's preferred configuration file if possible, falling
back through a hierarchy of configuration file locations. The hierarchy
is as follows:
1. If a `cfgfile` param is provided to `sas.SASsession()`, use this
configuration or nothing else. If the configuration path is
invalid, raise an exception.
2. If no `cfgfile` param is provided, use existing behavior of global
"personal" config in the saspy library path.
3. If no global "personal" file is found, search for a "personal" config
in the local scope (`sys.path[0]`). This is mainly to support a
local project config that differs from a more general one.
4. If no config file is found locally, search for a "personal"
config in the user's $HOME/.config/saspy directory.
5. Finally, fall back to the standard `sascfg.py` file in the
library path, then further down the rest of the path.
:option cfg_override: The provided `cfgfile` param to `sas.SASsession()`
:return [module]:
"""
if cfg_override is not None:
# Option 1
#
# This is the config file override import method, which copies a
# given config file to a temp location and imports. This method
# can be significantly cleaner if using the builtin importlib
# functions, but we must support Python versions <= 3.4 (all EOL).
cfg_expand = os.path.expanduser(cfg_override)
# Check file exists before proceeding
if not os.path.exists(cfg_expand):
raise SASConfigNotFoundError(cfg_expand)
self.origin = cfg_expand
global _cfgfile_cnt
_cfgfile_cnt += 1
tempdir = tempfile.TemporaryDirectory()
tempname = "sascfg"+'%03d' % _cfgfile_cnt
shutil.copyfile(cfg_expand, os.path.join(tempdir.name, tempname+'.py'))
sys.path.append(tempdir.name)
#import sascfgfile as SAScfg
SAScfg = importlib.import_module(tempname)
sys.path.remove(tempdir.name)
tempdir.cleanup()
else:
# Options 2, 3, 4, 5
# Insert saspy config folder behind any local configs but ahead of other
# configurations on the system.
cfg_path = os.path.expanduser(self.DOTCONFIG)
sys.path.insert(1, cfg_path)
mod_path = __file__.replace(os.sep+'sasbase.py', '')
sys.path.insert(0, mod_path)
try:
# Option 2, 3, 4
import sascfg_personal as SAScfg
except ImportError:
# Option 5
import sascfg as SAScfg
finally:
sys.path.remove(cfg_path)
sys.path.remove(mod_path)
self.origin = SAScfg.__spec__.origin
return SAScfg
def _prompt(self, prompt, pw=False):
if self.prompt:
if self._kernel is None:
if not pw:
try:
return input(prompt)
except KeyboardInterrupt:
return None
else:
try:
return getpass.getpass(prompt)
except KeyboardInterrupt:
return None
else:
try:
return self._kernel._input_request(prompt, self._kernel._parent_ident, self._kernel._parent_header,
password=pw)
except KeyboardInterrupt:
return None
else:
return None
class SASsession():
"""
**Overview**
The SASsession object is the main object to instantiate and provides access to the rest of the functionality.
Most of these parameters will be configured in the sascfg_personal.py configuration file.
All of these parameters are documented more thoroughly in the configuration section of the saspy doc:
https://sassoftware.github.io/saspy/install.html#configuration
These are generally defined in the sascfg_personal.py file as opposed to being specified on the SASsession() invocation.
Common parms for all access methods are:
:param cfgname: the Configuration Definition to use - value in SAS_config_names List in the sascfg_personal.py file
:param cfgfile: fully qualified file name of your sascfg_personal.py file, if it's not in the python search path
:param kernel: None - internal use when running the SAS_kernel notebook
:param results: Type of tabular results to return. default is 'Pandas', other options are 'HTML' or 'TEXT'
:param lrecl: An integer specifying the record length for transferring wide data sets from SAS to Data Frames.
:param autoexec: A string of SAS code that will be submitted upon establishing a connection
:param display: controls how to display html in different notebooks. default is jupyter.
valid values are ['jupyter', 'zeppelin', 'databricks']
:return: 'SASsession'
:rtype: 'SASsession'
And each access method has its own set of parameters.
**STDIO**
:param saspath: overrides saspath Dict entry of cfgname in sascfg_personal.py file
:param options: overrides options Dict entry of cfgname in sascfg_personal.py file
:param encoding: This is the python encoding value that matches the SAS session encoding
**STDIO over SSH**
and for running STDIO over passwordless ssh, add these required parameters
:param ssh: full path of the ssh command; /usr/bin/ssh for instance
:param host: host name of the remote machine
:param identity: (Optional) path to a .ppk identity file to be used on the ssh -i parameter
:param port: (Optional) The ssh port of the remote machine normally 22 (equivalent to invoking ssh with the -p option)
:param tunnel: (Optional) Certain methods of saspy require opening a local port and accepting data streamed from the SAS instance.
:param rtunnel: (Optional) Certain methods of saspy require opening a remote port and accepting data streamed to the SAS instance.
**IOM**
and for the IOM IO module to connect to SAS9 via Java IOM
:param java: the path to the java executable to use
:param iomhost: [for remote IOM case, not local Windows] the resolvable host name, or ip of the IOM server to connect to
:param iomport: [for remote IOM case, not local Windows] the port IOM is listening on
:param omruser: user id for remote IOM access
:param omrpw: pw for user for remote IOM access
:param encoding: This is the python encoding value that matches the SAS session encoding of the IOM server you are connecting to
:param classpath: classpath to IOM client jars and saspyiom client jar.
:param authkey: Key value for finding credentials in .authfile
:param timeout: Timeout value for establishing connection to workspace server
:param appserver: Appserver name of the workspace server to connect to
:param sspi: Boolean for using IWA to connect to a workspace server configured to use IWA
:param javaparms: for specifying java command line options if necessary
:param logbufsz: see issue 266 for details on this. not needed normally
**COM**
and for IOM IO via COM
:param iomhost: Resolvable host name or IP of the server
:param iomport: Server port
:param class_id: IOM workspace server class identifier
:param provider: IOM provider
:param authkey: Key value for finding credentials in .authfile
:param encoding: This is the python encoding value that matches the SAS
session encoding of the IOM server
:param omruser: User
:param omrpw: Password
**Common SASsession attributes**
The values of the following attributes will be displayed if you submit a SASsession object.
These can be referenced programmatically in your code. For the Booleans, you should use the provided methods to set them,
or change their value. The others you should NOT change, for obvious reasons.
- workpath - string containing the WORK libref's filesystem path.
- sasver - string of the SAS Version for the SAS server connected to
- version - string of the saspy version you're running
- nosub - Boolean for current value of the teach_me_SAS() setting.
- batch - Boolean for current value of the set_batch() setting.
- results - Boolean for current value of the set_results() setting.
- sascei - string for the SAS Session Encoding this SAS server is using
- SASpid - The SAS process id, or None if no SAS session connected
"""
# SAS Epoch: 1960-01-01
SAS_EPOCH = datetime.datetime(1960, 1, 1)
# def __init__(self, cfgname: str ='', kernel: 'SAS_kernel' =None, saspath :str ='', options: list =[]) -> 'SASsession':
def __init__(self, **kwargs):
self._loaded_macros = False
self._obj_cnt = 0
self.nosub = False
self.sascfg = SASconfig(**kwargs)
self.batch = False
self.results = kwargs.get('results', self.sascfg.results)
if not self.results:
self.results = 'Pandas'
if self.sascfg.pandas and self.results.lower() == 'pandas':
self.results = 'HTML'
print('Pandas module not available. Setting results to HTML')
self.workpath = ''
self.sasver = ''
self.version = sys.modules['saspy'].__version__
self.sascei = ''
self.SASpid = None
self.HTML_Style = "HTMLBlue"
self.sas_date_fmts = sas_date_fmts
self.sas_time_fmts = sas_time_fmts
self.sas_datetime_fmts = sas_datetime_fmts
self.DISPLAY = self.sascfg.DISPLAY
self.HTML = self.sascfg.HTML
self.logoffset = 0
self.check_error_log = False
if not self.sascfg.valid:
self._io = None
return
if self.sascfg.mode in ['STDIO', 'SSH', '']:
if os.name != 'nt' or self.sascfg.mode == 'SSH':
self._io = SASsessionSTDIO(sascfgname=self.sascfg.name, sb=self, **kwargs)
else:
raise SASIONotSupportedError(self.sascfg.mode, alts=['IOM'])
elif self.sascfg.mode == 'IOM':
self._io = SASsessionIOM(sascfgname=self.sascfg.name, sb=self, **kwargs)
elif self.sascfg.mode == 'COM':
self._io = SASSessionCOM(sascfgname=self.sascfg.name, sb=self, **kwargs)
elif self.sascfg.mode == 'HTTP':
self._io = SASsessionHTTP(sascfgname=self.sascfg.name, sb=self, **kwargs)
# gather some session info
sysvars = "data _null_; length x $ 4096;"
if self.sascfg.mode in ['STDIO', 'SSH', '']:
sysvars += " file STDERR;"
sysvars += """
x = resolve('%sysfunc(pathname(work))'); put 'WORKPATH=' x 'WORKPATHEND=';
x = resolve('&SYSENCODING'); put 'ENCODING=' x 'ENCODINGEND=';
x = resolve('&SYSVLONG4'); put 'SYSVLONG=' x 'SYSVLONGEND=';
x = resolve('&SYSJOBID'); put 'SYSJOBID=' x 'SYSJOBIDEND=';
x = resolve('&SYSSCP'); put 'SYSSCP=' x 'SYSSCPEND=';
run;
"""
# Validating encoding is done next, so handle it not being set for
# this one call
enc = self._io.sascfg.encoding
if enc == '':
self._io.sascfg.encoding = 'utf_8'
res = self._io.submit(sysvars, "text")['LOG']
self._io.sascfg.encoding = enc
vlist = res.rpartition('SYSSCP=')
self.hostsep = vlist[2].partition(' SYSSCPEND=')[0]
vlist = res.rpartition('SYSJOBID=')
self.SASpid = vlist[2].partition(' SYSJOBIDEND=')[0]
vlist = res.rpartition('SYSVLONG=')
self.sasver = vlist[2].partition(' SYSVLONGEND=')[0]
vlist = res.rpartition('ENCODING=')
self.sascei = vlist[2].partition(' ENCODINGEND=')[0]
vlist = res.rpartition('WORKPATH=')
self.workpath = vlist[2].rpartition('WORKPATHEND=')[0].strip().replace('\n','')
# validate encoding
if self.sascfg.mode != 'HTTP':
try:
self.pyenc = sas_encoding_mapping[self.sascei]
except KeyError:
print("Invalid response from SAS on inital submission. printing the SASLOG as diagnostic")
print(self._io._log)
raise
if self.pyenc is not None:
if self._io.sascfg.encoding != '':
if self._io.sascfg.encoding.lower() not in self.pyenc:
print("The encoding value provided doesn't match the SAS session encoding.")
print("SAS encoding is "+self.sascei+". Specified encoding is "+self._io.sascfg.encoding+".")
print("Using encoding "+self.pyenc[1]+" instead to avoid transcoding problems.")
self._io.sascfg.encoding = self.pyenc[1]
print("You can override this change, if you think you must, by changing the encoding attribute of the SASsession object, as follows.")
print("""If you had 'sas = saspy.SASsession(), then submit: "sas._io.sascfg.encoding='override_encoding'" to change it.\n""")
else:
self._io.sascfg.encoding = self.pyenc[1]
if self._io.sascfg.verbose:
print("No encoding value provided. Will try to determine the correct encoding.")
print("Setting encoding to "+self.pyenc[1]+" based upon the SAS session encoding value of "+self.sascei+".\n")
else:
print("The SAS session encoding for this session ("+self.sascei+") doesn't have a known Python equivalent encoding.")
if self._io.sascfg.encoding == '':
self._io.sascfg.encoding = 'utf_8'
print("Proceeding using the default encoding of 'utf_8', though you may encounter transcoding problems.\n")
else:
print("Proceeding using the specified encoding of "+self._io.sascfg.encoding+", though you may encounter transcoding problems.\n")
else:
self.pyenc = sas_encoding_mapping['utf-8']
if self.hostsep == 'WIN':
self.hostsep = '\\'
else:
self.hostsep = '/'
self.workpath = self.workpath + self.hostsep
if self.sascfg.autoexec:
self._io.submit(self.sascfg.autoexec)
if self.sascfg.m5dsbug is None:
if self.sasver[:9] in ['9.04.01M5'] and self.sascei in ['utf-8', 'euc-cn', 'euc-jp', 'euc-kr', 'shift-jis', 'big5']:
#if self.sasver[:9] in ['9.04.01M5']: #, 'V.03.04M0', 'V.03.03M0']: couldn't reproduce on SPRE
self.m5dsbug = True
print("There is a known bug in the Data Step in 9.40M5 with multibyte encodings. This session is connected to that version")
print("running with a multibyte encoding. Setting 'm5dsbug' to True to use alternate code to work around this bug.")
print("You can eliminate this message by setting {'m5dsbug' : True} (or to False if the deployment has been hotfixed)")
print("in your configuration definition for this connection, or on SASsession(m5dsbug = [True | False]).\n")
else:
self.m5dsbug = False
else:
self.m5dsbug = self.sascfg.m5dsbug
# this is to support parsing the log to find log records w/ 'ERROR' when diagnostic logging is enabled.
# in this case the log can have prefix and/or suffix info so the 'regular' log data is in the middle, not left justified
if self.sascfg.mode in ['STDIO', 'SSH', '']:
ll = self._io.submit("""data _null_; file STDERR; put %upcase('col0REG=');
data _null_; put %upcase('col0LOG=');run;""", results='text')
regoff = len(ll['LOG'].rpartition('COL0REG=')[0].rpartition('\n')[2])
logoff = len(ll['LOG'].rpartition('COL0LOG=')[0].rpartition('\n')[2])
if regoff == 0 and logoff > 0:
self.logoffset = logoff
self._lastlog = self._io._log
def __repr__(self):
"""
Display info about this object
:return [str]:
"""
if self._io is None:
pyenc = ''
if self.sascfg.cfgopts.get('verbose', True):
print("This SASsession object is not valid\n")
else:
pyenc = self._io.sascfg.encoding
x = "Access Method = %s\n" % self.sascfg.mode
x += "SAS Config name = %s\n" % self.sascfg.name
x += "SAS Config file = %s\n" % self.sascfg.origin
x += "WORK Path = %s\n" % self.workpath
x += "SAS Version = %s\n" % self.sasver
x += "SASPy Version = %s\n" % self.version
x += "Teach me SAS = %s\n" % str(self.nosub)
x += "Batch = %s\n" % str(self.batch)
x += "Results = %s\n" % self.results
x += "SAS Session Encoding = %s\n" % self.sascei
x += "Python Encoding value = %s\n" % pyenc
x += "SAS process Pid value = %s\n" % self.SASpid
x += "\n"
return x
def __del__(self):
if getattr(self, '_io', None) is not None:
return self._io.__del__()
def _objcnt(self):
self._obj_cnt += 1
return '%04d' % self._obj_cnt
def _startsas(self):
return self._io._startsas()
def endsas(self):
"""
This method terminates the SAS session, shutting down the SAS process.
"""
return self._endsas()
def _endsas(self):
self.SASpid = None
if self._io:
return self._io._endsas()
def _getlog(self, **kwargs):
return self._io._getlog(**kwargs)
def _getlst(self, **kwargs):
return self._io._getlst(**kwargs)
def _getlsttxt(self, **kwargs):
return self._io._getlsttxt(**kwargs)
def _asubmit(self, code, results):
if results == '':
if self.results.upper() == 'PANDAS':
results = 'HTML'
else:
results = self.results
return self._io._asubmit(code, results)
def submitLOG(self, code, results: str = '', prompt: dict = None, printto=False, **kwargs):
'''
This method is a convenience wrapper around the submit() method. It executes the submit then prints the LOG that was returned.
'''
print(self.submit(code, results, prompt, printto, **kwargs)['LOG'])
def submitLST(self, code, results: str = '', prompt: dict = None, method: str = None, printto=False, **kwargs):
'''
This method is a convenience wrapper around the submit() method. It executes the submit then renders the LST that was returned,
as either HTML or TEXT, depending upon results. The method= parameter allows you to adjust what gets returned to suit your needs.
- listorlog - this is the default as of V3.6.5. returns the LST, unless it's empty, then it returns the LOG instead \
(one or the other). Useful in case there's an ERROR.
- listonly - this was the default, and returns the LST (will be empty if no output was produced by what you submitted)
- listandlog - as you might guess, this returns both. The LST followed by the LOG
- logandlist - as you might guess, this returns both. The LOG followed by the LST
'''
if method is None:
method = 'listorlog'
if method.lower() not in ['listonly', 'listorlog', 'listandlog', 'logandlist']:
print("The specified method is not valid. Using the default: 'listorlog'")
method = 'listorlog'
if results == '':
if self.results.upper() == 'PANDAS':
results = 'HTML'
else:
results = self.results
ll = self.submit(code, results, prompt, printto, **kwargs)
if results.upper() == 'HTML':
if method.lower() == 'listonly':
self.DISPLAY(self.HTML(ll['LST']))
elif method.lower() == 'listorlog':
if len(ll['LST']) > 0:
self.DISPLAY(self.HTML(ll['LST']))
else:
self.DISPLAY(self.HTML("<pre>"+ll['LOG']+"</pre>"))
elif method.lower() == 'listandlog':
self.DISPLAY(self.HTML(ll['LST']+"\n<pre>"+ll['LOG']+"</pre>"))
else:
self.DISPLAY(self.HTML("<pre>"+ll['LOG']+"\n</pre>"+ll['LST']))
else:
if method.lower() == 'listonly':
print(ll['LST'])
elif method.lower() == 'listorlog':
if len(ll['LST']) > 0:
print(ll['LST'])
else:
print(ll['LOG'])
elif method.lower() == 'listandlog':
print(ll['LST']+"\n"+ll['LOG'])
else:
print(ll['LOG']+"\n"+ll['LST'])
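# Example (illustrative) of the submitLST convenience wrapper described above:
# sas.submitLST('proc print data=sashelp.class (obs=5); run;', method='listorlog')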
def submit(self, code: str, results: str = '', prompt: dict = None, printto=False, **kwargs) -> dict:
'''
This method is used to submit any SAS code. It returns the Log and Listing as a python dictionary.
:param saspath: overrides saspath Dict entry of cfgname in sascfg_personal.py file
:param code: the SAS statements you want to execute
:param results: format of results. 'HTML' by default, alternatively 'TEXT'
:param prompt: dict of names and flags to prompt for; create macro variables (used in submitted code), then keep or delete \
the keys which are the names of the macro variables. The boolean flag is to either hide what you type and \
delete the macros, or show what you type and keep the macros (they will still be available later).
for example (what you type for pw will not be displayed, user and dsname will):
.. code-block:: python
results_dict = sas.submit(
"""
libname tera teradata server=teracop1 user=&user pw=&pw;
proc print data=tera.&dsname (obs=10); run;
""" ,
prompt = {'user': False, 'pw': True, 'dsname': False}
)
:param printto: this option, when set to True, will cause saspy to issue a 'proc printto;run;' after the code that is being \
submitted. This will 'undo' any proc printto w/in the submitted code that redirected the LOG or LST, to return \
the LOG/LST back to saspy. This is explained in more detail in the doc: https://sassoftware.github.io/saspy/limitations.html
:return: a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
In Zeppelin, the html LST results can be displayed via print("%html "+ ll['LST']) to display as HTML.
i.e.: results = sas.submit("data a; x=1; run; proc print; run;")
print(results['LOG'])
HTML(results['LST'])
'''
if self.nosub:
return dict(LOG=code, LST='')
prompt = prompt if prompt is not None else {}
if results == '':
if self.results.upper() == 'PANDAS':
results = 'HTML'
else:
results = self.results
ll = self._io.submit(code, results, prompt, undo=printto, **kwargs)
return ll
def saslog(self) -> str:
"""
This method is used to get the current, full contents of the SASLOG
:return: SAS log
:rtype: str
"""
return self._io.saslog()
def lastlog(self) -> str:
"""
This method is used to get the LOG from the most recently executed submit() method. That is either
a user submitted submit() or internally submitted by any saspy method. This is just a convenience
over the saslog() method, to just see the LOG for the last code that was submitted instead of the
whole session.
:return: SAS log (partial)
:rtype: str
"""
return self._lastlog
def teach_me_SAS(self, nosub: bool):
"""
:param nosub: bool. True means don't submit the code, print it out so I can see what the SAS code would be. \
False means run normally - submit the code.
"""
self.nosub = nosub
def set_batch(self, batch: bool):
"""
This method sets the batch attribute for the SASsession object; it stays in effect until changed.
For methods that just display results like SASdata object methods (head, tail, hist, series, etc.)
and SASresult object results, you can set 'batch' to true to get the results back directly so you
can write them to files or whatever you want to do with them.
This is intended for use in python batch scripts so you can still get ODS XML5 results
and save them to files, which you couldn't otherwise do for these methods.
When running interactively, the expectation is that you want to have the results directly rendered,
but you can run this way too; get the objects display them yourself and/or write them to somewhere.
When `set_batch ==True`, you get the same dictionary returned as from the `SASsession.submit()` method.
:param batch: bool True = return dict([LOG, LST]. False = display LST to screen.
"""
self.batch = batch
def set_results(self, results: str):
"""
This method sets the results attribute for the SASsession object; it stays in effect until changed
:param results: set the default result type for this SASdata object. ``'Pandas' or 'HTML' or 'TEXT'``.
:return: string of the return type
:rtype: str
"""
self.results = results
def exist(self, table: str, libref: str = "") -> bool:
"""
Does the SAS data set currently exist
:param table: the name of the SAS Data Set
:param libref: the libref for the Data Set, defaults to WORK, or USER if assigned
:return: Boolean True if the Data Set exists and False if it does not
:rtype: bool
"""
return self._io.exist(table, libref)
def sasets(self) -> 'SASets':
"""
This methods creates a SASets object which you can use to run various analytics.
See the sasets.py module.
:return: sasets object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASets(self)
def sasstat(self) -> 'SASstat':
"""
This methods creates a SASstat object which you can use to run various analytics.
See the sasstat.py module.
:return: sasstat object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASstat(self)
def sasml(self) -> 'SASml':
"""
This methods creates a SASML object which you can use to run various analytics. See the sasml.py module.
:return: sasml object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASml(self)
def sasqc(self) -> 'SASqc':
"""
This methods creates a SASqc object which you can use to run various analytics. See the sasqc.py module.
:return: sasqc object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASqc(self)
def sasutil(self) -> 'SASutil':
"""
This methods creates a SASutil object which you can use to run various analytics.
See the sasutil.py module.
:return: sasutil object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASutil(self)
def sasviyaml(self) -> 'SASViyaML':
"""
This methods creates a SASViyaML object which you can use to run various analytics.
See the SASViyaML.py module.
:return: SASViyaML object
"""
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASViyaML(self)
def _loadmacros(self):
"""
Load the SAS macros at the start of the session
:return:
"""
macro_path = os.path.dirname(os.path.realpath(__file__))
fd = os.open(macro_path + '/' + 'libname_gen.sas', os.O_RDONLY)
code = b'options nosource;\n'
code += os.read(fd, 32767)
code += b'\noptions source;'
self._io._asubmit(code.decode(), results='text')
os.close(fd)
def _render_html_or_log(self, ll):
"""
This method renders the html lst if it's there else the log
"""
if len(ll['LST']) > 0:
self.DISPLAY(self.HTML(ll['LST']))
else:
self.DISPLAY(self.HTML("<pre> NO HTML TO RENDER. LOG IS:\n"+ll['LOG']+" </pre>"))
def sasdata(self, table: str, libref: str = '', results: str = '', dsopts: dict = None) -> 'SASdata':
"""
Method to define an existing SAS dataset so that it can be accessed via SASPy
:param table: the name of the SAS Data Set
:param libref: the libref for the Data Set, defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, Pandas, HTML and TEXT are the valid options
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):
- where is a string
- keep are strings or list of strings.
- drop are strings or list of strings.
- obs is a number - either string or int
- firstobs is a number - either string or int
- format is a string or dictionary { var: format }
- encoding is a string
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"' ,
'keep' : 'msrp enginesize Cylinders Horsepower Weight' ,
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] ,
'obs' : 10 ,
'firstobs' : '12' ,
'format' : {'money': 'dollar10', 'time': 'tod5.'} ,
'encoding' : 'latin9'
}
:return: SASdata object
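A short usage sketch, assuming an active SASsession named ``sas``; the table, libref and dsopts shown are illustrative:
.. code-block:: python
cars = sas.sasdata('cars', 'sashelp', results='pandas',
                   dsopts={'where': 'msrp < 20000', 'obs': 10})
cars.head()   # with results='pandas', rows come back as a Pandas DataFrame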
"""
lastlog = len(self._io._log)
dsopts = dsopts if dsopts is not None else {}
if results == '':
results = self.results
sd = SASdata(self, libref, table, results, dsopts)
if not self.exist(sd.table, sd.libref):
if not self.batch:
print(
"Table " + sd.libref + '.' + sd.table + " does not exist. This SASdata object will not be useful until the data set is created.")
self._lastlog = self._io._log[lastlog:]
return sd
def saslib(self, libref: str, engine: str = ' ', path: str = '',
options: str = ' ', prompt: dict = None) -> str:
"""
:param libref: the libref to be assigned
:param engine: the engine name used to access the SAS Library (engine defaults to BASE, per SAS)
:param path: path to the library (for engines that take a path parameter)
:param options: other engine or engine supervisor options
:return: SAS log
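A minimal sketch, assuming an active SASsession named ``sas`` and a directory that exists on the SAS server:
.. code-block:: python
sas.saslib('mylib', path='/some/path/on/the/sas/server')   # submits a libname statement for that path
sas.exist('mytable', 'mylib')                              # the new libref can then be used in other methods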
"""
prompt = prompt if prompt is not None else {}
code = "libname " + libref + " " + engine + " "
if len(path) > 0:
code += " '" + path + "' "
code += options + ";"
if self.nosub:
print(code)
else:
ll = self._io.submit(code, "text", prompt)
if self.batch:
return ll['LOG'].rsplit(";*\';*\";*/;\n")[0]
else:
print(ll['LOG'].rsplit(";*\';*\";*/;\n")[0])
def datasets(self, libref: str = '') -> str:
"""
This method is used to query a libref. The results show information about the libref including members.
:param libref: the libref to query
:return:
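For example, assuming an active SASsession named ``sas``:
.. code-block:: python
sas.datasets('sashelp')   # renders (or returns, in batch mode) the PROC DATASETS output for the libref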
"""
code = "proc datasets"
if libref:
code += " dd=" + libref
code += "; quit;"
if self.nosub:
print(code)
else:
if self.results.lower() == 'html':
ll = self._io.submit(code, "html")
if not self.batch:
self._render_html_or_log(ll)
else:
return ll
else:
ll = self._io.submit(code, "text")
if self.batch:
return ll['LOG'].rsplit(";*\';*\";*/;\n")[0]
else:
print(ll['LOG'].rsplit(";*\';*\";*/;\n")[0])
def read_csv(self, file: str, table: str = '_csv', libref: str = '', results: str = '',
opts: dict = None) -> 'SASdata':
"""
:param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
:param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
:return: SASdata object
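A minimal sketch, assuming an active SASsession named ``sas``; the file path and options are illustrative:
.. code-block:: python
cars = sas.read_csv('./cars.csv', table='cars_csv', libref='work',
                    opts={'getnames': True, 'guessingrows': 100})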
"""
lastlog = len(self._io._log)
opts = opts if opts is not None else {}
if results == '':
results = self.results
self._io.read_csv(file, table, libref, self.nosub, opts)
if self.exist(table, libref):
sd = SASdata(self, libref, table, results)
else:
sd = None
self._lastlog = self._io._log[lastlog:]
return sd
def write_csv(self, file: str, table: str, libref: str = '',
dsopts: dict = None, opts: dict = None) -> str:
"""
:param file: the OS filesystem path of the file to be created (exported from the SAS Data Set)
:param table: the name of the SAS Data Set you want to export to a CSV file
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs)
- where is a string
- keep is a string or a list of strings
- drop is a string or a list of strings
- obs is a number - either string or int
- firstobs is a number - either string or int
- format is a string or dictionary { var: format }
- encoding is a string
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"' ,
'keep' : 'msrp enginesize Cylinders Horsepower Weight' ,
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] ,
'obs' : 10 ,
'firstobs' : '12' ,
'format' : {'money': 'dollar10', 'time': 'tod5.'} ,
'encoding' : 'latin9'
}
:param opts: a dictionary containing any of the following Proc Export options(delimiter, putnames)
- delimiter is a single character
- putnames is a bool [True | False]
.. code-block:: python
{'delimiter' : '~',
'putnames' : True
}
:return: SAS log
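A minimal sketch, assuming an active SASsession named ``sas``; the paths and options are illustrative:
.. code-block:: python
log = sas.write_csv('./cars_subset.csv', 'cars', libref='sashelp',
                    dsopts={'where': 'msrp < 20000', 'keep': 'make model msrp'},
                    opts={'delimiter': ',', 'putnames': True})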
"""
dsopts = dsopts if dsopts is not None else {}
opts = opts if opts is not None else {}
log = self._io.write_csv(file, table, libref, self.nosub, dsopts, opts)
if not self.batch:
print(log)
else:
return log
def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS servers file system.
:param localfile: path to the local file
:param remotefile: path to remote file to create or overwrite
:param overwrite: overwrite the output file if it exists?
:param permission: permissions to set on the new file. See SAS Filename Statement Doc for syntax
:return: dict with 2 keys {'Success' : bool, 'LOG' : str}
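A minimal sketch, assuming an active SASsession named ``sas``; the paths are illustrative:
.. code-block:: python
res = sas.upload('./local_data.csv', '/tmp/remote_data.csv', overwrite=True)
print(res['Success'])   # check the 'LOG' key if Success is False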
"""
lastlog = len(self._io._log)
if self.nosub:
print("too complicated to show the code, read the source :), sorry.")
return None
else:
log = self._io.upload(localfile, remotefile, overwrite, permission, **kwargs)
self._lastlog = self._io._log[lastlog:]
return log
def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):
"""
This method downloads a remote file from the SAS servers file system.
:param localfile: path to the local file to create or overwrite
:param remotefile: path to remote file
:param overwrite: overwrite the output file if it exists?
:return: dict with 2 keys {'Success' : bool, 'LOG' : str}
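A minimal sketch, assuming an active SASsession named ``sas``; the paths are illustrative:
.. code-block:: python
res = sas.download('./copy_of_remote.csv', '/tmp/remote_data.csv')
print(res['Success'])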
"""
lastlog = len(self._io._log)
if self.nosub:
print("too complicated to show the code, read the source :), sorry.")
return None
else:
log = self._io.download(localfile, remotefile, overwrite, **kwargs)
self._lastlog = self._io._log[lastlog:]
return log
def df_char_lengths(self, df: 'pandas.DataFrame', encode_errors = None, char_lengths = None,
**kwargs) -> dict:
"""
This is a utility method for df2sd, used to get the character column lengths from a dataframe to use to
create a SAS data set. This can be called by the user and the returned dict can be passed in to df2sd via
the char_lengths= option. For big data frames, this can take a long time, so this can be used to do it once,
and then the returned dictionary can be provided to df2sd each time it's called, avoiding the recalculation.
:param df: :class:`pandas.DataFrame` Pandas Data Frame to import to a SAS Data Set
:param encode_errors: 'fail', 'replace' - default is to 'fail', other choice is to 'replace' \
invalid chars with the replacement char. This is only when calculating byte lengths, \
which is dependent upon the value of char_lengths=. When calculating char lengths, this \
parameter is ignored in this method (encoding is deferred to the data transfer step in df2sd).
:param char_lengths: How to determine (and declare) lengths for CHAR variables in the output SAS data set. \
SAS declares lengths in bytes, not characters, so multibyte encodings require more bytes per character (BPC)
- 'exact' - the default if SAS is in a multibyte encoding. Calculate the max number of bytes, in the SAS encoding, \
required for the longest actual value. This is the slowest but most accurate option. For big data, this can \
take excessive time. If SAS is running in a single byte encoding then this defaults to '1' (see below), \
but you can override even that by explicitly specifying 'exact' when SAS is a single byte encoding.
- 'safe' - use the char length of the longest value in the column, multiplied by the max BPC of the SAS multibyte \
encoding. This is much faster, but could declare SAS Char variables longer than absolutely required \
for multibyte SAS encodings. If SAS is running in a single byte encoding then '1' (see below) is used. \
Note that SAS has no fixed length multibyte encodings, so BPC is always between 1-2 or 1-4 for these. \
ASCII characters hex 00-7F use one byte in all of these, while other characters use more BPC; it's variable
- [1|2|3|4] - this is 'safe' except the number (1, 2, 3 or 4) is the multiplier (BPC) to use instead of the \
default BPC of the SAS session encoding. For SAS single byte encodings, the value 1 is the default \
used, since characters can only be 1 byte long so char len == byte len. \
For a UTF-8 SAS session, 4 is the BPC, so if you know you don't have many actual unicode characters \
you could specify 2 so the SAS column lengths are only twice the length of the longest value, instead \
of 4 times that, which would be much longer than actually needed. Or if you know you have no unicode \
chars (all the char data is actually only 1 byte), you could specify 1 since it only requires 1 BPC.
:return: dict
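A sketch of the intended workflow, assuming an active SASsession named ``sas`` and an existing DataFrame ``df``:
.. code-block:: python
lengths = sas.df_char_lengths(df, char_lengths='exact')                   # compute once; can be slow for big data
sd = sas.df2sd(df, table='mytable', libref='work', char_lengths=lengths)  # reuse the dict on later transfers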
"""
ret = {}
if encode_errors is None:
encode_errors = 'fail'
bpc = self.pyenc[0]
if char_lengths and str(char_lengths).strip() in ['1','2','3','4']:
bpc = int(char_lengths)
if char_lengths and str(char_lengths) == 'exact':
CnotB = False
else:
CnotB = bpc == 1
for name in df.columns:
colname = str(name)
if df.dtypes[name].kind in ('O','S','U','V'):
if CnotB: # calc max Chars not Bytes
col_l = df[name].astype(str).map(len).max() * bpc
else:
if encode_errors == 'fail':
try:
col_l = df[name].astype(str).apply(lambda x: len(x.encode(self._io.sascfg.encoding))).max()
except Exception as e:
print("Transcoding error encountered.")
print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return None
else:
col_l = df[name].astype(str).apply(lambda x: len(x.encode(self._io.sascfg.encoding, errors='replace'))).max()
if not col_l > 0:
col_l = 8
ret[colname] = col_l
return ret
def df2sd(self, df: 'pandas.DataFrame', table: str = '_df', libref: str = '',
results: str = '', keep_outer_quotes: bool = False,
embedded_newlines: bool = True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs) -> 'SASdata':
"""
This is an alias for 'dataframe2sasdata'. Why type all that?
Also note that dataframe indexes (row labels) are not transferred over as columns, as they aren't actually in df.columns.
You can simply use df.reset_index() before this method and df.set_index() after to have the index be a column which
is transferred over to the SAS data set. If you want to create a SAS index at the same time, use the outdsopts dict.
:param df: :class:`pandas.DataFrame` Pandas Data Frame to import to a SAS Data Set
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
As of version 3.5.0, keep_outer_quotes is deprecated and embedded_newlines defaults to True
:param keep_outer_quotes: the default is for SAS to strip outer quotes from delimited data. This lets you keep them
:param embedded_newlines: if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
colrep is new as of version 3.5.0
:param LF: if embedded_newlines=True, the character to use for LF when transferring the data; defaults to hex(1)
:param CR: if embedded_newlines=True, the character to use for CR when transferring the data; defaults to hex(2)
:param colsep: the column separator character used for streaming the delimited data to SAS; defaults to hex(3)
:param colrep: the char to convert to for any embedded colsep, LF, CR chars in the data; defaults to ' '
:param datetimes: dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
:param outfmts: dict with column names and SAS formats to assign to the new SAS data set
:param outdsopts: a dictionary containing output data set options for the table being created \
for instance, compress=, encoding=, index=, outrep=, replace=, rename= ... \
the options will be generated simply as key=value, so if a value needs quotes or parentheses, provide them in the value
.. code-block:: python
{'compress' : 'yes' ,
'encoding' : 'latin9' ,
'replace' : 'NO' ,
'index' : 'coli' ,
'rename' : "(col1 = Column_one col2 = 'Column Two'n)"
}
:param encode_errors: 'fail', 'replace' or 'ignore' - default is to 'fail', other choice is to 'replace' \
invalid chars with the replacement char. 'ignore' doesn't try to transcode in python, so you \
get whatever happens in SAS based upon the data you send over. Note 'ignore' is only valid for IOM and HTTP
:param char_lengths: How to determine (and declare) lengths for CHAR variables in the output SAS data set. \
SAS declares lengths in bytes, not characters, so multibyte encodings require more bytes per character (BPC)
- 'exact' - the default if SAS is in a multibyte encoding. Calculate the max number of bytes, in the SAS encoding, \
required for the longest actual value. This is the slowest but most accurate option. For big data, this can \
take excessive time. If SAS is running in a single byte encoding then this defaults to '1' (see below), \
but you can override even that by explicitly specifying 'exact' when SAS is a single byte encoding.
- 'safe' - use the char length of the longest value in the column, multiplied by the max BPC of the SAS multibyte \
encoding. This is much faster, but could declare SAS Char variables longer than absolutely required \
for multibyte SAS encodings. If SAS is running in a single byte encoding then '1' (see below) is used. \
Note that SAS has no fixed length multibyte encodings, so BPC is always between 1-2 or 1-4 for these. \
ASCII characters hex 00-7F use one byte in all of these, while other characters use more BPC; it's variable
- [1|2|3|4] - this is 'safe' except the number (1, 2, 3 or 4) is the multiplier (BPC) to use instead of the \
default BPC of the SAS session encoding. For SAS single byte encodings, the value 1 is the default \
used, since characters can only be 1 byte long so char len == byte len. \
For a UTF-8 SAS session, 4 is the BPC, so if you know you don't have many actual unicode characters \
you could specify 2 so the SAS column lengths are only twice the length of the longest value, instead \
of 4 times that, which would be much longer than actually needed. Or if you know you have no unicode \
chars (all the char data is actually only 1 byte), you could specify 1 since it only requires 1 BPC.
- dictionary - a dictionary containing the names:lengths of all of the character columns. This eliminates \
running the code to calculate the lengths, and goes straight to transferring the data \
:return: SASdata object
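A minimal sketch, assuming an active SASsession named ``sas``; the table name and options are illustrative:
.. code-block:: python
import pandas as pd
df = pd.DataFrame({'name': ['Ann', 'Bob'], 'score': [90, 85]})
sd = sas.df2sd(df, table='scores', libref='work', outdsopts={'compress': 'yes'})
sd.columnInfo()   # the returned SASdata object points at the newly created data set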
"""
return self.dataframe2sasdata(df, table, libref, results, keep_outer_quotes, embedded_newlines, LF, CR, colsep, colrep,
datetimes, outfmts, labels, outdsopts, encode_errors, char_lengths, **kwargs)
def dataframe2sasdata(self, df: 'pandas.DataFrame', table: str = '_df', libref: str = '',
results: str = '', keep_outer_quotes: bool = False,
embedded_newlines: bool = True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None, **kwargs) -> 'SASdata':
"""
This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
Also note that dataframe indexes (row labels) are not transferred over as columns, as they aren't actually in df.columns.
You can simply use df.reset_index() before this method and df.set_index() after to have the index be a column which
is transferred over to the SAS data set. If you want to create a SAS index at the same time, use the outdsopts dict.
:param df: Pandas Data Frame to import to a SAS Data Set
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
As of version 3.5.0, keep_outer_quotes is deprecated and embedded_newlines defaults to True
:param keep_outer_quotes: the default is for SAS to strip outer quotes from delimited data. This lets you keep them
:param embedded_newlines: if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
colrep is new as of version 3.5.0
:param LF: if embedded_newlines=True, the character to use for LF when transferring the data; defaults to hex(1)
:param CR: if embedded_newlines=True, the character to use for CR when transferring the data; defaults to hex(2)
:param colsep: the column separator character used for streaming the delimited data to SAS; defaults to hex(3)
:param colrep: the char to convert to for any embedded colsep, LF, CR chars in the data; defaults to ' '
:param datetimes: dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
:param outfmts: dict with column names and SAS formats to assign to the new SAS data set
:param outdsopts: a dictionary containing output data set options for the table being created \
for instance, compress=, encoding=, index=, outrep=, replace=, rename= ... \
the options will be generated simply as key=value, so if a value needs quotes or parentheses, provide them in the value
.. code-block:: python
{'compress' : 'yes' ,
'encoding' : 'latin9' ,
'replace' : 'NO' ,
'index' : 'coli' ,
'rename' : "(col1 = Column_one col2 = 'Column Two'n)"
}
:param encode_errors: 'fail', 'replace' or 'ignore' - default is to 'fail', other choice is to 'replace' \
invalid chars with the replacement char. 'ignore' doesn't try to transcode in python, so you \
get whatever happens in SAS based upon the data you send over. Note 'ignore' is only valid for IOM and HTTP
:param char_lengths: How to determine (and declare) lengths for CHAR variables in the output SAS data set. \
SAS declares lengths in bytes, not characters, so multibyte encodings require more bytes per character (BPC)
- 'exact' - the default if SAS is in a multibyte encoding. Calculate the max number of bytes, in the SAS encoding, \
required for the longest actual value. This is the slowest but most accurate option. For big data, this can \
take excessive time. If SAS is running in a single byte encoding then this defaults to '1' (see below), \
but you can override even that by explicitly specifying 'exact' when SAS is a single byte encoding.
- 'safe' - use the char length of the longest value in the column, multiplied by the max BPC of the SAS multibyte \
encoding. This is much faster, but could declare SAS Char variables longer than absolutely required \
for multibyte SAS encodings. If SAS is running in a single byte encoding then '1' (see below) is used. \
Note that SAS has no fixed length multibyte encodings, so BPC is always between 1-2 or 1-4 for these. \
ASCII characters hex 00-7F use one byte in all of these, while other characters use more BPC; it's variable
- [1|2|3|4] - this is 'safe' except the number (1, 2, 3 or 4) is the multiplier (BPC) to use instead of the \
default BPC of the SAS session encoding. For SAS single byte encodings, the value 1 is the default \
used, since characters can only be 1 byte long so char len == byte len. \
For a UTF-8 SAS session, 4 is the BPC, so if you know you don't have many actual unicode characters \
you could specify 2 so the SAS column lengths are only twice the length of the longest value, instead \
of 4 times that, which would be much longer than actually needed. Or if you know you have no unicode \
chars (all the char data is actually only 1 byte), you could specify 1 since it only requires 1 BPC.
- dictionary - a dictionary containing the names:lengths of all of the character columns. This eliminates \
running the code to calculate the lengths, and goes straight to transferring the data \
:return: SASdata object
"""
lastlog = len(self._io._log)
if self.sascfg.pandas:
raise type(self.sascfg.pandas)(self.sascfg.pandas.msg)
if libref != '':
if libref.upper() not in self.assigned_librefs():
print("The libref specified is not assigned in this SAS Session.")
return None
# support the original implementation of outencoding - should have done it as a ds option to begin with
outencoding = kwargs.pop('outencoding', None)
if outencoding:
outdsopts['encoding'] = outencoding
if results == '':
results = self.results
if self.nosub:
print("too complicated to show the code, read the source :), sorry.")
return None
else:
rc = self._io.dataframe2sasdata(df, table, libref, keep_outer_quotes, embedded_newlines, LF, CR, colsep, colrep,
datetimes, outfmts, labels, outdsopts, encode_errors, char_lengths, **kwargs)
if rc is None:
if self.exist(table, libref):
dsopts = {}
if outencoding:
dsopts['encoding'] = outencoding
sd = SASdata(self, libref, table, results, dsopts)
else:
sd = None
else:
sd = None
self._lastlog = self._io._log[lastlog:]
return sd
def sd2df(self, table: str, libref: str = '', dsopts: dict = None,
method: str = 'MEMORY', **kwargs) -> 'pandas.DataFrame':
"""
This is an alias for 'sasdata2dataframe'. Why type all that?
:param table: the name of the SAS Data Set you want to export to a Pandas Data Frame
:param libref: the libref for the SAS Data Set.
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):
- where is a string
- keep is a string or a list of strings
- drop is a string or a list of strings
- obs is a number - either string or int
- firstobs is a number - either string or int
- format is a string or dictionary { var: format }
- encoding is a string
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"' ,
'keep' : 'msrp enginesize Cylinders Horsepower Weight' ,
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] ,
'obs' : 10 ,
'firstobs' : '12' ,
'format' : {'money': 'dollar10', 'time': 'tod5.'} ,
'encoding' : 'latin9'
}
:param method: defaults to MEMORY;
- MEMORY the original method. Streams the data over and builds the dataframe on the fly in memory
- CSV uses an intermediary Proc Export csv file and pandas read_csv() to import it; faster for large data
- DISK uses the original (MEMORY) method, but persists to disk and uses pandas read to import. \
this has better support than CSV for embedded delimiters (commas), nulls, CR/LF that CSV \
has problems with
For the CSV and DISK methods, the following 2 parameters are also available
:param tempfile: [optional] an OS path for a file to use for the local file; default is a temporary file that's cleaned up
:param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
For the MEMORY and DISK methods, the following 4 parameters are also available, depending upon access method
:param rowsep: the row separator character to use; defaults to hex(1)
:param colsep: the column separator character to use; defaults to hex(2)
:param rowrep: the char to convert to for any embedded rowsep chars, defaults to ' '
:param colrep: the char to convert to for any embedded colsep chars, defaults to ' '
:param kwargs: a dictionary. These vary per access method, and are generally NOT needed.
They are either access method specific parms or specific pandas parms.
See the specific sasdata2dataframe* method in the access method for valid possibilities.
:return: Pandas data frame
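A minimal sketch, assuming an active SASsession named ``sas``; the dsopts shown are illustrative:
.. code-block:: python
df = sas.sd2df('cars', 'sashelp', dsopts={'where': 'msrp < 20000', 'keep': 'make model msrp'})
df.describe()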
"""
dsopts = dsopts if dsopts is not None else {}
return self.sasdata2dataframe(table, libref, dsopts, method, **kwargs)
def sd2df_CSV(self, table: str, libref: str = '', dsopts: dict = None, tempfile: str = None,
tempkeep: bool = False, opts: dict = None, **kwargs) -> 'pandas.DataFrame':
"""
This is an alias for 'sasdata2dataframe' specifying method='CSV'. Why type all that?
:param table: the name of the SAS Data Set you want to export to a Pandas Data Frame
:param libref: the libref for the SAS Data Set.
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):
- where is a string
- keep is a string or a list of strings
- drop is a string or a list of strings
- obs is a number - either string or int
- firstobs is a number - either string or int
- format is a string or dictionary { var: format }
- encoding is a string
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"' ,
'keep' : 'msrp enginesize Cylinders Horsepower Weight' ,
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] ,
'obs' : 10 ,
'firstobs' : '12' ,
'format' : {'money': 'dollar10', 'time': 'tod5.'} ,
'encoding' : 'latin9'
}
:param tempfile: [optional] an OS path for a file to use for the local CSV file; default is a temporary file that's cleaned up
:param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
:param opts: a dictionary containing any of the following Proc Export options(delimiter, putnames)
- delimiter is a single character
- putnames is a bool [True | False]
.. code-block:: python
{'delimiter' : '~',
'putnames' : True
}
:param kwargs: a dictionary. These vary per access method, and are generally NOT needed.
They are either access method specific parms or specific pandas parms.
See the specific sasdata2dataframe* method in the access method for valid possibilities.
:return: Pandas data frame
"""
dsopts = dsopts if dsopts is not None else {}
opts = opts if opts is not None else {}
return self.sasdata2dataframe(table, libref, dsopts, method='CSV', tempfile=tempfile, tempkeep=tempkeep,
opts=opts, **kwargs)
def sd2df_DISK(self, table: str, libref: str = '', dsopts: dict = None, tempfile: str = None,
tempkeep: bool = False, rowsep: str = '\x01', colsep: str = '\x02',
rowrep: str = ' ', colrep: str = ' ', **kwargs) -> 'pandas.DataFrame':
"""
This is an alias for 'sasdata2dataframe' specifying method='DISK'. Why type all that?
:param table: the name of the SAS Data Set you want to export to a Pandas Data Frame
:param libref: the libref for the SAS Data Set.
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):
- where is a string
- keep is a string or a list of strings
- drop is a string or a list of strings
- obs is a number - either string or int
- firstobs is a number - either string or int
- format is a string or dictionary { var: format }
- encoding is a string
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"' ,
'keep' : 'msrp enginesize Cylinders Horsepower Weight' ,
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] ,
'obs' : 10 ,
'firstobs' : '12' ,
'format' : {'money': 'dollar10', 'time': 'tod5.'} ,
'encoding' : 'latin9'
}
:param tempfile: [optional] an OS path for a file to use for the local file; default is a temporary file that's cleaned up
:param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
:param rowsep: the row separator character to use; defaults to hex(1)
:param colsep: the column separator character to use; defaults to hex(2)
:param rowrep: the char to convert to for any embedded rowsep chars, defaults to ' '
:param colrep: the char to convert to for any embedded colsep chars, defaults to ' '
:param kwargs: a dictionary. These vary per access method, and are generally NOT needed.
They are either access method specific parms or specific pandas parms.
See the specific sasdata2dataframe* method in the access method for valid possibilities.
:return: Pandas data frame
"""
dsopts = dsopts if dsopts is not None else {}
return self.sasdata2dataframe(table, libref, dsopts, method='DISK', tempfile=tempfile, tempkeep=tempkeep,
rowsep=rowsep, colsep=colsep, rowrep=rowrep, colrep=colrep, **kwargs)
def sasdata2dataframe(self, table: str, libref: str = '', dsopts: dict = None,
method: str = 'MEMORY', **kwargs) -> 'pandas.DataFrame':
"""
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
:param table: the name of the SAS Data Set you want to export to a Pandas Data Frame
:param libref: the libref for the SAS Data Set.
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):
- where is a string
- keep is a string or a list of strings
- drop is a string or a list of strings
- obs is a number - either string or int
- firstobs is a number - either string or int
- format is a string or dictionary { var: format }
- encoding is a string
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"' ,
'keep' : 'msrp enginesize Cylinders Horsepower Weight' ,
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] ,
'obs' : 10 ,
'firstobs' : '12' ,
'format' : {'money': 'dollar10', 'time': 'tod5.'} ,
'encoding' : 'latin9'
}
:param method: defaults to MEMORY:
- MEMORY the original method. Streams the data over and builds the dataframe on the fly in memory
- CSV uses an intermediary Proc Export csv file and pandas read_csv() to import it; faster for large data
- DISK uses the original (MEMORY) method, but persists to disk and uses pandas read to import. \
this has better support than CSV for embedded delimiters (commas), nulls, CR/LF that CSV \
has problems with
For the CSV and DISK methods, the following 2 parameters are also available
:param tempfile: [optional] an OS path for a file to use for the local file; default is a temporary file that's cleaned up
:param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
For the MEMORY and DISK methods, the following 4 parameters are also available, depending upon access method
:param rowsep: the row separator character to use; defaults to hex(1)
:param colsep: the column separator character to use; defaults to hex(2)
:param rowrep: the char to convert to for any embedded rowsep chars, defaults to ' '
:param colrep: the char to convert to for any embedded colsep chars, defaults to ' '
:param kwargs: a dictionary. These vary per access method, and are generally NOT needed.
They are either access method specific parms or specific pandas parms.
See the specific sasdata2dataframe* method in the access method for valid possibilities.
:return: Pandas data frame
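A minimal sketch, assuming an active SASsession named ``sas``; CSV is shown here only as one possible method choice:
.. code-block:: python
df = sas.sasdata2dataframe('cars', 'sashelp', method='CSV')   # often faster than MEMORY for large tables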
"""
lastlog = len(self._io._log)
if self.sascfg.pandas:
raise type(self.sascfg.pandas)(self.sascfg.pandas.msg)
if method.lower() not in ['memory', 'csv', 'disk']:
print("The specified method is not valid. Supported methods are MEMORY, CSV and DISK")
return None
dsopts = dsopts if dsopts is not None else {}
if self.exist(table, libref) == 0:
print('The SAS Data Set ' + libref + '.' + table + ' does not exist')
return None
if self.nosub:
print("too complicated to show the code, read the source :), sorry.")
df = None
else:
df = self._io.sasdata2dataframe(table, libref, dsopts, method=method, **kwargs)
self._lastlog = self._io._log[lastlog:]
return df
def _dsopts(self, dsopts):
"""
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):
- where is a string or a list of strings
- keep is a string or a list of strings
- drop is a string or a list of strings
- obs is a number - either string or int
- firstobs is a number - either string or int
- format is a string or dictionary { var: format }
- encoding is a string
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"' ,
'keep' : 'msrp enginesize Cylinders Horsepower Weight' ,
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] ,
'obs' : 10 ,
'firstobs' : '12' ,
'format' : {'money': 'dollar10', 'time': 'tod5.'} ,
'encoding' : 'latin9'
}
:return: str
"""
opts = ''
fmat = ''
if len(dsopts):
for key in dsopts:
if len(str(dsopts[key])):
if key == 'where':
if isinstance(dsopts[key], str):
opts += 'where=(' + dsopts[key] + ') '
elif isinstance(dsopts[key], list):
opts += 'where=(' + " and ".join(dsopts[key]) + ') '
else:
raise TypeError("Bad key type. {} must be a str or list type".format(key))
elif key == 'drop':
opts += 'drop='
if isinstance(dsopts[key], list):
for var in dsopts[key]:
opts += var + ' '
else:
opts += dsopts[key] + ' '
elif key == 'keep':
opts += 'keep='
if isinstance(dsopts[key], list):
for var in dsopts[key]:
opts += var + ' '
else:
opts += dsopts[key] + ' '
elif key == 'obs':
opts += 'obs=' + str(dsopts[key]) + ' '
elif key == 'firstobs':
opts += 'firstobs=' + str(dsopts[key]) + ' '
elif key == 'encoding':
opts += 'encoding="' + str(dsopts[key]) + '" '
elif key == 'format':
if isinstance(dsopts[key], str):
fmat = 'format ' + dsopts[key] + ';'
elif isinstance(dsopts[key], dict):
fmat = 'format '
for k, v in dsopts[key].items():
fmat += ' '.join((k, v)) + ' '
fmat += ';'
else:
raise TypeError("Bad key type. {} must be a str or dict type".format(key))
else:
opts += key+'='+str(dsopts[key]) + ' '
if len(opts):
opts = '(' + opts + ')'
if len(fmat) > 0:
opts += ';\n\t' + fmat
elif len(fmat) > 0:
opts = ';\n\t' + fmat
return opts
def _impopts(self, opts):
"""
:param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows):
- datarow is a number
- delimiter is a character
- getnames is a boolean
- guessingrows is a number or the string 'MAX'
.. code-block:: python
{'datarow' : 2,
'delimiter' : ',',
'getnames' : True,
'guessingrows': 20
}
:return: str
"""
optstr = ''
if len(opts):
for key in opts:
if len(str(opts[key])):
if key == 'datarow':
optstr += 'datarow=' + str(opts[key]) + ';'
elif key == 'delimiter':
optstr += 'delimiter='
optstr += "'" + '%02x' % ord(opts[key].encode(self._io.sascfg.encoding)) + "'x; "
elif key == 'getnames':
optstr += 'getnames='
if opts[key]:
optstr += 'YES; '
else:
optstr += 'NO; '
elif key == 'guessingrows':
optstr += 'guessingrows='
if opts[key] == 'MAX':
optstr += 'MAX; '
else:
optstr += str(opts[key]) + '; '
return optstr
def _expopts(self, opts):
"""
:param opts: a dictionary containing any of the following Proc Export options(delimiter, putnames):
- delimiter is a character
- putnames is a boolean
.. code-block:: python
{'delimiter' : ',',
'putnames' : True
}
:return: str
"""
optstr = ''
if len(opts):
for key in opts:
if len(str(opts[key])):
if key == 'delimiter':
optstr += 'delimiter='
optstr += "'" + '%02x' % ord(opts[key].encode(self._io.sascfg.encoding)) + "'x; "
elif key == 'putnames':
optstr += 'putnames='
if opts[key]:
optstr += 'YES; '
else:
optstr += 'NO; '
return optstr
def symput(self, name: str, value, quoting='NRBQUOTE'):
"""
:param name: name of the macro variable to set
:param value: python variable, that can be resolved to a string, to use for the value to assign to the macro variable
:param quoting: None for 'asis' macro definition. Or any of the special SAS quoting functions like \
BQUOTE, NRBQUOTE, QUOTE, NRQUOTE, STR, NRSTR, SUPERQ, ... default is NRBQUOTE
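A minimal round-trip sketch with symget(), assuming an active SASsession named ``sas``; the macro variable name is illustrative:
.. code-block:: python
sas.symput('answer', 42)            # submits: %let answer=%NRBQUOTE(42);
val = sas.symget('answer', 'int')   # retrieve it back as a Python int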
"""
if quoting:
ll = self._io.submit("%let " + name + "=%" + quoting.upper() + "(" + str(value) + ");\n", results='text')
else:
ll = self._io.submit("%let " + name + "=" + str(value) + ";\n", results='text')
def symget(self, name: str, outtype=None):
"""
:param name: [required] name of the macro variable to get
:param outtype: [optional] desired output type of the python variable; valid types are [int, float, str] \
provide an object of the type [1, 1.0, ' '] or a string of 'int', 'float' or 'str'
"""
ll = self._io.submit("%put " + name + "BEGIN=&" + name + " "+ name+"END=;\n", results='text')
l2 = ll['LOG'].rpartition(name + "BEGIN=")[2].rpartition(name+"END=")[0].strip().replace('\n','')
if outtype is not None:
if outtype == 'int':
outtype = 1
elif outtype == 'float':
outtype = 1.0
if outtype is not None and type(outtype) not in [int, float, str]:
print("invalid type specified. supported are [int, float, str], will return default type")
outtype=None
if outtype is not None:
if type(outtype) == int:
var = int(l2)
elif type(outtype) == float:
var = float(l2)
elif type(outtype) == str:
var = l2
else:
try:
var = int(l2)
except:
try:
var = float(l2)
except:
var = l2
return var
def symexist(self, name: str):
"""
:param name: [required] name of the macro variable to check for existence
:return: bool
"""
ll = self._io.submit("%put " + name + "BEGIN=%symexist(" + name + ") "+ name+"END=;\n")
l2 = ll['LOG'].rpartition(name + "BEGIN=")[2].rpartition(name+"END=")[0].strip().replace('\n','')
var = int(l2)
return bool(var)
def disconnect(self):
"""
This method disconnects an IOM session to allow for reconnecting when switching networks
See the Advanced topics section of the doc for details
"""
if self.sascfg.mode != 'IOM':
res = "This method is only available with the IOM access method"
else:
res = self._io.disconnect()
return res
def SYSINFO(self):
"""
This method returns the SAS Automatic Macro Variable SYSINFO which
contains return codes provided by some SAS procedures.
"""
return self.symget("SYSINFO")
def SYSERR(self):
"""
This method returns the SAS Automatic Macro Variable SYSERR which
contains a return code status set by some SAS procedures and the DATA step.
"""
return self.symget("SYSERR")
def SYSERRORTEXT(self):
"""
This method returns the SAS Automatic Macro Variable SYSERRORTEXT which
is the text of the last error message generated in the SAS log.
"""
return self.symget("SYSERRORTEXT")
def SYSWARNINGTEXT(self):
"""
This method returns the SAS Automatic Macro Variable SYSWARNINGTEXT which
is the text of the last warning message generated in the SAS log.
"""
return self.symget("SYSWARNINGTEXT")
def SYSFILRC(self):
"""
This method returns the SAS Automatic Macro Variable SYSFILRC which
identifies whether or not the last FILENAME statement executed correctly.
"""
return self.symget("SYSFILRC")
def SYSLIBRC(self):
"""
This method returns the SAS Automatic Macro Variable SYSLIBRC which
reports whether the last LIBNAME statement executed correctly.
"""
return self.symget("SYSLIBRC")
def assigned_librefs(self) -> list:
"""
This method returns the list of currently assigned librefs
"""
code = """
data _null_; retain libref; retain cobs 1;
set sashelp.vlibnam end=last;
if cobs EQ 1 then
put "LIBREFSSTART=";
cobs = 2;
if libref NE libname then
put %upcase("lib=") libname %upcase('libEND=');
libref = libname;
if last then
put "LIBREFSEND=";
run;
"""
if self.nosub:
print(code)
return None
else:
ll = self._io.submit(code, results='text')
librefs = []
log = ll['LOG'].rpartition('LIBREFSEND=')[0].rpartition('LIBREFSSTART=')
for i in range(log[2].count('LIB=')):
log = log[2].partition('LIB=')[2].partition(' LIBEND=')
librefs.append(log[0])
return librefs
def dirlist(self, path) -> dict:
"""
This method returns the directory listing for the specified path on the host where SAS is running
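For example, assuming an active SASsession named ``sas`` (the path is illustrative):
.. code-block:: python
entries = sas.dirlist('/tmp')   # names ending in the host separator are subdirectories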
"""
code = """
data _null_;
spd = '""" + path + """';
rc = filename('saspydir', spd);
did = dopen('saspydir');
if did > 0 then
do;
memcount = dnum(did);
put 'MEMCOUNT=' memcount 'MEMCOUNTEND=';
do while (memcount > 0);
name = dread(did, memcount);
memcount = memcount - 1;
qname = spd || '"""+self.hostsep+"""' || name;
rc = filename('saspydq', qname);
dq = dopen('saspydq');
if dq NE 0 then
do;
dname = strip(name) || '"""+self.hostsep+"""';
put %upcase('DIR_file=') dname %upcase('fileEND=');
rc = dclose(dq);
end;
else
put %upcase('file=') name %upcase('fileEND=');
end;
put 'MEMEND=';
rc = dclose(did);
end;
else
do;
put 'MEMCOUNT=0 MEMCOUNTEND=';
put 'MEMEND=';
end;
rc = filename('saspydq');
rc = filename('saspydir');
run;
"""
if self.nosub:
print(code)
return None
else:
ll = self._io.submit(code, results='text')
dirlist = []
l2 = ll['LOG'].rpartition("MEMCOUNT=")[2].partition(" MEMCOUNTEND=")
memcount = int(l2[0])
dirlist = []
log = ll['LOG'].rpartition('MEMEND=')[0].rpartition('MEMCOUNTEND=')
for i in range(log[2].count('FILE=')):
log = log[2].partition('FILE=')[2].partition(' FILEEND=')
dirlist.append(log[0])
if memcount != len(dirlist):
print("Some problem parsing list. Should be " + str(memcount) + " entries but got " + str(
len(dirlist)) + " instead.")
return dirlist
def list_tables(self, libref: str='work', results: str = 'list') -> list:
"""
This method returns a list of (MEMNAME, MEMTYPE) tuples for the members of the library whose memtype is DATA or VIEW
If you would like a Pandas dataframe returned instead of a list, specify results='pandas'
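For example, assuming an active SASsession named ``sas``:
.. code-block:: python
members = sas.list_tables('sashelp')                       # e.g. [('CARS', 'DATA'), ('VCOLUMN', 'VIEW'), ...]
members_df = sas.list_tables('sashelp', results='pandas')  # same information as a DataFrame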
"""
lastlog = len(self._io._log)
if not self.nosub:
ll = self._io.submit("%put LIBREF_EXISTS=%sysfunc(libref("+libref+")) LIB_EXT_END=;")
exists = int(ll['LOG'].rpartition('LIBREF_EXISTS=')[2].rpartition('LIB_EXT_END=')[0])
if exists != 0:
print('Libref provided is not assigned')
return None
code = """
proc datasets dd=librefx nodetails nolist noprint;
contents memtype=(data view) nodetails
dir out=work._saspy_lib_list(keep=memname memtype) data=_all_ noprint;
run;
proc sql;
create table work._saspy_lib_list as select distinct * from work._saspy_lib_list;
quit;
""".replace('librefx', libref)
if self.nosub:
print(code)
return None
else:
ll = self._io.submit(code, results='text')
if results != 'list':
res = self.sd2df('_saspy_lib_list', 'work')
ll = self._io.submit("proc delete data=work._saspy_lib_list;run;", results='text')
self._lastlog = self._io._log[lastlog:]
return res
code = """
data _null_;
set work._saspy_lib_list end=last curobs=first;
if first EQ 1 then
put 'MEMSTART=';
put %upcase('memNAME=') memname %upcase('memNAMEEND=');
put %upcase('memTYPE=') memtype %upcase('memTYPEEND=');
if last then
put 'MEMEND=';
run;
proc delete data=work._saspy_lib_list;run;
"""
ll = self._io.submit(code, results='text')
log = ll['LOG'].rpartition('MEMEND=')[0].rpartition('MEMSTART=')
tablist = []
for i in range(log[2].count('MEMNAME=')):
log = log[2].partition('MEMNAME=')[2].partition(' MEMNAMEEND=')
key = log[0]
log = log[2].partition('MEMTYPE=')[2].partition(' MEMTYPEEND=')
val = log[0]
tablist.append(tuple((key, val)))
self._lastlog = self._io._log[lastlog:]
return tablist
def file_info(self, filepath, results: str = 'dict', fileref: str = '_spfinfo', quiet: bool = False) -> dict:
"""
This method returns a dictionary containing the file attributes for the file name provided
If you would like a Pandas dataframe returned instead of a dictionary, specify results='pandas'
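For example, assuming an active SASsession named ``sas`` (the path is illustrative):
.. code-block:: python
info = sas.file_info('/tmp/remote_data.csv')   # dict of attributes reported by the SAS FINFO function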
"""
lastlog = len(self._io._log)
if not self.nosub:
code = "filename "+fileref+" '"+filepath+"';\n"
code += "%put FILEREF_EXISTS=%sysfunc(fexist("+fileref+")) FILE_EXTEND=;"
ll = self._io.submit(code)
exists = int(ll['LOG'].rpartition('FILEREF_EXISTS=')[2].rpartition(' FILE_EXTEND=')[0])
if exists != 1:
if not quiet:
print('The filepath provided does not exist')
ll = self._io.submit("filename "+fileref+" clear;")
return None
if results != 'dict':
code="""
proc delete data=work._SASPY_FILE_INFO;run;
data work._SASPY_FILE_INFO;
length infoname $256 infoval $4096;
drop rc fid infonum i close;
fid=fopen('filerefx');
if fid then
do;
infonum=foptnum(fid);
do i=1 to infonum;
infoname=foptname(fid, i);
infoval=finfo(fid, infoname);
output;
end;
end;
close=fclose(fid);
rc = filename('filerefx');
run;
""".replace('filerefx', fileref)
if self.nosub:
print(code)
return None
else:
ll = self._io.submit(code, results='text')
res = self.sd2df('_SASPY_FILE_INFO', 'work')
ll = self._io.submit("proc delete data=work._SASPY_FILE_INFO;run;", results='text')
self._lastlog = self._io._log[lastlog:]
return res
code="""options nosource;
data _null_;
length infoname $256 infoval $4096;
"""
if self.sascfg.mode in ['STDIO', 'SSH', '']:
code +=" file STDERR; "
code +="""
drop rc fid infonum i close;
put 'INFOSTART=';
fid=fopen('filerefx');
if fid then
do;
infonum=foptnum(fid);
do i=1 to infonum;
infoname=foptname(fid, i);
infoval=finfo(fid, infoname);
put %upcase('infoNAME=') infoname %upcase('infoNAMEEND=');
put %upcase('infoVAL=') infoval %upcase('infoVALEND=');
end;
end;
put 'INFOEND=';
close=fclose(fid);
rc = filename('filerefx');
run; options source;
""".replace('filerefx', fileref)
if self.nosub:
print(code)
return None
else:
ll = self._io.submit(code, results='text')
vi = len(ll['LOG'].rpartition('INFOEND=')[0].rpartition('\n')[2])
res = {}
log = ll['LOG'].rpartition('INFOEND=')[0].rpartition('INFOSTART=')
if vi > 0:
for i in range(log[2].count('INFONAME=')):
log = log[2].partition('INFONAME=')[2].partition(' INFONAMEEND=')
key = log[0]
log = log[2].partition('INFOVAL=')[2].partition('INFOVALEND=')
vx = log[0].split('\n')
val = vx[0]
for x in vx[1:]:
val += x[vi:]
res[key] = val.strip()
else:
for i in range(log[2].count('INFONAME=')):
log = log[2].partition('INFONAME=')[2].partition(' INFONAMEEND=')
key = log[0]
log = log[2].partition('INFOVAL=')[2].partition('INFOVALEND=')
val = log[0].replace('\n', '').strip()
res[key] = val
self._lastlog = self._io._log[lastlog:]
return res
def file_delete(self, filepath, fileref: str = '_spfinfo', quiet: bool = False) -> dict:
"""
This method deletes an external file or directory on the SAS server side
:param filepath: path to the remote file to delete
:param fileref: fileref to use on the generated filename stmt
:param quiet: if True, suppress printing of messages
:return: dict with 2 keys {'Success' : bool, 'LOG' : str}
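A minimal sketch, assuming an active SASsession named ``sas``; the path is illustrative:
.. code-block:: python
res = sas.file_delete('/tmp/remote_data.csv')
print(res['Success'])   # inspect res['LOG'] if the delete did not succeed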
"""
lastlog = len(self._io._log)
code = "data _null_;\n rc=filename('"+fileref+"', '"+filepath+"');\n"
code += " if rc = 0 and fexist('"+fileref+"') then do;\n"
code += " rc = fdelete('"+fileref+"');\n"
code += " put 'FILEREF_EXISTS= ' rc 'FILE_EXTEND=';\n"
code += " end; else do;\n"
code += " put 'FILEREF_EXISTS= -1 FILE_EXTEND=';\n"
code += " end; run;\n"
if self.nosub:
print(code)
return None
else:
ll = self._io.submit(code, results='text')
exists = int(ll['LOG'].rpartition('FILEREF_EXISTS=')[2].rpartition(' FILE_EXTEND=')[0])
if exists != 0:
if not quiet:
print('The filepath provided does not exist')
self._lastlog = self._io._log[lastlog:]
return {'Success' : not bool(exists), 'LOG' : ll['LOG']}
def file_copy(self, source_path, dest_path, fileref: str = '_spfinf', quiet: bool = False) -> dict:
"""
This method copies one external file to another on the SAS server side
:param source_path: path to the remote source file to copy
:param dest_path: path for the remote file to write to
:param fileref: fileref (first 7 chars of one) to use on the two generated filename stmts
:param quiet: if True, suppress printing of messages
:return: dict with 2 keys {'Success' : bool, 'LOG' : str}
"""
lastlog = len(self._io._log)
code = "filename {} '{}' recfm=n;\n".format(fileref[:7]+'s', source_path)
code += "filename {} '{}' recfm=n;\n".format(fileref[:7]+'d', dest_path)
code += "data _null_;\n"
code += " rc = fcopy('{}', '{}');\n".format(fileref[:7]+'s',fileref[:7]+'d')
code += " put 'FILEREF_EXISTS= ' rc 'FILE_EXTEND=';\n"
code += "run;\n"
code += "filename {} clear;\n".format(fileref[:7]+'s')
code += "filename {} clear;\n".format(fileref[:7]+'d')
if self.nosub:
print(code)
return None
else:
ll = self._io.submit(code, results='text')
exists = int(ll['LOG'].rpartition('FILEREF_EXISTS=')[2].rpartition(' FILE_EXTEND=')[0])
if exists != 0:
if not quiet:
print('Non Zero return code. Check the SASLOG for messages')
self._lastlog = self._io._log[lastlog:]
return {'Success' : not bool(exists), 'LOG' : ll['LOG']}
def cat(self, path) -> str:
"""
Like Linux 'cat' - open and print the contents of a file
"""
fd = open(path, 'r')
dat = fd.read()
fd.close()
print(dat)
def sil(self, life=None, rate=None, amount=None, payment=None, out: object = None, out_summary: object = None):
"""
Alias for simple_interest_loan
"""
return self.simple_interest_loan(life, rate, amount, payment, out, out_summary)
def simple_interest_loan(self, life=None, rate=None, amount=None, payment=None, out: object = None, out_summary: object = None):
"""
Calculate the amortization schedule of a simple interest loan given 3 of the 4 variables.
You must specify 3 of the 4 variables to solve for the 4th.
:param life: length of loan in months
:param rate: interest rate as a decimal percent: .03 is 3% apr
:param amount: amount of loan
:param payment: monthly payment amount
:return: SAS LST showing the amortization schedule calculated for the missing variable
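For example, assuming an active SASsession named ``sas``, solving for the monthly payment by omitting it:
.. code-block:: python
sas.simple_interest_loan(life=360, rate=0.045, amount=200000)   # 30 years at 4.5% APR on 200,000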
"""
vars = 0
code = "proc mortgage"
if life is not None:
code += " life="+str(life)
vars += 1
if rate is not None:
code += " rate="+str(rate)
vars += 1
if amount is not None:
code += " amount="+str(amount)
vars += 1
if payment is not None:
code += " payment="+str(payment)
vars += 1
if out is not None:
code += " out="+out.libref + ".'" + out.table +"'n " + out._dsopts()
if out_summary is not None:
code += " outsum="+out_summary.libref + ".'" + out_summary.table +"'n " + out_summary._dsopts()
code += "; run;"
if vars != 3:
print("Must suply 3 of the 4 variables. Only "+str(vars)+" variables provided.")
return None
if self.nosub:
print(code)
else:
if self.results.lower() == 'html':
ll = self._io.submit(code, "html")
if not self.batch:
self._render_html_or_log(ll)
else:
return ll
else:
ll = self._io.submit(code, "text")
if self.batch:
return ll
else:
print(ll['LST'])
def validvarname(self, df: 'pandas.DataFrame', version: str = "v7" ) -> 'pandas.DataFrame':
"""
Creates a copy of a Data Frame with SAS compatible column names. The version= parameter allows
you to choose the compatibility setting to use.
:param df: a Pandas Data Frame whose column names you wish to make SAS compatible.
:param version: select the validvarname version using SAS convention.
- V7: ensures the following conditions are met:
- up to 32 mixed case alphanumeric characters are allowed.
- names must begin with alphabetic characters or an underscore.
- non SAS characters are mapped to underscores.
- any column name that is not unique when normalized is made unique by appending a counter (0,1,2,...) to the name.
- V6: like V7, but column names truncated to 8 characters.
- upcase: like V7, but columns names will be uppercase.
- any: any characters are valid, but column names truncated to 32 characters.
:return: a Pandas DataFrame whose column names are SAS compatible according to the selected version.
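A minimal sketch; ``sas`` is assumed to be an active SASsession and the column names are illustrative:
.. code-block:: python
import pandas as pd
df = pd.DataFrame({'Sales Total': [1, 2], '2019 Qty': [3, 4]})
clean = sas.validvarname(df)   # columns become 'Sales_Total' and '_2019_Qty'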
"""
if version.lower() not in ['v6', 'v7', 'upcase', 'any']:
print("The specified version is not valid. Using the default: 'V7'")
version = 'v7'
max_length = 8 if version.lower() == 'v6' else 32
names = {}
# normalize variable names
for col_name in df.columns:
new_name = col_name[:max_length]
if version.lower() != 'any':
new_name = re.sub(r'[^\d\w]+', r'_' , new_name)
new_name = re.sub(r'^(\d+)', r'_\1', new_name)
if version.lower() == 'upcase':
new_name = new_name.upper()
names[col_name] = new_name
# serialize duplicates in normalized variable names
for col_name in df.columns:
duplicate_keys = [key for key in names.keys()
if names[key].upper() == names[col_name].upper() ]
duplicate_count = len(duplicate_keys)-1
if duplicate_count>0:
count = 0
padding = len(str(duplicate_count))
for val in df.columns:
if val in duplicate_keys:
names[val] = "{}{}".format(names[val][:max_length-padding], count)
count += 1
return df.rename(columns=names)
if __name__ == "__main__":
startsas()
submit(sys.argv[1], "text")
print(_getlog())
print(_getlsttxt())
endsas()
sas_date_fmts = (
'AFRDFDD', 'AFRDFDE', 'AFRDFDE', 'AFRDFDN', 'AFRDFDWN', 'AFRDFMN', 'AFRDFMY', 'AFRDFMY', 'AFRDFWDX', 'AFRDFWKX',
'ANYDTDTE', 'B8601DA', 'B8601DA', 'B8601DJ', 'CATDFDD', 'CATDFDE', 'CATDFDE', 'CATDFDN', 'CATDFDWN', 'CATDFMN',
'CATDFMY', 'CATDFMY', 'CATDFWDX', 'CATDFWKX', 'CRODFDD', 'CRODFDE', 'CRODFDE', 'CRODFDN', 'CRODFDWN', 'CRODFMN',
'CRODFMY', 'CRODFMY', 'CRODFWDX', 'CRODFWKX', 'CSYDFDD', 'CSYDFDE', 'CSYDFDE', 'CSYDFDN', 'CSYDFDWN', 'CSYDFMN',
'CSYDFMY', 'CSYDFMY', 'CSYDFWDX', 'CSYDFWKX', 'DANDFDD', 'DANDFDE', 'DANDFDE', 'DANDFDN', 'DANDFDWN', 'DANDFMN',
'DANDFMY', 'DANDFMY', 'DANDFWDX', 'DANDFWKX', 'DATE', 'DATE', 'DAY', 'DDMMYY', 'DDMMYY', 'DDMMYYB', 'DDMMYYC',
'DDMMYYD', 'DDMMYYN', 'DDMMYYP', 'DDMMYYS', 'DESDFDD', 'DESDFDE', 'DESDFDE', 'DESDFDN', 'DESDFDWN', 'DESDFMN',
'DESDFMY', 'DESDFMY', 'DESDFWDX', 'DESDFWKX', 'DEUDFDD', 'DEUDFDE', 'DEUDFDE', 'DEUDFDN', 'DEUDFDWN', 'DEUDFMN',
'DEUDFMY', 'DEUDFMY', 'DEUDFWDX', 'DEUDFWKX', 'DOWNAME', 'E8601DA', 'E8601DA', 'ENGDFDD', 'ENGDFDE', 'ENGDFDE',
'ENGDFDN', 'ENGDFDWN', 'ENGDFMN', 'ENGDFMY', 'ENGDFMY', 'ENGDFWDX', 'ENGDFWKX', 'ESPDFDD', 'ESPDFDE', 'ESPDFDE',
'ESPDFDN', 'ESPDFDWN', 'ESPDFMN', 'ESPDFMY', 'ESPDFMY', 'ESPDFWDX', 'ESPDFWKX', 'EURDFDD', 'EURDFDE', 'EURDFDE',
'EURDFDN', 'EURDFDWN', 'EURDFMN', 'EURDFMY', 'EURDFMY', 'EURDFWDX', 'EURDFWKX', 'FINDFDD', 'FINDFDE', 'FINDFDE',
'FINDFDN', 'FINDFDWN', 'FINDFMN', 'FINDFMY', 'FINDFMY', 'FINDFWDX', 'FINDFWKX', 'FRADFDD', 'FRADFDE', 'FRADFDE',
'FRADFDN', 'FRADFDWN', 'FRADFMN', 'FRADFMY', 'FRADFMY', 'FRADFWDX', 'FRADFWKX', 'FRSDFDD', 'FRSDFDE', 'FRSDFDE',
'FRSDFDN', 'FRSDFDWN', 'FRSDFMN', 'FRSDFMY', 'FRSDFMY', 'FRSDFWDX', 'FRSDFWKX', 'HUNDFDD', 'HUNDFDE', 'HUNDFDE',
'HUNDFDN', 'HUNDFDWN', 'HUNDFMN', 'HUNDFMY', 'HUNDFMY', 'HUNDFWDX', 'HUNDFWKX', 'IS8601DA', 'IS8601DA', 'ITADFDD',
'ITADFDE', 'ITADFDE', 'ITADFDN', 'ITADFDWN', 'ITADFMN', 'ITADFMY', 'ITADFMY', 'ITADFWDX', 'ITADFWKX', 'JDATEMD',
'JDATEMDW', 'JDATEMNW', 'JDATEMON', 'JDATEQRW', 'JDATEQTR', 'JDATESEM', 'JDATESMW', 'JDATEWK', 'JDATEYDW', 'JDATEYM',
'JDATEYMD', 'JDATEYMD', 'JDATEYMW', 'JNENGO', 'JNENGO', 'JNENGOW', 'JULDATE', 'JULDAY', 'JULIAN', 'JULIAN', 'MACDFDD',
'MACDFDE', 'MACDFDE', 'MACDFDN', 'MACDFDWN', 'MACDFMN', 'MACDFMY', 'MACDFMY', 'MACDFWDX', 'MACDFWKX', 'MINGUO',
'MINGUO', 'MMDDYY', 'MMDDYY', 'MMDDYYB', 'MMDDYYC', 'MMDDYYD', 'MMDDYYN', 'MMDDYYP', 'MMDDYYS', 'MMYY', 'MMYYC',
'MMYYD', 'MMYYN', 'MMYYP', 'MMYYS', 'MONNAME', 'MONTH', 'MONYY', 'MONYY', 'ND8601DA', 'NENGO', 'NENGO', 'NLDATE',
'NLDATE', 'NLDATEL', 'NLDATEM', 'NLDATEMD', 'NLDATEMDL', 'NLDATEMDM', 'NLDATEMDS', 'NLDATEMN', 'NLDATES', 'NLDATEW',
'NLDATEW', 'NLDATEWN', 'NLDATEYM', 'NLDATEYML', 'NLDATEYMM', 'NLDATEYMS', 'NLDATEYQ', 'NLDATEYQL', 'NLDATEYQM',
'NLDATEYQS', 'NLDATEYR', 'NLDATEYW', 'NLDDFDD', 'NLDDFDE', 'NLDDFDE', 'NLDDFDN', 'NLDDFDWN', 'NLDDFMN', 'NLDDFMY',
'NLDDFMY', 'NLDDFWDX', 'NLDDFWKX', 'NORDFDD', 'NORDFDE', 'NORDFDE', 'NORDFDN', 'NORDFDWN', 'NORDFMN', 'NORDFMY',
'NORDFMY', 'NORDFWDX', 'NORDFWKX', 'POLDFDD', 'POLDFDE', 'POLDFDE', 'POLDFDN', 'POLDFDWN', 'POLDFMN', 'POLDFMY',
'POLDFMY', 'POLDFWDX', 'POLDFWKX', 'PTGDFDD', 'PTGDFDE', 'PTGDFDE', 'PTGDFDN', 'PTGDFDWN', 'PTGDFMN', 'PTGDFMY',
'PTGDFMY', 'PTGDFWDX', 'PTGDFWKX', 'QTR', 'QTRR', 'RUSDFDD', 'RUSDFDE', 'RUSDFDE', 'RUSDFDN', 'RUSDFDWN', 'RUSDFMN',
'RUSDFMY', 'RUSDFMY', 'RUSDFWDX', 'RUSDFWKX', 'SLODFDD', 'SLODFDE', 'SLODFDE', 'SLODFDN', 'SLODFDWN', 'SLODFMN',
'SLODFMY', 'SLODFMY', 'SLODFWDX', 'SLODFWKX', 'SVEDFDD', 'SVEDFDE', 'SVEDFDE', 'SVEDFDN', 'SVEDFDWN', 'SVEDFMN',
'SVEDFMY', 'SVEDFMY', 'SVEDFWDX', 'SVEDFWKX', 'WEEKDATE', 'WEEKDATX', 'WEEKDAY', 'WEEKU', 'WEEKU', 'WEEKV', 'WEEKV',
'WEEKW', 'WEEKW', 'WORDDATE', 'WORDDATX', 'XYYMMDD', 'XYYMMDD', 'YEAR', 'YYMM', 'YYMMC', 'YYMMD', 'YYMMDD', 'YYMMDD',
'YYMMDDB', 'YYMMDDC', 'YYMMDDD', 'YYMMDDN', 'YYMMDDP', 'YYMMDDS', 'YYMMN', 'YYMMN', 'YYMMP', 'YYMMS', 'YYMON', 'YYQ',
'YYQ', 'YYQC', 'YYQD', 'YYQN', 'YYQP', 'YYQR', 'YYQRC', 'YYQRD', 'YYQRN', 'YYQRP', 'YYQRS', 'YYQS', 'YYQZ', 'YYQZ',
'YYWEEKU', 'YYWEEKV', 'YYWEEKW',
)
sas_time_fmts = (
'ANYDTTME', 'B8601LZ', 'B8601LZ', 'B8601TM', 'B8601TM', 'B8601TZ', 'B8601TZ', 'E8601LZ', 'E8601LZ', 'E8601TM',
'E8601TM', 'E8601TZ', 'E8601TZ', 'HHMM', 'HOUR', 'IS8601LZ', 'IS8601LZ', 'IS8601TM', 'IS8601TM', 'IS8601TZ',
'IS8601TZ', 'JTIMEH', 'JTIMEHM', 'JTIMEHMS', 'JTIMEHW', 'JTIMEMW', 'JTIMESW', 'MMSS', 'ND8601TM', 'ND8601TZ',
'NLTIMAP', 'NLTIMAP', 'NLTIME', 'NLTIME', 'STIMER', 'TIME', 'TIMEAMPM', 'TOD',
)
sas_datetime_fmts = (
'AFRDFDT', 'AFRDFDT', 'ANYDTDTM', 'B8601DN', 'B8601DN', 'B8601DT', 'B8601DT', 'B8601DZ', 'B8601DZ', 'CATDFDT',
'CATDFDT', 'CRODFDT', 'CRODFDT', 'CSYDFDT', 'CSYDFDT', 'DANDFDT', 'DANDFDT', 'DATEAMPM', 'DATETIME', 'DATETIME',
'DESDFDT', 'DESDFDT', 'DEUDFDT', 'DEUDFDT', 'DTDATE', 'DTMONYY', 'DTWKDATX', 'DTYEAR', 'DTYYQC', 'E8601DN',
'E8601DN', 'E8601DT', 'E8601DT', 'E8601DZ', 'E8601DZ', 'ENGDFDT', 'ENGDFDT', 'ESPDFDT', 'ESPDFDT', 'EURDFDT',
'EURDFDT', 'FINDFDT', 'FINDFDT', 'FRADFDT', 'FRADFDT', 'FRSDFDT', 'FRSDFDT', 'HUNDFDT', 'HUNDFDT', 'IS8601DN',
'IS8601DN', 'IS8601DT', 'IS8601DT', 'IS8601DZ', 'IS8601DZ', 'ITADFDT', 'ITADFDT', 'JDATEYT', 'JDATEYTW', 'JNENGOT',
'JNENGOTW', 'MACDFDT', 'MACDFDT', 'MDYAMPM', 'MDYAMPM', 'ND8601DN', 'ND8601DT', 'ND8601DZ', 'NLDATM', 'NLDATM',
'NLDATMAP', 'NLDATMAP', 'NLDATMDT', 'NLDATML', 'NLDATMM', 'NLDATMMD', 'NLDATMMDL', 'NLDATMMDM', 'NLDATMMDS',
'NLDATMMN', 'NLDATMS', 'NLDATMTM', 'NLDATMTZ', 'NLDATMW', 'NLDATMW', 'NLDATMWN', 'NLDATMWZ', 'NLDATMYM', 'NLDATMYML',
'NLDATMYMM', 'NLDATMYMS', 'NLDATMYQ', 'NLDATMYQL', 'NLDATMYQM', 'NLDATMYQS', 'NLDATMYR', 'NLDATMYW', 'NLDATMZ',
'NLDDFDT', 'NLDDFDT', 'NORDFDT', 'NORDFDT', 'POLDFDT', 'POLDFDT', 'PTGDFDT', 'PTGDFDT', 'RUSDFDT', 'RUSDFDT',
'SLODFDT', 'SLODFDT', 'SVEDFDT', 'SVEDFDT', 'TWMDY', 'YMDDTTM',
)
sas_encoding_mapping = {
'arabic': [1, 'iso8859_6', 'iso-8859-6', 'arabic'],
'big5': [2, 'big5', 'big5-tw', 'csbig5'],
'cyrillic': [1, 'iso8859_5', 'iso-8859-5', 'cyrillic'],
'ebcdic037': [1, 'cp037', 'ibm037', 'ibm039'],
'ebcdic273': [1, 'cp273', '273', 'ibm273', 'csibm273'],
'ebcdic500': [1, 'cp500', 'ebcdic-cp-be', 'ebcdic-cp-ch', 'ibm500'],
'euc-cn': [2, 'gb2312', 'chinese', 'csiso58gb231280', 'euc-cn', 'euccn', 'eucgb2312-cn', 'gb2312-1980', 'gb2312-80', 'iso-ir-58'],
'euc-jp': [4, 'euc_jis_2004', 'jisx0213', 'eucjis2004'],
'euc-kr': [4, 'euc_kr', 'euckr', 'korean', 'ksc5601', 'ks_c-5601', 'ks_c-5601-1987', 'ksx1001', 'ks_x-1001'],
'greek': [1, 'iso8859_7', 'iso-8859-7', 'greek', 'greek8'],
'hebrew': [1, 'iso8859_8', 'iso-8859-8', 'hebrew'],
'ibm-949': [1, 'cp949', '949', 'ms949', 'uhc'],
'kz1048': [1, 'kz1048', 'kz_1048', 'strk1048_2002', 'rk1048'],
'latin10': [1, 'iso8859_16', 'iso-8859-16', 'latin10', 'l10'],
'latin1': [1, 'latin_1', 'iso-8859-1', 'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'l1'],
'latin2': [1, 'iso8859_2', 'iso-8859-2', 'latin2', 'l2'],
'latin3': [1, 'iso8859_3', 'iso-8859-3', 'latin3', 'l3'],
'latin4': [1, 'iso8859_4', 'iso-8859-4', 'latin4', 'l4'],
'latin5': [1, 'iso8859_9', 'iso-8859-9', 'latin5', 'l5'],
'latin6': [1, 'iso8859_10', 'iso-8859-10', 'latin6', 'l6'],
'latin7': [1, 'iso8859_13', 'iso-8859-13', 'latin7', 'l7'],
'latin8': [1, 'iso8859_14', 'iso-8859-14', 'latin8', 'l8'],
'latin9': [1, 'iso8859_15', 'iso-8859-15', 'latin9', 'l9'],
'ms-932': [2, 'cp932', '932', 'ms932', 'mskanji', 'ms-kanji'],
'msdos737': [1, 'cp737'],
'msdos775': [1, 'cp775', 'ibm775'],
'open_ed-1026':[1, 'cp1026', 'ibm1026'],
'open_ed-1047':[1, 'cp1047'], # Though this isn't available in base python, it's 3rd party
'open_ed-1140':[1, 'cp1140', 'ibm1140'],
'open_ed-424': [1, 'cp424', 'ebcdic-cp-he', 'ibm424'],
'open_ed-875': [1, 'cp875'],
'pcoem437': [1, 'cp437', '437', 'ibm437'],
'pcoem850': [1, 'cp850', '850', 'ibm850'],
'pcoem852': [1, 'cp852', '852', 'ibm852'],
'pcoem857': [1, 'cp857', '857', 'ibm857'],
'pcoem858': [1, 'cp858', '858', 'ibm858'],
'pcoem860': [1, 'cp860', '860', 'ibm860'],
'pcoem862': [1, 'cp862', '862', 'ibm862'],
'pcoem863': [1, 'cp863'],
'pcoem864': [1, 'cp864', 'ibm864'],
'pcoem865': [1, 'cp865', '865', 'ibm865'],
'pcoem866': [1, 'cp866', '866', 'ibm866'],
'pcoem869': [1, 'cp869', '869', 'cp-gr', 'ibm869'],
'pcoem874': [1, 'cp874'],
'shift-jis': [2, 'shift_jis', 'csshiftjis', 'shiftjis', 'sjis', 's_jis'],
'thai': [1, 'iso8859_11', 'iso-8859-11', 'thai'],
'us-ascii': [1, 'ascii', '646', 'us-ascii'],
'utf-8': [4, 'utf_8', 'u8', 'utf', 'utf8', 'utf-8'],
'warabic': [1, 'cp1256', 'windows-1256'],
'wbaltic': [1, 'cp1257', 'windows-1257'],
'wcyrillic': [1, 'cp1251', 'windows-1251'],
'wgreek': [1, 'cp1253', 'windows-1253'],
'whebrew': [1, 'cp1255', 'windows-1255'],
'wlatin1': [1, 'cp1252', 'windows-1252'],
'wlatin2': [1, 'cp1250', 'windows-1250'],
'wturkish': [1, 'cp1254', 'windows-1254'],
'wvietnamese': [1, 'cp1258', 'windows-1258'],
'any':None,
'dec-cn':None,
'dec-jp':None,
'dec-tw':None,
'ebcdic1025':None,
'ebcdic1026':None,
'ebcdic1047':None,
'ebcdic1112':None,
'ebcdic1122':None,
'ebcdic1130':None,
'ebcdic1137':None,
'ebcdic1140':None,
'ebcdic1141':None,
'ebcdic1142':None,
'ebcdic1143':None,
'ebcdic1144':None,
'ebcdic1145':None,
'ebcdic1146':None,
'ebcdic1147':None,
'ebcdic1148':None,
'ebcdic1149':None,
'ebcdic1153':None,
'ebcdic1154':None,
'ebcdic1155':None,
'ebcdic1156':None,
'ebcdic1157':None,
'ebcdic1158':None,
'ebcdic1160':None,
'ebcdic1164':None,
'ebcdic275':None,
'ebcdic277':None,
'ebcdic278':None,
'ebcdic280':None,
'ebcdic284':None,
'ebcdic285':None,
'ebcdic297':None,
'ebcdic424':None,
'ebcdic425':None,
'ebcdic838':None,
'ebcdic870':None,
'ebcdic875':None,
'ebcdic905':None,
'ebcdic924':None,
'ebcdic-any':None,
'euc-tw':None,
'hp15-tw':None,
'ibm-930':None,
'ibm-933':None,
'ibm-935':None,
'ibm-937':None,
'ibm-939e':None,
'ibm-939':None,
'ibm-942':None,
'ibm-950':None,
'ms-936':None,
'ms-949':None,
'ms-950':None,
'msdos720':None,
'open_ed-037':None,
'open_ed-1025':None,
'open_ed-1112':None,
'open_ed-1122':None,
'open_ed-1130':None,
'open_ed-1137':None,
'open_ed-1141':None,
'open_ed-1142':None,
'open_ed-1143':None,
'open_ed-1144':None,
'open_ed-1145':None,
'open_ed-1146':None,
'open_ed-1147':None,
'open_ed-1148':None,
'open_ed-1149':None,
'open_ed-1153':None,
'open_ed-1154':None,
'open_ed-1155':None,
'open_ed-1156':None,
'open_ed-1157':None,
'open_ed-1158':None,
'open_ed-1160':None,
'open_ed-1164':None,
'open_ed-1166':None,
'open_ed-273':None,
'open_ed-275':None,
'open_ed-277':None,
'open_ed-278':None,
'open_ed-280':None,
'open_ed-284':None,
'open_ed-285':None,
'open_ed-297':None,
'open_ed-425':None,
'open_ed-500':None,
'open_ed-838':None,
'open_ed-870':None,
'open_ed-905':None,
'open_ed-924':None,
'open_ed-930':None,
'open_ed-933':None,
'open_ed-935':None,
'open_ed-937':None,
'open_ed-939e':None,
'open_ed-939':None,
'pc1098':None,
'pciscii806':None,
'pcoem1129':None,
'pcoem921':None,
'pcoem922':None,
'roman8':None
}
``` |
{
"source": "jpreese/nifi-cluster-coordinator",
"score": 3
} |
#### File: nifi_cluster_coordinator/configuration/config_loader.py
```python
import yaml
import logging
def load(config_file_location):
logger = logging.getLogger(__name__)
logger.info(f"Attempting to load config file from {config_file_location}")
stream = open(config_file_location, 'r')
return yaml.safe_load(stream)
```
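A minimal usage sketch for the loader above; the configuration path below is a placeholder for illustration, not a file shipped with the repository:
```python
from nifi_cluster_coordinator.configuration import config_loader

# 'cluster-config.yaml' is an assumed path; any file readable by yaml.safe_load works here.
config = config_loader.load('cluster-config.yaml')
print(config)
```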
#### File: nifi-cluster-coordinator/test/test_main.py
```python
import unittest
class MainUnitTests(unittest.TestCase):
def test_foo_true_is_true(self):
self.assertTrue(True)
def test_bar_false_is_false(self):
self.assertFalse(False)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpreistad/ocbpy",
"score": 4
} |
#### File: ocbpy/instruments/general.py
```python
import numpy as np
import logbook as logging
import datetime as dt
def test_file(filename):
"""Test to ensure the file is small enough to read in. Python can only
allocate 2GB of data without crashing
Parameters
------------
filename : (str)
Filename to test
Returns
---------
good_flag : (bool)
True if the file is suitable to read, False otherwise
"""
from os import path
if not path.isfile(filename):
logging.warning("name provided is not a file")
return False
fsize = path.getsize(filename)
if(fsize > 2.0e9):
logging.warning("File size [{:.2f} GB > 2 GB]".format(fsize*1e-9))
return False
elif(fsize == 0):
logging.warning("empty file [{:s}]".format(filename))
return False
return True
def load_ascii_data(filename, hlines, miss=None, fill=np.nan, hsplit=None,
inline_comment=None, invalid_raise=False, datetime_cols=[],
datetime_fmt=None, int_cols=[], str_cols=[],
max_str_length=50, header=list()):
""" Load an ascii data file into a dict of numpy array.
Parameters
------------
filename : (str)
data file name
hlines : (int)
number of lines in header. If zero, must include header.
miss : (str, sequence, or dict)
Denotes missing value options (default=None)
fill : (value, sequence, or dict)
fill value (default=NaN)
hsplit : (str, NoneType)
character separating data labels in header. None splits on all
whitespace characters. (default=None)
inline_comment : (str or NoneType)
If there are comments inline, denote the character that indicates it has
begun. If there are no comments inline, leave as the default.
(default=None)
invalid_raise : (bool)
Should the routine fail if a row of data with a different number of
columns is encountered? If false, these lines will be skipped and
all other lines will be read in. (default=False)
datetime_cols : (list of ints)
If there are date strings or values that should be converted to a
datetime object, list them in order here. Not processed as floats.
(default=[])
datetime_fmt : (str or NoneType)
Format needed to convert the datetime_cols entries into a datetime
object. Special formats permitted are: 'YEAR SOY', 'YYDDD', 'SOD'.
'YEAR SOY' must be used together; 'YYDDD' indicates years since 1900 and
day of year, and may be used with any time format; 'SOD' indicates
seconds of day, and may be used with any date format (default=None)
int_cols : (list of ints)
Data that should be processed as integers, not floats. (default=[])
str_cols : (list of ints)
Data that should be processed as strings, not floats. (default=[])
max_str_length : (int)
Maximum allowed string length. (default=50)
header : (list of str)
Header string(s) where the last line contains whitespace separated data
names (default=list())
Returns
----------
header : (list of strings)
Contains all specified header lines
out : (dict of numpy.arrays)
The dict keys are specified by the header data line, the data
for each key are stored in the numpy array
Notes
-------
Data is assumed to be float unless otherwise stated.
"""
import ocbpy.ocb_time as ocbt
#-----------------------------------------------------------------------
# Test to ensure the file is small enough to read in. Python can only
# allocate 2GB of data. If you load something larger, python will crash
if not test_file(filename):
return header, dict()
#--------------------------------------------------
# Initialize the convert_time input dictionary
dfmt_parts = list() if datetime_fmt is None else datetime_fmt.split(" ")
convert_time_input = {"year":None, "soy":None, "yyddd":None,
"date":None, "tod":None, "datetime_fmt":datetime_fmt}
time_formats = ["H", "I", "p", "M", "S", "f", "z", "Z"]
#----------------------------------------------------------------------
# Make sure the max_str_length is long enough to read datetime and that
# the time data will be cast in the correct format
if datetime_fmt is not None:
if max_str_length < len(datetime_fmt):
max_str_length = len(datetime_fmt)
if datetime_fmt.find("%y") >= 0 or datetime_fmt.find("%j") >= 0:
max_str_length += 2
if(datetime_fmt.find("%a") >= 0 or datetime_fmt.find("%b") >= 0 or
datetime_fmt.find("%Z") >= 0):
max_str_length += 1
if(datetime_fmt.find("%B") >= 0 or datetime_fmt.find("%X") >= 0 or
datetime_fmt.find("%x") >= 0):
max_str_length += 10
if datetime_fmt.find("%f") >= 0 or datetime_fmt.find("%Y") >= 0:
max_str_length += 4
if datetime_fmt.find("%z") >= 0:
max_str_length += 3
if datetime_fmt.find("%c") >= 0:
max_str_length += 20
if datetime_fmt.upper().find("YYDDD"):
max_str_length += 8
if datetime_fmt.upper().find("YEAR") >= 0:
ipart = datetime_fmt.upper().find("YEAR")
case_part = datetime_fmt[ipart:ipart+4]
int_cols.append(dfmt_parts.index(case_part))
if datetime_fmt.upper().find("SOY") >= 0:
ipart = datetime_fmt.upper().find("SOY")
case_part = datetime_fmt[ipart:ipart+3]
int_cols.append(dfmt_parts.index(case_part))
#----------------------------------------------
# Open the datafile and read the header rows
f = open(filename, "r")
in_header = str(header[-1]) if len(header) > 0 else None
if not f:
logging.error("unable to open input file [{:s}]".format(filename))
return header, dict()
for h in range(hlines):
header.append(f.readline())
f.close()
#---------------------------------------------------------------------
# Create the output dictionary keylist
if len(header) == 0:
logging.error("unable to find header of [{:d}] lines".format(hlines))
return header, dict()
keyheader = in_header if in_header is not None else header[-1]
if inline_comment is not None:
keyheader = keyheader.split(inline_comment)[0]
keyheader = keyheader.replace("#", "")
keylist = keyheader.split(hsplit)
nhead = len(keylist)
out = {k:list() for k in keylist}
#---------------------------------------------------------------------
# Build the dtype list
ldtype = [float for i in range(nhead)]
for icol in int_cols:
ldtype[icol] = int
for icol in str_cols:
ldtype[icol] = '|U{:d}'.format(max_str_length)
#---------------------------------------------------------------------
# Build and add the datetime objects to the output dictionary
dt_keys = ['datetime', 'DATETIME', 'DT', 'dt']
if len(datetime_cols) > 0 and datetime_fmt is not None:
idt = 0
while dt_keys[idt] in out.keys(): idt += 1
if idt < len(dt_keys):
keylist.append(dt_keys[idt])
out[dt_keys[idt]] = list()
# Change the datetime column input from float to string, if it is not
# supposed to be an integer
for icol in datetime_cols:
if(not icol in int_cols and
dfmt_parts[icol].upper().find("SOD") < 0):
ldtype[icol] = '|U{:d}'.format(max_str_length)
else:
idt = len(dt_keys)
#-------------------------------------------
# Open the datafile and read the data rows
try:
temp = np.genfromtxt(filename, skip_header=hlines, missing_values=miss,
filling_values=fill, comments=inline_comment,
invalid_raise=False, dtype=ldtype)
except:
logging.error("unable to read data in file [{:s}]".format(filename))
return header, out
if len(temp) > 0:
noff = 0
# When dtype is specified, output comes as a np.array of np.void objects
for line in temp:
if len(line) == nhead:
for num,name in enumerate(keylist):
if len(name) > 0:
if idt < len(dt_keys) and name == dt_keys[idt]:
# Build the convert_time input
for icol,dcol in enumerate(datetime_cols):
if dfmt_parts[dcol].find("%") == 0:
if dfmt_parts[dcol][1] in time_formats:
ckey = "tod"
else:
ckey = "date"
else:
ckey = dfmt_parts[dcol].lower()
if ckey in ['year', 'soy']:
line[dcol] = int(line[dcol])
elif ckey == 'sod':
line[dcol] = float(line[dcol])
convert_time_input[ckey] = line[dcol]
# Convert the string into a datetime object
try:
ftime = ocbt.convert_time(**convert_time_input)
except ValueError as v:
raise v
# Save the output data
out[dt_keys[idt]].append(ftime)
else:
out[name].append(line[num-noff])
else:
noff += 1
else:
estr = "unknown genfromtxt output for [{:s}]".format(filename)
logging.error(estr)
return header, dict()
del temp
# Cast all lists and numpy arrays
for k in out.keys():
try:
out[k] = np.array(out[k], dtype=type(out[k][0]))
except:
pass
return header, out
```
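The unit tests below exercise `load_ascii_data` against a bundled test file; a condensed usage sketch (the path is an assumption standing in for that whitespace-separated file) looks like this:
```python
import ocbpy.instruments.general as ocb_igen

# Hypothetical path to a whitespace-separated file with columns YEAR SOY NB PHICENT RCENT R A RERR
hh = ["YEAR SOY NB PHICENT RCENT R A RERR"]
header, data = ocb_igen.load_ascii_data("test_north_circle", 0, header=hh,
                                        datetime_cols=[0, 1], datetime_fmt="YEAR SOY")
print(sorted(data.keys()))  # the header columns plus a constructed 'datetime' key
```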
#### File: ocbpy/tests/test_general.py
```python
import ocbpy.instruments.general as ocb_igen
import unittest
import numpy as np
import logbook
class TestGeneralMethods(unittest.TestCase):
def setUp(self):
""" Initialize the OCBoundary object using the test file, as well as
the VectorData object
"""
from os import path
import ocbpy
ocb_dir = path.split(ocbpy.__file__)[0]
self.test_file = path.join(ocb_dir, "tests", "test_data",
"test_north_circle")
self.assertTrue(path.isfile(self.test_file))
self.temp_output = path.join(ocb_dir, "tests", "test_data",
"temp_gen")
self.log_handler = logbook.TestHandler()
self.log_handler.push_thread()
def tearDown(self):
import os
if os.path.isfile(self.temp_output):
os.remove(self.temp_output)
self.log_handler.pop_thread()
del self.test_file, self.log_handler
def test_file_test_true(self):
""" Test the general file testing routine with a good file
"""
self.assertTrue(ocb_igen.test_file(self.test_file))
def test_file_test_not_file(self):
""" Test the general file testing routine with a bad filename
"""
self.assertFalse(ocb_igen.test_file("/"))
self.assertEqual(len(self.log_handler.formatted_records), 1)
self.assertTrue(self.log_handler.formatted_records[0].find( \
'name provided is not a file') > 0)
def test_file_test_empty_file(self):
""" Test the general file testing routine with a bad filename
"""
# Create an empty file
open(self.temp_output, 'a').close()
self.assertFalse(ocb_igen.test_file(self.temp_output))
self.assertEqual(len(self.log_handler.formatted_records), 1)
self.assertTrue(self.log_handler.formatted_records[0].find('empty file')
> 0)
def test_load_ascii_data_badfile(self):
""" Test the general loading routine for ASCII data with bad input
"""
header, data = ocb_igen.load_ascii_data("/", 0)
self.assertIsInstance(header, list)
self.assertEqual(len(header), 0)
self.assertIsInstance(data, dict)
self.assertEqual(len(data.keys()), 0)
self.assertEqual(len(self.log_handler.formatted_records), 1)
self.assertTrue(self.log_handler.formatted_records[0].find( \
'name provided is not a file') > 0)
def test_load_ascii_data_standard(self):
""" Test the general routine to load ASCII data
"""
hh = ["YEAR SOY NB PHICENT RCENT R A RERR"]
header, data = ocb_igen.load_ascii_data(self.test_file, 0, header=hh)
# Test to ensure the output header equals the input header
self.assertListEqual(header, hh)
# Test to see that the data keys are all in the header
ktest = sorted(hh[0].split())
self.assertListEqual(ktest, sorted(list(data.keys())))
# Test the length of the data file
self.assertEqual(data['A'].shape[0], 75)
# Test the values of the last data line
test_vals = {"YEAR":2000.0, "SOY":11187202.0, "NB":9.0, "A":1.302e+07,
"PHICENT":315.29, "RCENT":2.67, "R":18.38, "RERR":0.47}
for kk in test_vals.keys():
self.assertEqual(data[kk][-1], test_vals[kk])
del hh, header, data, ktest, test_vals
def test_load_ascii_data_int_cols(self):
""" Test the general routine to load ASCII data assigning some
columns as integers
"""
hh = ["YEAR SOY NB PHICENT RCENT R A RERR"]
int_cols = [0, 1, 2]
int_keys = ["YEAR", "SOY", "NB"]
header, data = ocb_igen.load_ascii_data(self.test_file, 0, header=hh,
int_cols=int_cols)
# Test to ensure the output header equals the input header
self.assertListEqual(header, hh)
# Test to see that the data keys are all in the header
ktest = sorted(hh[0].split())
self.assertListEqual(ktest, sorted(list(data.keys())))
# Test the length of the data file
self.assertEqual(data['A'].shape[0], 75)
# Test the values of the last data line
test_vals = {"YEAR":2000, "SOY":11187202, "NB":9, "A":1.302e+07,
"PHICENT":315.29, "RCENT":2.67, "R":18.38, "RERR":0.47}
for kk in test_vals.keys():
self.assertEqual(data[kk][-1], test_vals[kk])
if kk in int_keys:
isint = (isinstance(data[kk][-1], np.int64) or
isinstance(data[kk][-1], np.int32) or
isinstance(data[kk][-1], int))
self.assertTrue(isint)
del isint
else:
self.assertIsInstance(data[kk][-1], float)
del hh, int_cols, int_keys, header, data, ktest, test_vals
def test_load_ascii_data_str_cols(self):
""" Test the general routine to load ASCII data assigning some
columns as strings
"""
hh = ["YEAR SOY NB PHICENT RCENT R A RERR"]
str_cols = [0, 1]
str_keys = ["YEAR", "SOY"]
header, data = ocb_igen.load_ascii_data(self.test_file, 0, header=hh,
str_cols=str_cols)
# Test to ensure the output header equals the input header
self.assertListEqual(header, hh)
# Test to see that the data keys are all in the header
ktest = sorted(hh[0].split())
self.assertListEqual(ktest, sorted(list(data.keys())))
# Test the length of the data file
self.assertEqual(data['A'].shape[0], 75)
# Test the values of the last data line
test_vals = {"YEAR":"2000", "SOY":"11187202", "NB":9, "A":1.302e+07,
"PHICENT":315.29, "RCENT":2.67, "R":18.38, "RERR":0.47}
for kk in test_vals.keys():
self.assertEqual(data[kk][-1], test_vals[kk])
if kk in str_keys:
try:
self.assertIsInstance(data[kk][-1], str)
except:
self.assertIsInstance(data[kk][-1], unicode)
else:
self.assertIsInstance(data[kk][-1], float)
del hh, str_cols, str_keys, ktest, test_vals, header, data
def test_load_ascii_data_w_datetime(self):
""" Test the general routine to load ASCII data
"""
import datetime as dt
hh = ["YEAR SOY NB PHICENT RCENT R A RERR"]
header, data = ocb_igen.load_ascii_data(self.test_file, 0,
datetime_cols=[0,1],
datetime_fmt="YEAR SOY",
header=hh)
# Test to ensure the output header equals the input header
self.assertListEqual(header, hh)
# Test to see that the data keys are all in the header
ktest = hh[0].split()
ktest.append("datetime")
self.assertListEqual(sorted(ktest), sorted(list(data.keys())))
# Test the length of the data file
self.assertEqual(data['A'].shape[0], 75)
# Test the values of the last data line
test_vals = {"YEAR":2000, "SOY":11187202, "NB":9.0, "A":1.302e+07,
"PHICENT":315.29, "RCENT":2.67, "R":18.38, "RERR":0.47,
"datetime":dt.datetime(2000,5,9,11,33,22)}
for kk in test_vals.keys():
self.assertEqual(data[kk][-1], test_vals[kk])
del hh, header, data, ktest, test_vals
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpremo/open_bar",
"score": 2
} |
#### File: migrations/versions/4263983bb360_.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4263983bb360'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=50), nullable=False),
sa.Column('firstName', sa.String(length=50), nullable=False),
sa.Column('lastName', sa.String(length=50), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('profileImg', sa.String(length=1000), nullable=False),
sa.Column('hashed_password', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('bars',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('description', sa.String(length=1000), nullable=False),
sa.Column('phoneNumber', sa.String(length=25), nullable=False),
sa.Column('longitude', sa.Numeric(), nullable=False),
sa.Column('latitude', sa.Numeric(), nullable=False),
sa.Column('street', sa.String(length=255), nullable=False),
sa.Column('state', sa.String(length=50), nullable=False),
sa.Column('zipcode', sa.Integer(), nullable=False),
sa.Column('barSeats', sa.Integer(), nullable=False),
sa.Column('dayAndTime', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
sa.Column('bannerImg', sa.String(length=1000), nullable=False),
sa.Column('ownerId', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['ownerId'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('favorites',
sa.Column('barId', sa.Integer(), nullable=True),
sa.Column('userId', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['barId'], ['bars.id'], ),
sa.ForeignKeyConstraint(['userId'], ['users.id'], )
)
op.create_table('images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('photoUrl', sa.String(length=1000), nullable=False),
sa.Column('barId', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['barId'], ['bars.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('reservations',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('partySize', sa.Integer(), nullable=False),
sa.Column('date', sa.Date(), nullable=False),
sa.Column('time', sa.Time(), nullable=False),
sa.Column('barId', sa.Integer(), nullable=False),
sa.Column('userId', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['barId'], ['bars.id'], ),
sa.ForeignKeyConstraint(['userId'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('reviews',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('overall', sa.Integer(), nullable=False),
sa.Column('food', sa.Integer(), nullable=False),
sa.Column('service', sa.Integer(), nullable=False),
sa.Column('ambience', sa.Integer(), nullable=False),
sa.Column('value', sa.Integer(), nullable=False),
sa.Column('review', sa.String(length=2500), nullable=False),
sa.Column('barId', sa.Integer(), nullable=False),
sa.Column('userId', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['barId'], ['bars.id'], ),
sa.ForeignKeyConstraint(['userId'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('reviews')
op.drop_table('reservations')
op.drop_table('images')
op.drop_table('favorites')
op.drop_table('bars')
op.drop_table('users')
# ### end Alembic commands ###
``` |
{
"source": "jprester/pyfra",
"score": 3
} |
#### File: pyfra/utils/files.py
```python
import json
import csv
from pyfra.remote import RemoteFile
__all__ = ['fwrite', 'fread', 'jread', 'jwrite', 'csvread', 'csvwrite']
def fname_fn(fn):
def _fn(fname, *a, **k):
# use Remote impl if argument is RemoteFile
if isinstance(fname, RemoteFile):
remfile = fname
return getattr(remfile.remote, fn.__name__)(remfile.fname, *a, **k)
# map over list if fname is list
if isinstance(fname, list):
fnames = fname
return [
fn(fname, *a, **k)
for fname in fnames
]
return fn(fname, *a, **k)
return _fn
@fname_fn
def fwrite(fname, content):
with open(fname, 'w') as fh:
fh.write(content)
@fname_fn
def fread(fname):
with open(fname) as fh:
return fh.read()
@fname_fn
def jread(fname):
with open(fname) as fh:
return json.load(fh)
@fname_fn
def jwrite(fname, content):
with open(fname, 'w') as fh:
json.dump(content, fh)
@fname_fn
def csvread(fname, colnames=None):
fh = open(fname)
if fname[-4:] == ".tsv":
rdr = csv.reader(fh, delimiter="\t")
else:
rdr = csv.reader(fh)
if colnames:
cols = colnames
else:
cols = list(next(rdr))
for ob in rdr:
yield {
k: v for k, v in zip(cols, [*ob, *[None for _ in range(len(cols) - len(ob))]])
}
@fname_fn
def csvwrite(fname, data, colnames=None):
fh = open(fname, 'w')
if colnames is None:
colnames = data[0].keys()
wtr = csv.writer(fh)
wtr.writerow(colnames)
for dat in data:
assert dat.keys() == colnames
wtr.writerow([dat[k] for k in colnames])
```
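A short round-trip sketch of the helpers above; the file names are placeholders written to the current directory:
```python
from pyfra.utils.files import jwrite, jread, csvwrite, csvread

jwrite('example.json', {'answer': 42})
print(jread('example.json'))  # {'answer': 42}

rows = [{'name': 'a', 'value': '1'}, {'name': 'b', 'value': '2'}]
csvwrite('example.csv', rows)
print(list(csvread('example.csv')))  # the same rows, read back as dicts
```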
#### File: pyfra/utils/misc.py
```python
from sqlitedict import SqliteDict
import hashlib
import json
import os
os.makedirs('state', exist_ok=True)
main_state = SqliteDict("state/main.db", autocommit=True)
def once(fn, name=None):
""" Only run a function once, saving its return value to disk. Args must be json-encodable. """
fname = name if name is not None else fn.__name__
def _fn(*args, **kwargs):
# hash the arguments
arghash = hashlib.sha256(json.dumps([args, kwargs], sort_keys=True).encode()).hexdigest()
key = f"once-{fname}-{arghash}-seen"
if key in main_state: return main_state[key]
ret = fn(*args, **kwargs)
main_state[key] = ret
return ret
return _fn  # hand the wrapped, caching function back to the caller
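# Usage sketch (illustrative only): wrap a function so repeated calls with the same
# JSON-encodable arguments reuse the value cached in state/main.db.
def _expensive_square(x):
    return x * x

cached_square = once(_expensive_square, name="expensive_square")
print(cached_square(3))  # computed on the first run, read from SqliteDict afterwards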
``` |
{
"source": "jpreszler/tslearn",
"score": 3
} |
#### File: tslearn/tslearn/datasets.py
```python
import numpy
import zipfile
import tempfile
import shutil
import os
import sys
import csv
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
try:
from zipfile import BadZipFile as BadZipFile
except ImportError:
from zipfile import BadZipfile as BadZipFile
from tslearn.utils import to_time_series_dataset
__author__ = '<NAME> <EMAIL>ain.tavenard[at]univ-rennes2.fr'
def extract_from_zip_url(url, target_dir=None, verbose=False):
"""Download a zip file from its URL and unzip it.
Parameters
----------
url : string
URL from which to download.
target_dir : str or None (default: None)
Directory to be used to extract unzipped downloaded files.
verbose : bool (default: False)
Whether to print information about the process (cached files used, ...)
Returns
-------
str or None
Directory in which the zip file has been extracted if the process was
successful, None otherwise
"""
fname = os.path.basename(url)
tmpdir = tempfile.mkdtemp()
local_zip_fname = os.path.join(tmpdir, fname)
urlretrieve(url, local_zip_fname)
try:
if not os.path.exists(target_dir):
os.makedirs(target_dir)
zipfile.ZipFile(local_zip_fname, "r").extractall(path=target_dir)
shutil.rmtree(tmpdir)
if verbose:
print("Successfully extracted file %s to path %s" %
(local_zip_fname, target_dir))
return target_dir
except BadZipFile:
shutil.rmtree(tmpdir)
if verbose:
sys.stderr.write("Corrupted zip file encountered, aborting.\n")
return None
def in_file_string_replace(filename, old_string, new_string):
""" String replacement within a text file. It is used to fix typos in
the downloaded csv file.
The code was modified from "https://stackoverflow.com/questions/4128144/\
replace-string-within-file-contents"
Parameters
----------
filename : string
Path to the file where strings should be replaced
old_string : str
The string to be replaced in the file.
new_string : str
The new string that will replace old_string
"""
with open(filename) as f:
s = f.read()
with open(filename, 'w') as f:
s = s.replace(old_string, new_string)
f.write(s)
class UCR_UEA_datasets(object):
"""A convenience class to access UCR/UEA time series datasets.
When using one (or several) of these datasets in research projects, please
cite [1]_.
Parameters
----------
use_cache : bool (default: True)
Whether a cached version of the dataset should be used, if found.
Notes
-----
Downloading dataset files can be time-consuming; it is recommended to use
`use_cache=True` (default) so that the download only happens once per dataset
and a cached version of the data is used afterwards.
References
----------
.. [1] <NAME>, <NAME>, <NAME> and <NAME>, The UEA & UCR Time
Series Classification Repository, www.timeseriesclassification.com
"""
def __init__(self, use_cache=True):
self.use_cache = use_cache
base_dir = os.path.expanduser(os.path.join("~", ".tslearn",
"datasets", "UCR_UEA"))
self._data_dir = base_dir
if not os.path.exists(self._data_dir):
os.makedirs(self._data_dir)
try:
url_baseline = ("http://www.timeseriesclassification.com/" +
"singleTrainTest.csv")
self._baseline_scores_filename = os.path.join(
self._data_dir, os.path.basename(url_baseline))
urlretrieve(url_baseline, self._baseline_scores_filename)
# fix typos in that CSV to match with the name in the download link
in_file_string_replace(self._baseline_scores_filename,
"CinCECGtorso", "CinCECGTorso")
in_file_string_replace(self._baseline_scores_filename,
"StarlightCurves", "StarLightCurves")
except:
self._baseline_scores_filename = None
self._ignore_list = ["Data Descriptions"]
def baseline_accuracy(self, list_datasets=None, list_methods=None):
"""Report baseline performances as provided by UEA/UCR website.
Parameters
----------
list_datasets: list or None (default: None)
A list of strings indicating for which datasets performance should
be reported.
If None, performance is reported for all datasets.
list_methods: list or None (default: None)
A list of baselines methods for which performance should be
reported.
If None, performance for all baseline methods is reported.
Returns
-------
dict
A dictionary in which keys are dataset names and associated values
are themselves dictionaries that provide accuracy scores for the
requested methods.
Examples
--------
>>> uea_ucr = UCR_UEA_datasets()
>>> dict_acc = uea_ucr.baseline_accuracy(
... list_datasets=["Adiac", "ChlorineConcentration"],
... list_methods=["C45"])
>>> len(dict_acc)
2
>>> dict_acc["Adiac"] # doctest: +ELLIPSIS
{'C45': 0.542199...}
>>> dict_acc = uea_ucr.baseline_accuracy()
>>> len(dict_acc)
85
"""
d_out = {}
for perfs_dict in csv.DictReader(
open(self._baseline_scores_filename, "r"), delimiter=","):
dataset_name = perfs_dict[""]
if list_datasets is None or dataset_name in list_datasets:
d_out[dataset_name] = {}
for m in perfs_dict.keys():
if m != "" and (list_methods is None or m in list_methods):
try:
d_out[dataset_name][m] = float(perfs_dict[m])
except ValueError: # Missing score case (score == "")
pass
return d_out
def list_datasets(self):
"""List datasets in the UCR/UEA archive.
Examples
--------
>>> l = UCR_UEA_datasets().list_datasets()
>>> len(l)
85
"""
datasets = []
for perfs_dict in csv.DictReader(
open(self._baseline_scores_filename, "r"), delimiter=","):
datasets.append(perfs_dict[""])
return datasets
def list_cached_datasets(self):
"""List datasets from the UCR/UEA archive that are available in cache.
Examples
--------
>>> l = UCR_UEA_datasets().list_cached_datasets()
>>> 0 <= len(l) <= len(UCR_UEA_datasets().list_datasets())
True
"""
return [path for path in os.listdir(self._data_dir)
if os.path.isdir(os.path.join(self._data_dir, path)) and
path not in self._ignore_list]
def load_dataset(self, dataset_name):
"""Load a dataset from the UCR/UEA archive from its name.
Parameters
----------
dataset_name : str
Name of the dataset. Should be in the list returned by
`list_datasets`
Returns
-------
numpy.ndarray of shape (n_ts_train, sz, d) or None
Training time series. None if unsuccessful.
numpy.ndarray of integers with shape (n_ts_train, ) or None
Training labels. None if unsuccessful.
numpy.ndarray of shape (n_ts_test, sz, d) or None
Test time series. None if unsuccessful.
numpy.ndarray of integers with shape (n_ts_test, ) or None
Test labels. None if unsuccessful.
Examples
--------
>>> data_loader = UCR_UEA_datasets()
>>> X_train, y_train, X_test, y_test = data_loader.load_dataset(
... "TwoPatterns")
>>> print(X_train.shape)
(1000, 128, 1)
>>> print(y_train.shape)
(1000,)
>>> X_train, y_train, X_test, y_test = data_loader.load_dataset(
... "StarLightCurves")
>>> print(X_train.shape)
(1000, 1024, 1)
>>> X_train, y_train, X_test, y_test = data_loader.load_dataset(
... "CinCECGTorso")
>>> print(X_train.shape)
(40, 1639, 1)
>>> X_train, y_train, X_test, y_test = data_loader.load_dataset(
... "DatasetThatDoesNotExist")
>>> print(X_train)
None
"""
full_path = os.path.join(self._data_dir, dataset_name)
fname_train = dataset_name + "_TRAIN.txt"
fname_test = dataset_name + "_TEST.txt"
if (not os.path.exists(os.path.join(full_path, fname_train)) or
not os.path.exists(os.path.join(full_path, fname_test))):
url = ("http://www.timeseriesclassification.com/Downloads/%s.zip"
% dataset_name)
for fname in [fname_train, fname_test]:
if os.path.exists(os.path.join(full_path, fname)):
os.remove(os.path.join(full_path, fname))
extract_from_zip_url(url, target_dir=full_path, verbose=False)
try:
data_train = numpy.loadtxt(os.path.join(full_path, fname_train),
delimiter=None)
data_test = numpy.loadtxt(os.path.join(full_path, fname_test),
delimiter=None)
except:
return None, None, None, None
X_train = to_time_series_dataset(data_train[:, 1:])
y_train = data_train[:, 0].astype(numpy.int)
X_test = to_time_series_dataset(data_test[:, 1:])
y_test = data_test[:, 0].astype(numpy.int)
return X_train, y_train, X_test, y_test
def cache_all(self):
"""Cache all datasets from the UCR/UEA archive for later use.
"""
for dataset_name in self.list_datasets():
try:
self.load_dataset(dataset_name)
except:
sys.stderr.write("Could not cache dataset %s properly.\n"
% dataset_name)
class CachedDatasets(object):
"""A convenience class to access cached time series datasets.
When using the Trace dataset, please cite [1]_.
References
----------
.. [1] <NAME>, <NAME>, <NAME> and <NAME>, The UEA & UCR Time
Series Classification Repository, www.timeseriesclassification.com
"""
def __init__(self):
self.path = os.path.join(os.path.dirname(__file__), ".cached_datasets")
def list_datasets(self):
"""List cached datasets."""
return [fname[:fname.rfind(".")]
for fname in os.listdir(self.path)
if fname.endswith(".npz")]
def load_dataset(self, dataset_name):
"""Load a cached dataset from its name.
Parameters
----------
dataset_name : str
Name of the dataset. Should be in the list returned by
`list_datasets`
Returns
-------
numpy.ndarray of shape (n_ts_train, sz, d) or None
Training time series. None if unsuccessful.
numpy.ndarray of integers with shape (n_ts_train, ) or None
Training labels. None if unsuccessful.
numpy.ndarray of shape (n_ts_test, sz, d) or None
Test time series. None if unsuccessful.
numpy.ndarray of integers with shape (n_ts_test, ) or None
Test labels. None if unsuccessful.
Examples
--------
>>> data_loader = CachedDatasets()
>>> X_train, y_train, X_test, y_test = data_loader.load_dataset(
... "Trace")
>>> print(X_train.shape)
(100, 275, 1)
>>> print(y_train.shape)
(100,)
"""
npzfile = numpy.load(os.path.join(self.path, dataset_name + ".npz"))
X_train = npzfile["X_train"]
X_test = npzfile["X_test"]
y_train = npzfile["y_train"]
y_test = npzfile["y_test"]
return X_train, y_train, X_test, y_test
```
#### File: tslearn/tslearn/preprocessing.py
```python
import numpy
from sklearn.base import TransformerMixin
from scipy.interpolate import interp1d
import warnings
from tslearn.utils import to_time_series_dataset, check_equal_size, ts_size
__author__ = '<NAME> <EMAIL>ain.tavenard[at]univ-rennes2.fr'
class TimeSeriesResampler(TransformerMixin):
"""Resampler for time series. Resample time series so that they reach the
target size.
Parameters
----------
sz : int
Size of the output time series.
Examples
--------
>>> TimeSeriesResampler(sz=5).fit_transform([[0, 3, 6]])
array([[[0. ],
[1.5],
[3. ],
[4.5],
[6. ]]])
"""
def __init__(self, sz):
self.sz_ = sz
def fit(self, X, y=None, **kwargs):
"""A dummy method such that it complies to the sklearn requirements.
Since this method is completely stateless, it just returns itself.
Parameters
----------
X
Ignored
Returns
-------
self
"""
return self
def transform(self, X, **kwargs):
"""Fit to data, then transform it.
Parameters
----------
X : array-like
Time series dataset to be resampled.
Returns
-------
numpy.ndarray
Resampled time series dataset.
"""
X_ = to_time_series_dataset(X)
n_ts, sz, d = X_.shape
equal_size = check_equal_size(X_)
X_out = numpy.empty((n_ts, self.sz_, d))
for i in range(X_.shape[0]):
xnew = numpy.linspace(0, 1, self.sz_)
if not equal_size:
sz = ts_size(X_[i])
for di in range(d):
f = interp1d(numpy.linspace(0, 1, sz), X_[i, :sz, di],
kind="slinear")
X_out[i, :, di] = f(xnew)
return X_out
class TimeSeriesScalerMinMax(TransformerMixin):
"""Scaler for time series. Scales time series so that their span in each
dimension is between ``min`` and ``max``.
Parameters
----------
value_range : tuple (default: (0., 1.))
The minimum and maximum value for the output time series.
min : float (default: 0.)
Minimum value for output time series.
.. deprecated:: 0.2
min is deprecated in version 0.2 and will be
removed in 0.4. Use value_range instead.
max : float (default: 1.)
Maximum value for output time series.
.. deprecated:: 0.2
min is deprecated in version 0.2 and will be
removed in 0.4. Use value_range instead.
Notes
-----
This method requires a dataset of equal-sized time series.
Examples
--------
>>> TimeSeriesScalerMinMax(value_range=(1., 2.)).fit_transform([[0, 3, 6]])
array([[[1. ],
[1.5],
[2. ]]])
"""
def __init__(self, value_range=(0., 1.), min=None, max=None):
self.value_range = value_range
self.min_ = min
self.max_ = max
def fit(self, X, y=None, **kwargs):
"""A dummy method such that it complies to the sklearn requirements.
Since this method is completely stateless, it just returns itself.
Parameters
----------
X
Ignored
Returns
-------
self
"""
return self
def transform(self, X, y=None, **kwargs):
"""Will normalize (min-max) each of the timeseries. IMPORTANT: this
transformation is completely stateless, and is applied to each of
the timeseries individually.
Parameters
----------
X : array-like
Time series dataset to be rescaled.
Returns
-------
numpy.ndarray
Rescaled time series dataset.
"""
if self.min_ is not None:
warnings.warn(
"'min' is deprecated in version 0.2 and will be "
"removed in 0.4. Use value_range instead.",
DeprecationWarning, stacklevel=2)
self.value_range = (self.min_, self.value_range[1])
if self.max_ is not None:
warnings.warn(
"'max' is deprecated in version 0.2 and will be "
"removed in 0.4. Use value_range instead.",
DeprecationWarning, stacklevel=2)
self.value_range = (self.value_range[0], self.max_)
if self.value_range[0] >= self.value_range[1]:
raise ValueError("Minimum of desired range must be smaller"
" than maximum. Got %s." % str(self.value_range))
X_ = to_time_series_dataset(X)
min_t = numpy.min(X_, axis=1)[:, numpy.newaxis, :]
max_t = numpy.max(X_, axis=1)[:, numpy.newaxis, :]
range_t = max_t - min_t
nomin = (X_ - min_t) * (self.value_range[1] - self.value_range[0])
X_ = nomin / range_t + self.value_range[0]
return X_
class TimeSeriesScalerMeanVariance(TransformerMixin):
"""Scaler for time series. Scales time series so that their mean (resp.
standard deviation) in each dimension is
mu (resp. std).
Parameters
----------
mu : float (default: 0.)
Mean of the output time series.
std : float (default: 1.)
Standard deviation of the output time series.
Notes
-----
This method requires a dataset of equal-sized time series.
Examples
--------
>>> TimeSeriesScalerMeanVariance(mu=0.,
... std=1.).fit_transform([[0, 3, 6]])
array([[[-1.22474487],
[ 0. ],
[ 1.22474487]]])
"""
def __init__(self, mu=0., std=1.):
self.mu_ = mu
self.std_ = std
self.global_mean = None
self.global_std = None
def fit(self, X, y=None, **kwargs):
"""A dummy method such that it complies to the sklearn requirements.
Since this method is completely stateless, it just returns itself.
Parameters
----------
X
Ignored
Returns
-------
self
"""
return self
def transform(self, X, **kwargs):
"""Fit to data, then transform it.
Parameters
----------
X
Time series dataset to be rescaled
Returns
-------
numpy.ndarray
Rescaled time series dataset
"""
X_ = to_time_series_dataset(X)
mean_t = numpy.mean(X_, axis=1)[:, numpy.newaxis, :]
std_t = numpy.std(X_, axis=1)[:, numpy.newaxis, :]
std_t[std_t == 0.] = 1.
X_ = (X_ - mean_t) * self.std_ / std_t + self.mu_
return X_
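# Usage sketch: resample a ragged toy dataset to a common length, then z-normalize it.
if __name__ == "__main__":
    raw = [[0, 3, 6], [1, 2, 3, 4, 5]]
    resampled = TimeSeriesResampler(sz=4).fit_transform(raw)
    scaled = TimeSeriesScalerMeanVariance(mu=0., std=1.).fit_transform(resampled)
    print(scaled.shape)  # (2, 4, 1)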
``` |
{
"source": "jprevc/news_crawlers",
"score": 3
} |
#### File: news_crawlers/spiders/carobni_svet_spider.py
```python
import os
from typing import List
import yaml
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
class CarobniSvetSpider(scrapy.Spider):
"""
Spider for carobni-svet.com
"""
# 'parse' method does not have to be overridden here
# pylint: disable=abstract-method
name = "carobni_svet"
home_path = os.environ.get('NEWS_CRAWLERS_HOME', os.path.dirname(__file__))
# get configuration data
with open(os.path.join(home_path, 'carobni_svet_configuration.yaml'), 'r') as f:
config_data = yaml.safe_load(f)
def start_requests(self) -> List[scrapy.http.FormRequest]:
"""
Starts requests for crawling. First request needs to enter the login page.
:return: List of requests for further crawling.
"""
return [scrapy.http.FormRequest(
url=self.config_data['urls']['login'],
formdata={
'email': os.environ.get('EMAIL_USER'),
'password': <PASSWORD>('<PASSWORD>')
},
callback=self.open_images
)]
def open_images(self, response) -> scrapy.Request:
"""
Opens images page.
:return: Request for further crawling.
"""
# with login complete, access photo library
return scrapy.Request(url=self.config_data['urls']['photos'], callback=self.get_images)
def get_images(self, response):
"""
Yields crawled data for images - URL for each image
"""
# get all image items in 'galerija'
image_items = response.xpath('//div[@id="galerija"]/a')
for item in image_items:
yield {
'type': "image",
'url': item.attrib['href']
}
yield scrapy.Request(url=self.config_data['urls']['blog'], callback=self.get_blog, dont_filter=True)
@staticmethod
def get_blog(response):
"""
Yields crawled data for blog - ID (title) of each blog entry
"""
# get all blog items
blog_items = response.xpath('//div[@id="blogs"]/*')
for item in blog_items:
yield {
'type': "blog",
'url': item.attrib['id']
}
if __name__ == '__main__':
process = CrawlerProcess(get_project_settings())
process.crawl('carobni_svet')
process.start() # the script will block here until the crawling is finished
```
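The structure of `carobni_svet_configuration.yaml` is not shown in this excerpt; the sketch below is inferred from the keys the spider reads (`config_data['urls']['login'|'photos'|'blog']`) and uses placeholder URLs:
```python
import yaml

example_configuration = yaml.safe_load("""
urls:
  login: https://carobni-svet.com/login      # placeholder
  photos: https://carobni-svet.com/photos    # placeholder
  blog: https://carobni-svet.com/blog        # placeholder
""")
print(example_configuration['urls']['photos'])
```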
#### File: jprevc/news_crawlers/scrape.py
```python
import os
import json
import sys
import yaml
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from notificators import EmailNotificator, PushoverNotificator
def run_spider(spider_name: str) -> list:
"""
Runs spider and returns collected (scraped) data.
:param spider_name: Name of spider to be run.
:return: Collected (scraped) data.
"""
crawled_output_path = spider_name + '.json'
# if output file exists, remove it, otherwise scrapy will just append new data to it
if os.path.exists(crawled_output_path):
os.remove(crawled_output_path)
# set settings for spider
settings = get_project_settings()
settings['FEED_FORMAT'] = 'json'
settings['FEED_URI'] = crawled_output_path
# create new crawler process and run it
process = CrawlerProcess(settings)
process.crawl(spider_name)
process.start() # the script will block here until the crawling is finished
# open resulting json file and read its contents
with open(crawled_output_path, 'r') as file:
scraped_data = json.load(file)
# remove json file, which was created when crawling - it is not needed anymore
os.remove(crawled_output_path)
return scraped_data
def get_cached_items(cached_items_path: str) -> list:
"""
Returns cached (previously scraped) items from file.
:param cached_items_path: Path to file which contains items, that were scraped
in the previous run.
:return: List of cached items. If specified file does not exist, an empty list
will be returned.
"""
if os.path.exists(cached_items_path):
with open(cached_items_path, 'r') as file:
cached_data = json.load(file)
else:
cached_data = []
return cached_data
def get_notificator(notificator_type: str, recipients: list):
"""
Creates a notificator according to specified type.
:param notificator_type: Notificator type. Can either be 'email' or 'pushover'.
:param recipients: List of recipients to which messages should be sent.
:return: Notificator object.
:rtype: NotificatorBase
"""
notificator_map = {'email': lambda: EmailNotificator(recipients, os.environ.get('EMAIL_USER'),
os.environ.get('EMAIL_PASS')),
'pushover': lambda: PushoverNotificator(recipients, os.environ.get('PUSHOVER_APP_TOKEN'))}
return notificator_map[notificator_type]()
if __name__ == '__main__':
spider = sys.argv[1]
home_path = os.environ.get('NEWS_CRAWLERS_HOME', os.path.dirname(__file__))
# create __cache__ folder in which *_cache.json files will be stored
cache_folder = os.path.join(home_path, '__cache__')
if not os.path.exists(cache_folder):
os.makedirs(cache_folder)
# read configuration for this spider
configuration_path = os.path.join(home_path, spider + '_configuration.yaml')
with open(configuration_path, 'r') as f:
spider_configuration = yaml.safe_load(f)
# run spider to acquire crawled data
crawled_data = run_spider(spider)
# get previously crawled cached items
cached_json = os.path.join(cache_folder, spider + '_cached.json')
cached_spider_data = get_cached_items(cached_json)
# check which crawled items are new
new_data = [item for item in crawled_data if item not in cached_spider_data]
# if new items have been found, send a notification and add that data to cached items
if new_data:
# send message with each configured notificator
for notificator_type_str, notificator_data in spider_configuration['notifications'].items():
notificator = get_notificator(notificator_type_str, notificator_data['recipients'])
notificator.send_items(spider + ' news', new_data, notificator_data['message_body_format'],
send_separately=notificator_data.get('send_separately', False))
# append new items to cached ones and write all back to file
cached_spider_data += list(new_data)
with open(cached_json, 'w+') as f:
json.dump(cached_spider_data, f)
```
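scrape.py is invoked as `python scrape.py <spider_name>` and expects the same `<spider_name>_configuration.yaml` file to carry a `notifications` section. The exact schema is defined by notificators.py (not shown here), so the sketch below only mirrors the keys accessed above and uses placeholder values:
```python
import yaml

notifications_example = yaml.safe_load("""
notifications:
  email:
    recipients:
      - [email protected]            # placeholder address
    message_body_format: "{url}"        # placeholder format string
    send_separately: false
""")
print(notifications_example['notifications']['email']['recipients'])
```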
#### File: news_crawlers/tests/mocks.py
```python
class HttpsSessionMock:
"""
HTTPS Session mock class.
"""
def __init__(self):
self.simulated_messages = []
def post(self, url, data, headers):
self.simulated_messages.append(data['message'])
class SmtpMock():
"""
SMTP mock class
"""
def __init__(self):
self.simulated_messages = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def ehlo(self):
pass
def starttls(self):
pass
def login(self, user, password):
pass
def sendmail(self, user, recipients, message):
self.simulated_messages.append(message)
``` |
{
"source": "jprevc/SnakeGame",
"score": 4
} |
#### File: jprevc/SnakeGame/snake_utility.py
```python
import random
class Snake:
"""
Snake class. Defines one snake for one player.
:param start_pos: Starting position for the snake. This is the position of the head block
in pixels, given as a tuple of x and y coordinates.
:type start_pos: tuple of int
:param move_keys: Dictionary that binds directions to keyboard keys. Dictionary should
have four keys: 'up', 'right', 'down', and 'left'. Corresponding values
should be pygame keyboard codes.
:type move_keys: dict
:param color: Color of the snake as rgb code in tuple.
:type color: tuple
:param block_size: Size of one block of the snake in pixels.
:type block_size: int
:param num_of_start_blocks: Number of starting blocks for the snake.
:type num_of_start_blocks: int
"""
def __init__(self,
start_pos,
move_keys,
color,
block_size,
num_of_start_blocks):
self.block_size = block_size
self.start_pos = start_pos
self.move_keys = move_keys
self.color = color
self.num_of_start_blocks = num_of_start_blocks
self.curr_dir = [1, 0]
self.key_stack = []
self.collision = False
# set first start blocks
self.block_pos_lst = []
for i in range(num_of_start_blocks):
self.block_pos_lst.append((self.start_pos[0] - i * self.block_size, self.start_pos[1]))
def get_dir_from_keystack(self):
"""
Updates snake's direction by checking which key was pressed.
"""
if self.key_stack:
key_pressed = self.key_stack[0]
if key_pressed == self.move_keys['up']:
new_dir = [0, -1]
elif key_pressed == self.move_keys['right']:
new_dir = [1, 0]
elif key_pressed == self.move_keys['down']:
new_dir = [0, 1]
elif key_pressed == self.move_keys['left']:
new_dir = [-1, 0]
else:
new_dir = self.curr_dir
# if snake just reverts direction, don't allow it
if new_dir == [-self.curr_dir[0], -self.curr_dir[1]]:
new_dir = self.curr_dir
self.curr_dir = new_dir
self.key_stack.pop(0)
def set_new_state(self, game_dims, snakes_lst):
"""
Sets new snake position and also checks if there was a collision with game
frames or other snakes.
:param game_dims: Game frame dimensions as tuple of x and y.
:type game_dims: tuple
:param snakes_lst: List containing all snakes in the game.
:type snakes_lst: list of Snake
"""
# add new block to front of snake according to direction
new_block = [(self.block_pos_lst[0][0] + self.curr_dir[0]*self.block_size,
self.block_pos_lst[0][1] + self.curr_dir[1]*self.block_size)]
self.block_pos_lst = new_block + self.block_pos_lst
# remove last block from snake
self.block_pos_lst.pop()
# check for collision with screen frame or with other snakes
# get list of snakes with self removed from it
othr_snake_lst = [snake for snake in snakes_lst if snake is not self]
if self.is_frame_collision(game_dims) or self.is_snake_collision(othr_snake_lst):
self.collision = True
else:
self.collision = False
def is_snake_collision(self, other_snakes):
"""
Returns True if snake is in collision with itself or other snakes.
:param other_snakes: List of other snakes in the game.
:type other_snakes: list of Snake
:return: True, if snake is in collision with itself or other snakes, False
otherwise.
:rtype: bool
"""
# check for collision with itself
if self.block_pos_lst[0] in self.block_pos_lst[1:]:
return True
# check for collision with other snakes
for snake in other_snakes:
if self.block_pos_lst[0] in snake.block_pos_lst:
return True
return False
def is_frame_collision(self, game_dims):
"""
Returns True if snake is in collision with game frame.
:param game_dims: Game frame dimensions as tuple of x and y.
:type game_dims: tuple
:return: True, if snake is in collision with game frame, False
otherwise.
:rtype: bool
"""
return not ((0 <= self.block_pos_lst[0][0] < game_dims[0]) and
(0 <= self.block_pos_lst[0][1] < game_dims[1]))
class Cherry:
"""
Cherry class, defines one cherry in the game.
:param block_size: Dimension of the block, which represents a cherry.
:type block_size: int
"""
def __init__(self, block_size):
self.block_size = block_size
self.position = None
def _is_cherry_position_valid(self, snake_lst):
"""
Checks that cherry position is not placed onto some snake.
:param snake_lst: List of snakes in the game.
:type snake_lst: list of Snake
:return: True, if cherry is not placed on one of the snakes, False otherwise.
:rtype: bool
"""
for snake in snake_lst:
if self.position in snake.block_pos_lst:
return False
return True
def set_new_random_position(self, snake_lst, game_dims):
"""
Sets new random position for cherry.
:param snake_lst: List of snakes in the game.
:type snake_lst: list of Snake
:param game_dims: Game frame dimensions as tuple of x and y.
:type game_dims: tuple
"""
self.position = (random.randrange(0, game_dims[0], self.block_size),
random.randrange(0, game_dims[1], self.block_size))
# recursively call function until new cherry position is valid
if not self._is_cherry_position_valid(snake_lst):
self.set_new_random_position(snake_lst, game_dims)
class SnakeGameStatusFlags:
COLLISION_OCCURENCE = 1
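# Minimal wiring sketch (illustrative): key codes normally come from pygame (e.g. pygame.K_UP);
# plain integers are used here so the snippet runs without any dependencies.
if __name__ == "__main__":
    keys = {'up': 0, 'right': 1, 'down': 2, 'left': 3}
    snake = Snake(start_pos=(60, 60), move_keys=keys, color=(0, 255, 0),
                  block_size=20, num_of_start_blocks=3)
    cherry = Cherry(block_size=20)
    cherry.set_new_random_position([snake], game_dims=(200, 200))
    snake.key_stack.append(keys['down'])
    snake.get_dir_from_keystack()
    snake.set_new_state(game_dims=(200, 200), snakes_lst=[snake])
    print(snake.block_pos_lst[0], snake.collision, cherry.position)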
``` |
{
"source": "JPrevost/oai-pmh-harvester",
"score": 2
} |
#### File: oai-pmh-harvester/harvester/oai.py
```python
import json
import logging
from typing import Iterator, Optional
import smart_open
from sickle import Sickle
from sickle.models import Record
logger = logging.getLogger(__name__)
class OAIClient:
def __init__(
self,
source_url: str,
metadata_format: Optional[str] = None,
from_date: Optional[str] = None,
until_date: Optional[str] = None,
set_spec: Optional[str] = None,
) -> None:
self.source_url = source_url
self.client = Sickle(self.source_url)
self.metadata_format = metadata_format
self._set_params(metadata_format, from_date, until_date, set_spec)
def _set_params(
self,
metadata_format: Optional[str],
from_date: Optional[str],
until_date: Optional[str],
set_spec: Optional[str],
) -> None:
params = {}
if metadata_format:
params["metadataPrefix"] = metadata_format
if from_date:
params["from"] = from_date
if until_date:
params["until"] = until_date
if set_spec:
params["set"] = set_spec
self.params = params
def get_identifiers(self) -> list[str]:
responses = self.client.ListIdentifiers(**self.params)
return [record.identifier for record in responses]
def get_records(
self, identifiers: list[str], exclude_deleted: bool
) -> Iterator[Record]:
for identifier in identifiers:
record = self.client.GetRecord(
identifier=identifier, metadataPrefix=self.metadata_format
)
logger.debug(
"Record retrieved:\n Deleted:%s\n Header:%s\n Raw:%s\n",
record.deleted,
record.header,
record.raw,
)
if exclude_deleted is True and record.deleted is True:
continue
yield record
def get_sets(self):
responses = self.client.ListSets()
sets = [{"Set name": set.setName, "Set spec": set.setSpec} for set in responses]
return sets
def write_records(records: Iterator, filepath: str) -> int:
count = 0
with smart_open.open(filepath, "wb") as file:
file.write("<records>\n".encode())
for record in records:
file.write(" ".encode() + record.raw.encode() + "\n".encode())
count += 1
if count % 1000 == 0:
logger.info(
"Status update: %s records written to output file so far!", count
)
file.write("</records>".encode())
return count
def write_sets(sets: list[dict[str, str]], filepath: str) -> None:
with open(filepath, "w") as file:
file.write(json.dumps(sets, indent=2))
```
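A condensed harvesting sketch built from the client above; the endpoint and date window mirror the VCR-based tests below, while the output path is a placeholder:
```python
from harvester.oai import OAIClient, write_records

client = OAIClient(
    "https://dspace.mit.edu/oai/request",
    metadata_format="oai_dc",
    from_date="2017-12-14",
    until_date="2017-12-14",
)
identifiers = client.get_identifiers()
records = client.get_records(identifiers, exclude_deleted=True)
count = write_records(records, "records.xml")  # placeholder output path
print(f"{count} records written")
```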
#### File: oai-pmh-harvester/tests/test_cli.py
```python
import vcr
from harvester.cli import main
@vcr.use_cassette("tests/fixtures/vcr_cassettes/get-records-exclude-deleted.yaml")
def test_harvest_all_options_except_set_spec(caplog, monkeypatch, cli_runner, tmp_path):
monkeypatch.setenv("SENTRY_DSN", "https://[email protected]/123456")
with cli_runner.isolated_filesystem(temp_dir=tmp_path):
filepath = tmp_path / "records.xml"
result = cli_runner.invoke(
main,
[
"-h",
"https://dspace.mit.edu/oai/request",
"-o",
filepath,
"-v",
"harvest",
"-m",
"oai_dc",
"-f",
"2017-12-14",
"-u",
"2017-12-14",
"--exclude-deleted",
],
)
assert result.exit_code == 0
assert "Logger 'root' configured with level=DEBUG" in caplog.text
assert (
"Sentry DSN found, exceptions will be sent to Sentry with env=test"
in caplog.text
)
assert (
"OAI-PMH harvesting from source https://dspace.mit.edu/oai/request with "
"parameters: metadata_format=oai_dc, from_date=2017-12-14, until_date="
"2017-12-14, set=None, exclude_deleted=True" in caplog.text
)
assert (
"Number of records to harvest (including deleted records): 1" in caplog.text
)
assert "Writing records to output file:" in caplog.text
assert (
"Record retrieved:\n Deleted:True\n Header:"
'<header xmlns="http://www.openarchives.org/OAI/2.0/" ' in caplog.text
)
assert (
"Harvest completed. Total records harvested (not including deleted "
"records): 0" in caplog.text
)
@vcr.use_cassette("tests/fixtures/vcr_cassettes/harvest-from-set.yaml")
def test_harvest_no_options_except_set_spec(caplog, cli_runner, tmp_path):
with cli_runner.isolated_filesystem(temp_dir=tmp_path):
filepath = tmp_path / "records.xml"
result = cli_runner.invoke(
main,
[
"-h",
"https://dspace.mit.edu/oai/request",
"-o",
filepath,
"harvest",
"-s",
"com_1721.1_140587",
],
)
assert result.exit_code == 0
assert "Logger 'root' configured with level=INFO" in caplog.text
assert (
"No Sentry DSN found, exceptions will not be sent to Sentry" in caplog.text
)
assert (
"OAI-PMH harvesting from source https://dspace.mit.edu/oai/request with "
"parameters: metadata_format=oai_dc, from_date=None, until_date="
"None, set=com_1721.1_140587, exclude_deleted=False" in caplog.text
)
assert (
"Number of records to harvest (including deleted records): 58"
in caplog.text
)
assert "Writing records to output file:" in caplog.text
assert (
"Harvest completed. Total records harvested (including deleted "
"records): 58" in caplog.text
)
@vcr.use_cassette("tests/fixtures/vcr_cassettes/harvest-no-records.yaml")
def test_harvest_no_records(caplog, cli_runner, tmp_path):
with cli_runner.isolated_filesystem(temp_dir=tmp_path):
filepath = tmp_path / "records.xml"
result = cli_runner.invoke(
main,
[
"-h",
"https://dspace.mit.edu/oai/request",
"-o",
filepath,
"harvest",
"-s",
"com_1721.1_100263",
],
)
assert result.exit_code == 0
assert (
"No records harvested: the combination of the provided options results in "
"an empty list." in caplog.text
)
@vcr.use_cassette("tests/fixtures/vcr_cassettes/get-sets.yaml")
def test_setlist(caplog, cli_runner, tmp_path):
with cli_runner.isolated_filesystem(temp_dir=tmp_path):
filepath = tmp_path / "sets.json"
result = cli_runner.invoke(
main,
[
"-h",
"https://dspace.mit.edu/oai/request",
"-o",
filepath,
"setlist",
],
)
assert result.exit_code == 0
assert (
"Getting set list from source: https://dspace.mit.edu/oai/request"
in caplog.text
)
assert "Writing setlist to output file " in caplog.text
assert "Setlist completed" in caplog.text
``` |
{
"source": "jprichards/PyVMwareAirWatch",
"score": 3
} |
#### File: pyairwatch/mdm/tags.py
```python
class Tags(object):
"""A class to manage various AirWatch device tag functionalities"""
def __init__(self, client):
self.client = client
def get_id_by_name(self, name, og_id):
# mdm/tags/search?name={name}
response = self._get(path='/tags/search', params={'name':str(name), 'organizationgroupid':str(og_id)})
return response
def _get(self, module='mdm', path=None, version=None, params=None, header=None):
"""GET requests for the /MDM/Tags module."""
response = self.client.get(module=module, path=path, version=version, params=params, header=header)
return response
def _post(self, module='mdm', path=None, version=None, params=None, data=None, json=None, header=None):
"""POST requests for the /MDM/Tags module."""
response = self.client.post(module=module, path=path, version=version, params=params, data=data, json=json, header=header)
return response
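# Hedged usage sketch: `client` stands for this package's authenticated API client (any
# object exposing get()/post() with the module/path/params keyword arguments used above);
# the tag name and organization group id below are only illustrative.
#   tags = Tags(client)
#   response = tags.get_id_by_name('Finance-iPads', og_id=570)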
``` |
{
"source": "jp-richter/formelbaer-rnn",
"score": 2
} |
#### File: jp-richter/formelbaer-rnn/converter.py
```python
from config import config, paths
import subprocess
import os
import shutil
import psutil
import math
import pathlib
import tree
import ray
import template
# multiprocessing module ray should give better performance, windows is not supported
NUM_CPUS = psutil.cpu_count(logical=False)
# preamble preloaded in preamble.fmt, saved in preamble.tex, pdf compression set to 0
CODE_DIRECTORY = pathlib.Path(__file__).resolve().parent
PREAMBLE_PATH = pathlib.PurePath(CODE_DIRECTORY, 'preamble.fmt')
PREAMBLE_PRECOMPILED = False
def precompile_preamble():
"""
This function precompiles the preamble of the template and saves the .fmt file at the synthetic data folder.
"""
precompile_cmd = 'pdflatex -ini -jobname="preamble" "&pdflatex preamble.tex\\dump" > ' + paths.dump
subprocess.run(precompile_cmd, cwd=CODE_DIRECTORY, shell=True, timeout=10)
@ray.remote
def conversion(pid, offset, sequences, directory, file_count):
"""
This function calls all functions required for the full latex to png conversion for a subset of the sequences.
It is meant to be called for a single process. The respective subset depends on the given offset.
:param pid: The identifier of a process i with i=0,1,2,.. .
    :param offset: The subset of indices the process will handle runs from pid * offset to (pid + 1) * offset.
:param sequences: The list of sequences that is being processed.
:param directory: The directory path which the png files should be written to.
:param file_count: The amount of files already in the directory. This is required to generate file names.
:return: The return value serves as synchronization point for ray and is of no other use.
"""
start_index = pid * offset
end_index = (pid + 1) * offset
end_index = min(end_index, len(sequences))
latex = template.get_template(sequences[start_index:end_index])
name = str(file_count + start_index)
file = pdflatex(latex, directory, name)
file = pdf2png(directory, file, name)
return True
def convert_to_png(batch, directory) -> None:
"""
    This function takes a batch of sequences or a list of batches in the form of onehot encodings and converts them to
    the .png format. Lists of batches are encouraged to justify the multiprocessing overhead.
    :param batch: An array of size (batch size, sequence length, onehot length) is expected. This function
    assumes that the given onehot encodings in the array are valid. A list of batches is also allowed.
    :param directory: The directory path where the png files will get saved. The function assumes the directory exists.
"""
    global NUM_CPUS, PREAMBLE_PATH, PREAMBLE_PRECOMPILED
    if not PREAMBLE_PRECOMPILED:
        precompile_preamble()
        PREAMBLE_PRECOMPILED = True
    shutil.copyfile(PREAMBLE_PATH, directory + '/preamble.fmt')
sequences = batch.tolist()
trees = tree.to_trees(sequences)
latexs = [t.latex() for t in trees]
num_sequences = len(latexs)
cpus_used = min(num_sequences, NUM_CPUS)
offset = math.ceil(num_sequences / cpus_used)
file_count = len(os.listdir(directory))
# copy to shared memory once instead of copying to each cpu
latexs_id = ray.put(latexs)
offset_id = ray.put(offset)
directory_id = ray.put(directory)
file_count_id = ray.put(file_count)
# no need for return value but call get for synchronisation
ray.get([conversion.remote(pid, offset_id, latexs_id, directory_id, file_count_id) for pid in range(cpus_used)])
def clean_up(directory) -> None:
"""
    This function will delete anything but .png files in a given directory. It is useful to remove auxiliary files
that get generated by pdflatex.
:param directory: The path to the directory that should be cleared.
"""
with os.scandir(directory) as iterator:
for entry in iterator:
if entry.is_file() and not entry.name.endswith('.png'):
os.remove(entry)
def pdflatex(latex, directory, name) -> str:
"""
This function generates a .pdf file at the target location. It uses pdflatex to compile the given latex code.
    :param latex: A single latex formula string without $$ or an equation environment.
    :param directory: The directory path in which the .pdf file should be saved.
    :param name: A unique identifier for the generated file.
:return: Returns the full path of the generated .pdf file.
"""
file = directory + '/' + name + '.tex'
with open(file, 'w') as f:
f.write(latex)
cmd = ['pdflatex',
'-interaction=batchmode',
'-interaction=nonstopmode',
file]
subprocess.run(cmd, cwd=directory, stdout=subprocess.DEVNULL) # errors are critical
return file[:-3] + 'pdf'
def pdf2png(directory, file, name) -> str:
"""
This function generates a .png file at the target location from a given .pdf file. It uses ghostscript
internally.
:param directory: The directory path where the target .png file is located.
:param file: The target .pdf file which should be converted to .png format.
:param name: The unique identifier for the generated file.
:return: Returns the full path to the generated .png file.
"""
cmd = ['gs',
'-dUseCropBox',
'-dSAFER',
'-dBATCH',
'-dNOPAUSE',
'-sDEVICE=pngalpha',
'-r90',
'-sOutputFile=' + name + '%09d.png',
file]
subprocess.run(cmd, cwd=directory, stdout=subprocess.DEVNULL) # errors are critical
return directory + '/' + name + '.png'
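# Hedged usage sketch (paths are illustrative): `batch` is a tensor of onehot encoded
# sequences as produced elsewhere in this repository, and ray must be initialised first.
#   ray.init(num_cpus=NUM_CPUS)
#   convert_to_png(batch, '/tmp/formelbaer_pngs')   # one .png per sequence
#   clean_up('/tmp/formelbaer_pngs')                # drop pdflatex auxiliary files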
```
#### File: jp-richter/formelbaer-rnn/discriminator.py
```python
import torch
from torch import nn
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.pool2x4 = nn.MaxPool2d((2, 4))
self.pool3x3 = nn.MaxPool2d((3, 3))
self.pool2d = nn.AvgPool2d((1, 15))
in_ch = 1
out_ch = 32
self.conv1 = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3))
self.conv2 = nn.Sequential(nn.Conv2d(out_ch, out_ch, 5))
self.conv3 = nn.Sequential(nn.Conv2d(out_ch, out_ch, 3))
self.fc3 = nn.Linear(5 * 32, 64)
self.fc4 = nn.Linear(64, 32)
self.fc5 = nn.Linear(32, 1)
self.selu = nn.SELU()
self.sigmoid = nn.Sigmoid()
self.loss = []
self.acc = []
self.criterion = None
self.optimizer = None
def forward(self, x):
out = self.conv1(x)
out = self.selu(out)
out = self.pool2x4(out)
out = self.conv2(out)
out = self.selu(out)
out = self.pool2x4(out)
out = self.conv3(out)
out = self.selu(out)
out = self.pool3x3(out)
out = out.view(-1, 5 * 32)
out = self.fc3(out)
out = self.selu(out)
out = self.fc4(out)
out = self.selu(out)
out = self.fc5(out)
out = self.sigmoid(out)
out = out.squeeze()
return out
def save(self, file):
torch.save(self.state_dict(), file)
def load(self, file):
self.load_state_dict(torch.load(file))
def reset(self):
def weights_reset(model):
if isinstance(model, nn.Conv2d) or isinstance(model, nn.Linear):
model.reset_parameters()
self.apply(weights_reset)
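# Hedged shape sketch (not executed on import): the view(-1, 5 * 32) in forward() assumes
# the conv/pool stack ends in a 32-channel 1x5 feature map, which holds for 1-channel
# inputs of 32x300 pixels; the image size actually used depends on the rendering step.
def _demo_forward_shape():
    model = Discriminator()
    dummy = torch.zeros(2, 1, 32, 300)
    scores = model(dummy)
    assert scores.shape == (2,)
    return scores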
```
#### File: jp-richter/formelbaer-rnn/template.py
```python
DOCUMENT_START = '''%&preamble
\\begin{{document}}
'''
FORMULAR = '''
\\begin{{page}}
\\begin{{minipage}}[c][1cm]{{50cm}}
\\centering{{
$ {} $
}}
\\end{{minipage}}
\\end{{page}}
'''
DOCUMENT_END = '''\\end{{document}}'''
def get_template(formulars: list):
template = DOCUMENT_START
for _ in range(len(formulars)):
template += FORMULAR
template += DOCUMENT_END
template = template.format(*formulars)
return template
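# Hedged usage sketch (illustrative formulas; nothing here runs on import): get_template()
# fills one minipage page per formula and keeps the precompiled preamble reference.
def _demo_template():
    document = get_template(['a + b', 'x^{2}'])
    assert document.startswith('%&preamble')
    assert '$ a + b $' in document and '$ x^{2} $' in document
    return document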
```
#### File: jp-richter/formelbaer-rnn/tokens.py
```python
from dataclasses import dataclass
# it's beneficial to have all possible tokens or actions respectively defined
# in a single place with uniform data types and all necessary information
# associated with them. even if storing those for atomic types such as
# simple numbers and letters seems verbose and unnecessary, it might
# avoid hacky solutions later on when choices can have different types or
# token information is defined redundantly in different places and has no
# guarantee of being in a legal cohesive state.
# hard code one hot encoding to ensure consistency independent from order of iteration
# assumption: format string for latex translation requires the token arguments in
# sequential order with sequences taking the subscripted argument first
@dataclass
class TokenInfo:
name: str
arity: int
onehot: int
latex: str
_tokens = {
# unary
0 : TokenInfo('root',1,0,'\\sqrt{{{}}}'),
1 : TokenInfo('fac',1,1,'{}!'),
2 : TokenInfo('max',1,2,'max {}'),
3 : TokenInfo('min',1,3,'min {}'),
4 : TokenInfo('argmax',1,4,'argmax {}'),
5 : TokenInfo('argmin',1,5,'argmin {}'),
6 : TokenInfo('inverse',1,6,'{}^{{-1}}'),
7 : TokenInfo('sin',1,7,'sin {}'),
8 : TokenInfo('cos',1,8,'cos {}'),
9 : TokenInfo('tan',1,9,'tan {}'),
10 : TokenInfo('sinh',1,10,'sinh {}'),
11 : TokenInfo('cosh',1,11,'cosh {}'),
12 : TokenInfo('tanh',1,12,'tanh {}'),
13 : TokenInfo('sigmoid',1,13,'\\sigma({})'),
14 : TokenInfo('transpose',1,14,'{}^T'),
15 : TokenInfo('prime',1,15,'{}\''),
16 : TokenInfo('absolute',1,16,'|{}|'),
17 : TokenInfo('norm',1,17,'||{}||'),
18 : TokenInfo('mathbbe',1,18,'\\mathbb{{E}}[{}]'),
19 : TokenInfo('mathbbp',1,19,'\\mathbb{{P}}[{}]'),
# subscripted
20 : TokenInfo('maxsub',2,20,'max_{{{}}} {}'),
21 : TokenInfo('minsub',2,21,'min_{{{}}} {}'),
22 : TokenInfo('argmaxsub',2,22,'argmax_{{{}}} {}'),
23 : TokenInfo('argminsub',2,23,'argmin_{{{}}} {}'),
24 : TokenInfo('mathbbesub',2,24,'\\mathbb{{E}}_{{{}}}[{}]'),
25 : TokenInfo('mathbbpsub',2,25,'\\mathbb{{P}}_{{{}}}[{}]'),
# binary
26 : TokenInfo('add',2,26,'{} + {}'),
27 : TokenInfo('sub',2,27,'{} - {}'),
28 : TokenInfo('dot',2,28,'{} \\cdot {}'),
29 : TokenInfo('cross',2,29,'{} \\times {}'),
30 : TokenInfo('fract',2,30,'\\frac{{{}}}{{{}}}'),
31 : TokenInfo('mod',2,31,'{} mod {}'),
32 : TokenInfo('power',2,32,'{}^{{{}}}'),
33 : TokenInfo('derive', 2, 1, '\\frac{{\\delta{}}}{{\\delta {}}}'),
# sequences
34 : TokenInfo('sum',3,33,'\\sum\\nolimits_{{{}}}^{{{}}} {}'),
35 : TokenInfo('product',3,34,'\\prod\\nolimits_{{{}}}^{{{}}} {}'),
36 : TokenInfo('integral',3,35,'\\int\\nolimits_{{{}}}^{{{}}} {}'),
# equalities
37 : TokenInfo('equals',2,36,'{} = {}'),
38 : TokenInfo('lesser',2,37,'{} < {}'),
39 : TokenInfo('greater',2,38,'{} > {}'),
40 : TokenInfo('lessereq',2,39,'{} \\leq {}'),
41 : TokenInfo('greatereq',2,40,'{} \\geq {}'),
# sets
42 : TokenInfo('subset',2,41,'{} \\subset {}'),
43 : TokenInfo('subseteq',2,42,'{} \\subseteq {}'),
44 : TokenInfo('union',2,43,'{} \\cup {}'),
45 : TokenInfo('difference',2,44,'{} \\cap {}'),
46 : TokenInfo('elementof',2,45,'{} \\in {}'),
# special
47 : TokenInfo('apply',2,46,'{}({})'),
48 : TokenInfo('brackets',1,47,'({})'),
# atomic
49 : TokenInfo(u'\u0393',0,50,'\\Gamma'),
50 : TokenInfo(u'\u0394',0,51,'\\Delta'),
51 : TokenInfo(u'\u0398',0,55,'\\Theta'),
52 : TokenInfo(u'\u039B',0,58,'\\Lambda'),
53 : TokenInfo(u'\u039E',0,61,'\\Xi'),
54 : TokenInfo(u'\u03A0',0,63,'\\Pi'),
55 : TokenInfo(u'\u03A3',0,65,'\\Sigma'),
56 : TokenInfo(u'\u03A5',0,67,'\\Upsilon'),
57 : TokenInfo(u'\u03A6',0,68,'\\Phi'),
58 : TokenInfo(u'\u03A8',0,70,'\\Psi'),
59 : TokenInfo(u'\u03A9',0,71,'\\Omega'),
60 : TokenInfo(u'\u03B1',0,72,'\\alpha'),
61 : TokenInfo(u'\u03B2',0,73,'\\beta'),
62 : TokenInfo(u'\u03B3',0,74,'\\gamma'),
63 : TokenInfo(u'\u03B4',0,75,'\\delta'),
64 : TokenInfo(u'\u03B5',0,76,'\\epsilon'),
65 : TokenInfo(u'\u03B6',0,77,'\\zeta'),
66 : TokenInfo(u'\u03B7',0,78,'\\eta'),
67 : TokenInfo(u'\u03B8',0,79,'\\theta'),
68 : TokenInfo(u'\u03B9',0,80,'\\iota'),
69 : TokenInfo(u'\u03BA',0,81,'\\kappa'),
70 : TokenInfo(u'\u03BB',0,82,'\\lambda'),
71 : TokenInfo(u'\u03BC',0,83,'\\mu'),
72 : TokenInfo(u'\u03BD',0,84,'\\nu'),
73 : TokenInfo(u'\u03BE',0,85,'\\xi'),
74 : TokenInfo(u'\u03C0',0,87,'\\pi'),
75 : TokenInfo(u'\u03C1',0,88,'\\rho'),
76 : TokenInfo(u'\u03C3',0,89,'\\sigma'),
77 : TokenInfo(u'\u03C4',0,90,'\\tau'),
78 : TokenInfo(u'\u03C5',0,91,'\\upsilon'),
79 : TokenInfo(u'\u03C6',0,92,'\\phi'),
80 : TokenInfo(u'\u03C7',0,93,'\\chi'),
81 : TokenInfo(u'\u03C8',0,94,'\\psi'),
82 : TokenInfo(u'\u03C9',0,95,'\\omega'),
83 : TokenInfo('A',0,96,'A'),
84 : TokenInfo('B',0,97,'B'),
85 : TokenInfo('C',0,98,'C'),
86 : TokenInfo('D',0,99,'D'),
87 : TokenInfo('E',0,100,'E'),
88 : TokenInfo('F',0,101,'F'),
89 : TokenInfo('G',0,102,'G'),
90 : TokenInfo('H',0,103,'H'),
91 : TokenInfo('I',0,104,'I'),
92 : TokenInfo('J',0,105,'J'),
93 : TokenInfo('K',0,106,'K'),
94 : TokenInfo('L',0,107,'L'),
95 : TokenInfo('M',0,108,'M'),
96 : TokenInfo('N',0,109,'N'),
97 : TokenInfo('O',0,110,'O'),
98 : TokenInfo('P',0,111,'P'),
99 : TokenInfo('Q',0,112,'Q'),
100 : TokenInfo('R',0,113,'R'),
101 : TokenInfo('S',0,114,'S'),
102 : TokenInfo('T',0,115,'T'),
103 : TokenInfo('U',0,116,'U'),
104 : TokenInfo('V',0,117,'V'),
105 : TokenInfo('W',0,118,'W'),
106 : TokenInfo('X',0,119,'X'),
107 : TokenInfo('Y',0,120,'Y'),
108 : TokenInfo('Z',0,121,'Z'),
109 : TokenInfo('a',0,122,'a'),
110 : TokenInfo('b',0,123,'b'),
111 : TokenInfo('c',0,124,'c'),
112 : TokenInfo('d',0,125,'d'),
113 : TokenInfo('e',0,126,'e'),
114 : TokenInfo('f',0,127,'f'),
115 : TokenInfo('g',0,128,'g'),
116 : TokenInfo('h',0,129,'h'),
117 : TokenInfo('i',0,130,'i'),
118 : TokenInfo('j',0,131,'j'),
119 : TokenInfo('k',0,132,'k'),
120 : TokenInfo('l',0,133,'l'),
121 : TokenInfo('m',0,134,'m'),
122 : TokenInfo('n',0,135,'n'),
123 : TokenInfo('o',0,136,'o'),
124 : TokenInfo('p',0,137,'p'),
125 : TokenInfo('q',0,138,'q'),
126 : TokenInfo('r',0,139,'r'),
127 : TokenInfo('s',0,140,'s'),
128 : TokenInfo('t',0,141,'t'),
129 : TokenInfo('u',0,142,'u'),
130 : TokenInfo('v',0,143,'v'),
131 : TokenInfo('w',0,144,'w'),
132 : TokenInfo('x',0,145,'x'),
133 : TokenInfo('y',0,146,'y'),
134 : TokenInfo('z',0,147,'z'),
135 : TokenInfo('1',0,148,'1'),
136 : TokenInfo('2',0,149,'2'),
137 : TokenInfo('3',0,150,'3'),
138 : TokenInfo('4',0,151,'4'),
139 : TokenInfo('5',0,152,'5'),
140 : TokenInfo('6',0,153,'6'),
141 : TokenInfo('7',0,154,'7'),
142 : TokenInfo('8',0,155,'8'),
143 : TokenInfo('9',0,156,'9'),
144 : TokenInfo('0',0,157,'0'),
# added later
145 : TokenInfo('infty', 0, 0, '\\infty'),
146 : TokenInfo('propto', 2, 0, '{} \\propto {}'),
147 : TokenInfo('negate', 1, 0, '-{}')
}
def get(id): return _tokens[id]
def count(): return len(_tokens)
def possibilities(): return list(_tokens.keys())
def empty(): return [0] * len(_tokens)
def id(onehot):
for i in range(len(onehot)):
if onehot[i] == 1:
return i
raise ValueError('Got encoding of empty start token, but start token has no ID.')
def onehot(id):
template = [0] * len(_tokens)
template[id] = 1
return template
assert len(_tokens) == 148
assert not [i for i in _tokens.keys() if i < 0 or i > 148]
assert not [(i,(k,t)) for (i,(k,t)) in enumerate(_tokens.items()) if not i == k]
for i,t in _tokens.items():
t.onehot = onehot(i)
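# Hedged self-check sketch (nothing here runs on import): encoding an id with onehot()
# and decoding it with id() should be the identity for every token id.
def _demo_onehot_roundtrip():
    for token_id in (0, 26, 147):
        encoding = onehot(token_id)
        assert id(encoding) == token_id
    return True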
``` |
{
"source": "jp-richter/pg637",
"score": 2
} |
#### File: src/examples/labyrinth_reinforce.py
```python
import torch
from torch import nn
import numpy
import environment as env
from environment import get_valid_directions, move, prettyprint
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.input_dim = 4 # onehot of possible paths
self.output_dim = 4 # action probs
self.hidden_dim = 32
self.layers = 2
self.temperature = 1.2
self.gru = nn.GRU(self.input_dim, self.hidden_dim, self.layers, batch_first=True)
self.lin = nn.Linear(self.hidden_dim, self.output_dim)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, x, h=None):
if h is not None:
out, h = self.gru(x, h)
else:
out, h = self.gru(x)
out = out[:, -1]
out = self.lin(out)
out = self.relu(out)
out = self.softmax(out / self.temperature)
return out, h
def save(self, file):
torch.save(self.state_dict(), file)
def load(self, file, device):
self.load_state_dict(torch.load(file, map_location=torch.device(device)))
def set_parameters_to(self, policy):
self.load_state_dict(policy.state_dict())
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
batch_size = 16
discount = 0.8
learnrate = 0.02
epochs = 100
simulations = 10
max_steps = 50
with_baseline = True
# Not listed above, but something to experiment with, is the temperature parameter of the GRU network. It smooths the
# policy a bit and is meant to keep the function from collapsing onto unwanted modes, which can happen quickly early in
# training.
# In addition, the environment gives a negative reward of 0.5 for walking into a wall.
# The process depends heavily on chance! There can be runs in which no noteworthy success is achieved with the given
# epochs and parameters. Apart from such outliers it should work fairly reliably. For testing you also do not have to
# let it run to completion.
direction_indices = {
'left': 0,
'right': 1,
'up': 2,
'down': 3
}
direction_strings = {
v: k for (k, v) in direction_indices.items()
}
def to_onehot(directions):
state = torch.zeros((4,), device=device)
for direction in directions:
state[direction_indices[direction]] = 1
return state
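# Example: to_onehot(['left', 'up']) yields tensor([1., 0., 1., 0.]) on the chosen device,
# i.e. the entries for 'left' (index 0) and 'up' (index 2) are set.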
"""
Returns a matrix of size (batch_size, 1, 4), where each element along dimension 0 is a one-hot encoded state. The 1 in
the middle can be ignored. This is the input to the network.
"""
def to_batch(directions):
batch = torch.zeros((batch_size, 1, 4), device=device)
for i, dirs in enumerate(directions):
batch[i] = to_onehot(dirs)
return batch
cache = []
"""
A policy-gradient update.
param probs: A tensor [trajectory_length, batch_size] of log probabilities of the executed actions
param rewards: A tensor [trajectory_length, batch_size] of rewards received for the respective executed actions.
"""
def policy_gradient(optimizer, probs, rewards):
total = torch.zeros((batch_size,), device=device)
optimizer.zero_grad()
if with_baseline:
baseline = 0
if len(cache) > 10:
history = torch.stack(cache, dim=0)
baseline = torch.mean(history)
#DEBUG
print('BASELINE ', baseline.item())
#/DEBUG
cache.append(torch.stack(rewards, dim=0))
if len(cache) > 20:
cache.pop(0)
    for step, (prob, reward) in enumerate(zip(probs, rewards)):  # one step at a time for all trajectories in the batch
if with_baseline:
reward = reward - baseline
total = total + discount**step * reward * prob
total = torch.sum(total) / batch_size
loss = -total
loss.backward()
optimizer.step()
#DEBUG
print('LOSS ', loss.item())
#/DEBUG
"""
Given a batch of positions and a policy, actions are selected and executed in the environment.
param policy: The policy network
param positions: A batch (as a list) of field IDs (the number of the field the agent is currently on)
param hidden: The hidden state of the policy RNN
"""
def step(policy, positions, hidden=None):
directions = [get_valid_directions(p) for p in positions]
batch = to_batch(directions)
if hidden is not None:
policies, hidden = policy(batch, hidden)
else:
policies, hidden = policy(batch)
    # sample actions (indices) from the current policy
distributions = torch.distributions.Categorical(policies)
actions = distributions.sample()
probs = distributions.log_prob(actions)
    # convert the actions into strings (left, up, ...)
actions = [direction_strings[index.item()] for index in actions]
rewards = torch.zeros((batch_size,), device=device)
next_positions = []
    # execute the actions and store the feedback
for i, (action, position) in enumerate(zip(actions, positions)):
next_position, reward = move(action, position)
rewards[i] = reward
next_positions.append(next_position)
return next_positions, probs, rewards, hidden
"""
A Monte Carlo simulation intended to approximate the value of a state.
param policy: The policy to follow during the simulation.
param hidden: The hidden state of the policy network.
param positions: A batch of positions (field IDs) whose value we want to approximate.
param simulations: Number of simulations to run. At the end we average over all simulations.
param steps: Number of steps taken per simulation
param current_reward: The reward for the step that brought us to the current position.
"""
def montecarlo(policy, hidden, positions, simulations, steps, current_reward):
with torch.no_grad():
rewards = torch.zeros((simulations, batch_size), device=device)
for s in range(simulations):
simulated_rewards = torch.zeros((0, batch_size), device=device)
for i in range(steps): # steps
positions, _, reward, hidden = step(policy, positions, hidden)
simulated_rewards = torch.cat((simulated_rewards, reward[None, :]), dim=0)
rewards[s] = torch.sum(simulated_rewards, dim=0) + current_reward
rewards = torch.mean(rewards, dim=0)
return rewards
"""
This method now goes one step further: the environment no longer gives us a reward after every
action, as in the basic.py:train_with_policy_gradient() function. Instead we have to estimate it
through simulations.
Moreover, the agent no longer knows which field it is on. In the previous methods we used a
one-hot vector of length 36 for 36 fields to encode the state. Now we only give the network a
one-hot vector of length 4, where index i = 1 iff direction i is free and i = 0 if there is a
wall in direction i.
We therefore use a recurrent network instead of a simple feed-forward network, with the idea
that the policy for a given state depends on the trajectory so far (otherwise two fields with
identical "exits" could not be distinguished).
The functions are essentially the same: what has been added is the call to the montecarlo()
function instead of a lookup of the field value.
"""
def train():
policy = Policy().to(device)
rollout = Policy().to(device)
optimizer = torch.optim.Adam(policy.parameters(), lr=learnrate)
for epoch in range(epochs):
        rollout.set_parameters_to(policy)  # copy of the network for the MC simulation
        policy.train()
        rollout.eval()  # very important in PyTorch whenever you use a network that you do not want to train!
        position = [env.entry_id for _ in range(batch_size)]  # the start positions for a batch
hidden = None
probs = []
rewards = []
#DEBUG
render_positions = {i: [] for i in range(36)}
#/DEBUG
for current_step in range(max_steps):
position, prob, reward, hidden = step(policy, position, hidden)
#missing_steps = max_steps - (current_step + 1)
simulation = montecarlo(rollout, hidden, position, simulations, 20, reward)
#DEBUG
for sample in range(batch_size):
pos = position[sample]
val = simulation[sample].item()
render_positions[pos].append(val)
#/DEBUG
rewards.append(simulation)
probs.append(prob)
policy_gradient(optimizer, probs, rewards)
#DEBUG
prettyprint([len(item) for item in render_positions.values()])
prettyprint([numpy.mean(item) for item in render_positions.values()])
        print('SUCCESS CASES ', position.count(env.exit_id), ' of ', batch_size)
print('=========== FINISHED RUN ', epoch, ' ===========\n')
#/DEBUG
if __name__ == '__main__':
train()
```
#### File: src/streamlit/dashboard.py
```python
import streamlit
import altair
import numpy
import json
import pandas
import os
import statistics
import numbers
import math
import timeit
LAYOUT = 'centered'  # options are wide and centered, has an effect on plot size
PATH = './' # this is the path to the folder containing the experiments
FILL_BROWSER_WIDTH = False # iff true, the plots will expand to the full length of your browser window
NO_EPISODE_BUCKETS = 10
NO_VALUES_PER_VARIABLE = 1000 # compression factor, number of values per variable per plot
QUANTILE_UPPER = 0.95
QUANTILE_LOWER = 0.5
ADAPTIVE_Y_DOMAIN = True
# Run this script with "streamlit run dashboard.py" from the directory the script is located in. Change the parameters
# above. Depending on the plot type the tuples in the list entry of 'Values' can only be of certain length. If
# framestamps is set to 'True', one additional dimension is allowed. If you don't actually want to plot your log you
# should use plot type Empty.
# This script constructs a dict with all necessary data from the logs and runs it through a processing pipeline with
# steps for sanitizing and smoothing the dict keys and time series. The resulting dictionary can be seen below. Keys
# are always present, regardless if the entry contains data for consistency reasons.
#
# {
# KEY_METHOD_NAME: str,
# KEY_SHORT_DESCR: str,
# KEY_LONG_DESSCR: str,
# KEY_RUNTIME: str,
# KEY_NOTES: str,
# KEY_HYPERPARAMETERS: dict,
#
# KEY_LOGS_RAW: {
# KEY_FRAMESTAMPS: bool,
# KEY_VALUES: list,
# KEY_FRAMESTAMP_VALUES: list,
# KEY_X_AXIS: str,
# KEY_Y_AXIS: str,
# KEY_LENGTH: int,
# KEY_COMPRESSION: int
# }
#
# KEY_LOGS_PROCESSED: {
# KEY_FRAMESTAMPS: bool,
# KEY_VALUES: list,
# KEY_X_AXIS: str,
# KEY_Y_AXIS: str,
# KEY_LENGTH: int
# KEY_COMPRESSION: int,
# KEY_QUANTILE_UPPER: list,
# KEY_QUANTILE_LOWER: list
# }
# }
# don't change anything below
# info keys
KEY_METHOD_NAME = 'MethodName'
KEY_SHORT_DESCR = 'ShortDescription'
KEY_LONG_DESSCR = 'LongDescription'
KEY_NOTES = 'Notes'
KEY_RUNTIME = 'Runtime'
KEY_HYPERPARAMETERS = 'Hyperparameter'
KEY_LOGS_RAW = 'Logs Raw'
KEY_LOGS_PROCESSED = 'Logs Preprocessed'
# log keys
KEY_VALUES = 'Values'
KEY_FRAMESTAMPS = 'Framestamps'
KEY_FRAMESTAMP_VALUES = 'Framestamp Values'
KEY_PLOTTYPE = 'Plot Type '
KEY_LENGTH = 'Length'
KEY_COMPRESSION = 'Compression'
KEY_QUANTILE_UPPER = 'Upper Quantile Values'
KEY_QUANTILE_LOWER = 'Lower Quantile Values'
KEY_X_AXIS = 'X Axis Name'
KEY_Y_AXIS = 'Y Axis Name'
KEY_DIMENSIONS = 'Dim'
KEY_UNIQUE_FRAMES = 'Unique Frames'
HELP_MESSAGE = '''
* Logs with too many data points will be compressed to 1000 values per variable. Compression is done by taking the
mean for line plots, and the class mode of 100 fixed classes for histograms. The upper and lower line in line plots mark
the quantiles for p=5 and p=95 of raw values (if I ever add this feature).
* Please be aware that the compression of histograms DISTORTS THE DATA A LOT! I am still working on a way to
prevent this. If you have any good ideas, feel free to make suggestions.
* You can still download high resolution images with the button next to the plots. You can also download the
smoothed plots directly at the three dots at each plot.
* To save plots you need to install some stuff, see https://github.com/altair-viz/altair_saver/ for .svg
files. The plots will be saved to the current directory. Until I refactor it to use a new thread it will
block streamlit for the runtime though.
* You can change the width of the plots with FILL_BROWSER_WIDTH in the script. This has an effect on the
plot size. For presentable plots consider FILL_BROWSER_WIDTH = False. You might have to restart! You can
also choose the LAYOUT as 'wide' or 'centered'.
* Note that you can always view the plots fullscreen with the arrow next to them. This is the size of your
browser window. This way you have complete control over plot sizes.
* Consider choosing plot type EMPTY for unneeded plots, since it speeds up the loading times.
* If you get any errors when the folder contains a preprocessed log from older versions, try deleting the
preprocessed log, since this script won't trigger the preprocessing step if this file is present.
'''
def main():
streamlit.set_page_config(layout=LAYOUT)
experiment_folders = [os.path.basename(f.path) for f in os.scandir(PATH) if f.is_dir()]
experiment_chosen = streamlit.sidebar.selectbox('Choose an experiment!', experiment_folders)
with streamlit.sidebar.expander('Click here for some info and tipps!'):
streamlit.markdown(HELP_MESSAGE)
streamlit.title(experiment_chosen)
data = load(experiment_chosen) # see at the top of the script for doc
visualize(data)
@streamlit.cache
def load(folder):
data, is_preprocessed = preprocess_load(folder)
if not is_preprocessed:
print(f'PRE-PROCESSING {folder}..')
preprocess_check_validity(data)
preprocess_sanitize_keys(data)
preprocess_translate_logs(data)
preprocess_extract_framestamps(data)
preprocess_remove_framestamp_outlier(data)
preprocess_smooth_logs(data)
preprocess_save(data, folder)
return data
def preprocess_load(folder):
if os.path.exists(os.path.join(folder, 'Preprocessed.json')):
print(f'FOUND PRE-PROCESSED LOG FILE FOR {folder}, SKIPPING PRE-PROCESSING STEP')
with open(os.path.join(folder, 'Preprocessed.json'), 'r') as file:
data = json.load(file)
return data, True
if not os.path.exists(os.path.join(folder, 'Info.json')) or not os.path.exists(
os.path.join(folder, 'Logs.json')):
print(f'Error: Folder {folder} does not contain Info.json or Logs.json and will be omitted.')
with open(os.path.join(folder, 'Info.json'), 'r') as file:
info = json.load(file)
with open(os.path.join(folder, 'Logs.json'), 'r') as file:
logs = json.load(file)
info[KEY_LOGS_RAW] = logs
return info, False
def preprocess_check_validity(data):
to_delete = []
break_conditions = [
break_on_empty_log,
break_on_non_tuple_type,
break_on_non_number_input,
break_on_wrong_dimensions
]
for name, log in data[KEY_LOGS_RAW].items():
for condition in break_conditions:
if condition(name, log):
to_delete.append(name)
break
for key in to_delete:
del data[KEY_LOGS_RAW][key]
def break_on_empty_log(name, log):
if len(log[KEY_VALUES]) == 0:
print(f'Warning: Found empty log {name}.')
return True
return False
def break_on_non_tuple_type(name, log):
if not type(log[KEY_VALUES][0]) == list:
# print(f'Warning: Non-tuple type in value log of {name} in {folder}/Logs.json. The entries will be '
# f'interpreted as 1-dimensional tuples.')
try:
for i in range(len(log[KEY_VALUES])):
log[KEY_VALUES][i] = [log[KEY_VALUES][i]]
except Exception as e:
print(f'Error: Interpreting entries as 1-dimensional tuples failed, the log will be omitted. '
f'Message: {e}')
return True
return False
def break_on_non_number_input(name, log):
if not isinstance(log[KEY_VALUES][0][0], numbers.Number):
print(f'Warning: Non-number type in value log of {name}, found type '
f'{type(log[KEY_VALUES][0][0])} instead. Log will be omitted.')
return True
return False
allowed_dimensions = {
'line': 1,
'histogram': 1,
'histogram2d': 2,
'scatter': 2,
'tube': 2,
'Empty': 999999999
}
def break_on_wrong_dimensions(name, log):
dimension_allowed = allowed_dimensions[log[KEY_PLOTTYPE]]
actual_dimension = len(log[KEY_VALUES][0])
if log[KEY_FRAMESTAMPS]:
dimension_allowed += 1
if actual_dimension != dimension_allowed:
print(f'Warning: The variable {name} has dimensions {actual_dimension} and plot '
f'type {log[KEY_PLOTTYPE]} with Framestamps={log[KEY_FRAMESTAMPS]}, which allows only entries '
f'with dimension {dimension_allowed}. The log for {name} will not be visualized.')
if actual_dimension != dimension_allowed or log[KEY_PLOTTYPE] == 'Empty':
return True
return False
def preprocess_sanitize_keys(data):
required_info_keys = [
KEY_METHOD_NAME,
KEY_SHORT_DESCR,
KEY_LONG_DESSCR,
KEY_RUNTIME,
KEY_NOTES,
KEY_HYPERPARAMETERS
]
for key in required_info_keys:
if key not in data.keys():
data[key] = ''
data[KEY_LOGS_PROCESSED] = dict()
for log in data[KEY_LOGS_RAW].values():
log[KEY_LENGTH] = len(log[KEY_VALUES])
log[KEY_DIMENSIONS] = len(log[KEY_VALUES][0])
log[KEY_FRAMESTAMP_VALUES] = []
log[KEY_COMPRESSION] = 1
if log[KEY_FRAMESTAMPS]:
log[KEY_DIMENSIONS] -= 1
if 'Names' in log.keys():
log[KEY_X_AXIS] = log['Names'][0]
if len(log['Names']) > 1:
log[KEY_Y_AXIS] = log['Names'][1]
if KEY_X_AXIS not in log.keys():
log[KEY_X_AXIS] = 'x'
if KEY_Y_AXIS not in log.keys():
log[KEY_Y_AXIS] = 'y'
def preprocess_translate_logs(data):
for log in data[KEY_LOGS_RAW].values():
log[KEY_VALUES] = list(zip(*log[KEY_VALUES]))
def preprocess_extract_framestamps(data):
for log in data[KEY_LOGS_RAW].values():
if log[KEY_FRAMESTAMPS]:
log[KEY_FRAMESTAMP_VALUES] = log[KEY_VALUES][0]
log[KEY_VALUES] = log[KEY_VALUES][1:]
def preprocess_remove_framestamp_outlier(data):
for name, log in data[KEY_LOGS_RAW].items():
if not log[KEY_FRAMESTAMPS]:
continue
unique_frames = list(set(log[KEY_FRAMESTAMP_VALUES]))
unique_frame_count = [0 for _ in unique_frames]
for frame in log[KEY_FRAMESTAMP_VALUES]:
unique_frame_count[unique_frames.index(frame)] += 1
outlier = []
        for unique_frame, count in zip(unique_frames, unique_frame_count):
            if count < max(unique_frame_count):
                outlier.append(unique_frame)
        to_remove = []
        for i in range(len(log[KEY_FRAMESTAMP_VALUES])):
            if log[KEY_FRAMESTAMP_VALUES][i] in outlier:
                to_remove.append(i)
        if to_remove:
            print(f'Found frame outliers in {name}: {to_remove}')
        # delete from the back so earlier indices stay valid; values may be tuples after
        # the zip in preprocess_translate_logs, so convert them to lists first
        log[KEY_FRAMESTAMP_VALUES] = list(log[KEY_FRAMESTAMP_VALUES])
        log[KEY_VALUES] = [list(variable) for variable in log[KEY_VALUES]]
        for index in reversed(to_remove):
            for variable in log[KEY_VALUES]:
                del variable[index]
            del log[KEY_FRAMESTAMP_VALUES][index]
def preprocess_smooth_logs(data):
for name, log in data[KEY_LOGS_RAW].items():
if log[KEY_LENGTH] < NO_VALUES_PER_VARIABLE:
data[KEY_LOGS_PROCESSED][name] = log
continue
sliding_window = log[KEY_LENGTH] // NO_VALUES_PER_VARIABLE
copy = {
KEY_VALUES: [[] for _ in range(len(log[KEY_VALUES]))],
KEY_QUANTILE_UPPER: [[] for _ in range(len(log[KEY_VALUES]))],
KEY_QUANTILE_LOWER: [[] for _ in range(len(log[KEY_VALUES]))],
KEY_FRAMESTAMPS: log[KEY_FRAMESTAMPS],
KEY_FRAMESTAMP_VALUES: list(log[KEY_FRAMESTAMP_VALUES]),
KEY_PLOTTYPE: log[KEY_PLOTTYPE],
KEY_X_AXIS: log[KEY_X_AXIS],
KEY_Y_AXIS: log[KEY_Y_AXIS],
KEY_COMPRESSION: sliding_window
}
if log[KEY_FRAMESTAMPS]:
unique_frames = set(log[KEY_FRAMESTAMP_VALUES])
copy[KEY_UNIQUE_FRAMES] = list(unique_frames)
splitter = len(unique_frames)
else:
splitter = 1 # equals no split
for v, variable in enumerate(log[KEY_VALUES]):
for i in range(NO_VALUES_PER_VARIABLE):
index = i * sliding_window
window_for_frame = variable[index:][::splitter]
window_for_frame = window_for_frame[:min(sliding_window, len(window_for_frame))]
mean = statistics.mean(window_for_frame)
copy[KEY_VALUES][v].append(mean)
if log[KEY_FRAMESTAMPS]:
copy[KEY_FRAMESTAMP_VALUES].append(log[KEY_FRAMESTAMP_VALUES][i])
upper, lower = numpy.quantile(
variable[index:index + sliding_window],
[QUANTILE_UPPER, QUANTILE_LOWER])
copy[KEY_QUANTILE_UPPER][v].append(upper)
copy[KEY_QUANTILE_LOWER][v].append(lower)
copy[KEY_LENGTH] = len(copy[KEY_VALUES][0])
data[KEY_LOGS_PROCESSED][name] = copy
def preprocess_save(data, folder):
with open(os.path.join(folder, 'Preprocessed.json'), 'w') as file:
json.dump(data, file, indent=4)
def visualize(data):
streamlit.markdown('''## Runtime: {}'''.format(data[KEY_RUNTIME]))
with streamlit.expander('Description'):
streamlit.write(data[KEY_LONG_DESSCR])
with streamlit.expander('Notes'):
streamlit.write(data[KEY_NOTES])
with streamlit.expander('Hyperparameters'):
streamlit.write(data[KEY_HYPERPARAMETERS])
for idx, (name, log) in enumerate(data[KEY_LOGS_PROCESSED].items()):
streamlit.markdown('''## {}'''.format(name))
slider_episodes = False
slider_frames = False
c1, c2, c3, c4 = streamlit.columns(4)
if c1.button(f'Download High Resolution ID{idx}'):
download_high_res(name, data[KEY_LOGS_RAW][name])
if c2.checkbox(f'Episode Slider ID{idx}'): # if plot type in ['histogram', 'histogram2d']
slider_episodes = True
if c3.checkbox(f'Frame Slider ID{idx}'):
slider_frames = True
slider_episodes = False
c4.markdown('''Compression Factor: x{}'''.format(log[KEY_COMPRESSION]))
figure = compute_figure(name, log, slider_episodes, slider_frames)
if figure:
streamlit.altair_chart(figure, use_container_width=FILL_BROWSER_WIDTH)
else:
streamlit.write('No data for this partition, how can this happen?')
def compute_figure(name, log, slider_episodes, slider_frames):
functions = {
'line': line,
'histogram': histogram,
'histogram2d': histogram2d,
'scatter': scatter,
'tube': tube
}
fn = functions[log[KEY_PLOTTYPE]] # see json logger for key
if slider_episodes:
buckets_size = max(log[KEY_LENGTH] // NO_EPISODE_BUCKETS, 1)
bucket_chosen = streamlit.slider(f'{name}: Choose one of {NO_EPISODE_BUCKETS}', 0, NO_EPISODE_BUCKETS - 1)
else:
buckets_size = log[KEY_LENGTH]
bucket_chosen = 0
partitioning = partition(log[KEY_VALUES], log[KEY_LENGTH], buckets_size)
if not [*partitioning[bucket_chosen]]:
streamlit.write('This bucket seems to be empty..')
return None
if slider_frames:
if slider_episodes:
streamlit.write('Please disable episode slider!')
return None
if not log[KEY_FRAMESTAMPS]:
streamlit.write('No Framestamps found for this log..')
return None
log[KEY_UNIQUE_FRAMES].sort()
frame_chosen = streamlit.selectbox(f'{name}: Choose a frame', log[KEY_UNIQUE_FRAMES])
result = []
for i in range(len(partitioning[bucket_chosen][0])):
if log[KEY_FRAMESTAMP_VALUES][i] == frame_chosen:
result.append(partitioning[bucket_chosen][0][i])
partitioning[bucket_chosen][0] = result
# TODO test this
return fn(*partitioning[bucket_chosen], x_name=log[KEY_X_AXIS], y_name=log[KEY_Y_AXIS])
@streamlit.cache
def partition(variables, no_values_per_variable, sizeof_buckets):
if no_values_per_variable == sizeof_buckets:
return [variables]
partitioning = []
for i in range(no_values_per_variable):
if i % sizeof_buckets == 0:
partitioning.append([[] for _ in range(len(variables))])
for j in range(len(variables)):
partitioning[-1][j].append(variables[j][i])
return partitioning
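# Example: partition([[1, 2, 3, 4]], 4, 2) returns [[[1, 2]], [[3, 4]]], i.e. two buckets
# of size 2 for the single variable.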
def download_high_res(name, raw_log):
figure = compute_figure(name, raw_log, False, False)
figure.save(f'{name}.svg', scale_factor=1.0)
@streamlit.cache
def build_line_dataframe(y, x_name, y_name):
return pandas.DataFrame({
x_name: numpy.linspace(0, len(y), len(y)),
y_name: numpy.array(y)
})
def line(y, x_name='x', y_name='y'):
frame = build_line_dataframe(y, x_name, y_name)
if ADAPTIVE_Y_DOMAIN:
return altair.Chart(frame).mark_line().encode(
x=x_name, y=altair.Y(y_name, scale=altair.Scale(zero=False)))
return altair.Chart(frame).mark_line().encode(x=x_name, y=y_name)
@streamlit.cache
def build_histogram_dataframe(x, name):
return pandas.DataFrame({
name: numpy.array(x),
})
def histogram(x, x_name='x', y_name='y'):
frame = build_histogram_dataframe(x, x_name)
    return altair.Chart(frame).mark_bar().encode(x=altair.X(x_name, bin=True), y='count()')
@streamlit.cache
def build_histogram2d_dataframe(x, y, x_name, y_name):
return pandas.DataFrame({
x_name: numpy.array(x),
y_name: numpy.array(y)
})
def histogram2d(x, y, x_name='x', y_name='y'):
frame = build_histogram2d_dataframe(x, y, x_name, y_name)
# plot = altair.Chart(frame).mark_circle().encode(
# altair.X(x_name, bin=True),
# altair.Y(y_name, bin=True),
# size='count()'
# ).interactive()
plot = altair.Chart(frame).mark_rect().encode(
altair.X(x_name, bin=altair.Bin(maxbins=60)),
altair.Y(y_name, bin=altair.Bin(maxbins=40)),
altair.Color('count()', scale=altair.Scale(scheme='greenblue'))
)
return plot
@streamlit.cache
def build_scatter_dataframe(x, y, x_name, y_name):
return pandas.DataFrame({
x_name: numpy.array(x),
y_name: numpy.array(y)
})
def scatter(x, y, x_name='x', y_name='y'):
frame = build_scatter_dataframe(x, y, x_name, y_name)
    plot = altair.Chart(frame).mark_circle(size=60).encode(
        x=x_name,
        y=y_name,
        tooltip=[x_name, y_name]
    ).interactive()
return plot
@streamlit.cache
def build_tube_dataframe(x, y, x_name, y_name):
x_array = numpy.array(x)
tube_array = numpy.array(y)
return pandas.DataFrame({
x_name: numpy.linspace(0, len(x), len(x)),
y_name: x_array,
'lower': x_array - tube_array,
'upper': x_array + tube_array
})
def tube(x, y, x_name='x', y_name='y'):
frame = build_tube_dataframe(x, y, x_name, y_name)
line = altair.Chart(frame).mark_line().encode(
x=x_name,
y=y_name
)
band = altair.Chart(frame).mark_area(opacity=0.5).encode(
x=x_name,
y='lower',
y2='upper'
)
return band + line
main()
```
#### File: src/tutorial/rendering.py
```python
import pygame
import pygame.draw
import pygame.freetype
import pygame.font
from src.tutorial import environment
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
cell_size = 96, 96
wall_size = 5
map_size = environment.field_length * 96, environment.field_length * 96
pygame.init()
pygame.display.set_caption('Labyrinth PG637')
screen = pygame.display.set_mode(map_size)
surface = pygame.Surface(map_size)
# font = pygame.freetype.Font(pygame.font.get_default_font(), 16)
font = pygame.font.SysFont("Trebuchet MS", 16)
wall_offsets = {
'left': (0, 0, wall_size, cell_size[1]),
'right': (cell_size[0] - wall_size, 0, wall_size, cell_size[1]),
'up': (0, 0, cell_size[0], wall_size),
'down': (0, cell_size[1] - wall_size, cell_size[0], wall_size)
}
def render(position_id, value_map=None):
global screen
background = surface.copy()
background.fill(WHITE)
for pid in environment.position_ids:
x = (pid % environment.field_length) * cell_size[0]
y = (pid // environment.field_length) * cell_size[1]
wall_directions = [d for d in ['left', 'right', 'up', 'down'] if d not in environment.get_valid_directions(pid)]
for direction in wall_directions:
wall_offset = wall_offsets[direction]
wall_rect = pygame.Rect(x + wall_offset[0], y + wall_offset[1], wall_offset[2], wall_offset[3])
pygame.draw.rect(background, BLACK, wall_rect, 0)
if pid == position_id:
pygame.draw.circle(background, BLUE, (x + (cell_size[0] // 2), y + (cell_size[1] // 2)), 20)
if pid == environment.trap_id:
pygame.draw.circle(background, RED, (x + (cell_size[0] // 2), y + (cell_size[1] // 2)), 20)
if pid == environment.exit_id:
pygame.draw.circle(background, GREEN, (x + (cell_size[0] // 2), y + (cell_size[1] // 2)), 20)
# if value_map is not None and pid in value_map.keys():
# text = font.render(str(value_map[pid]), False, (0, 0, 0))
# background.blit(text, (x,y))
screen.blit(background, (0, 0))
pygame.display.flip()
def shutdown():
pygame.quit()
# EXAMPLE:
clock = pygame.time.Clock()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
render(environment.entry_id)
clock.tick(2)
``` |
{
"source": "JPRichtmann/JPRichtmann.github.io",
"score": 3
} |
#### File: back-end/flask/main.py
```python
from flask import Flask, jsonify
from flask_cors import CORS, cross_origin
from scraper import main_scraper
import json
import random
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/country")
@cross_origin()
def get_emission_by_country():
with open('final/result.json', encoding="UTF-8") as json_file:
data = json.load(json_file)
return jsonify(data)
@app.route('/country-api')
@cross_origin()
def get_country_api():
with open('mid/result.json', encoding="UTF-8") as json_file:
data = json.load(json_file)
return jsonify(data)
@app.route('/')
@cross_origin()
def hello():
return "<h1 style='color:blue'>Hello There!</h1>"
def scraper_interval():
print("scraper run")
try:
main_scraper()
except Exception as e:
print(e)
sched = BackgroundScheduler(daemon=True)
sched.add_job(scraper_interval, 'interval', weeks=4)
sched.start()
atexit.register(lambda: sched.shutdown())
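# Hedged usage sketch: with the app running locally (Flask's default port 5000), the JSON
# endpoints registered above can be queried with, for example:
#   curl http://localhost:5000/country
#   curl http://localhost:5000/country-api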
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
``` |
{
"source": "Jprillaman/pydatastructs",
"score": 3
} |
#### File: pydatastructs/miscellaneous_data_structures/queue.py
```python
from pydatastructs.linear_data_structures import DynamicOneDimensionalArray
from pydatastructs.utils.misc_util import NoneType
from copy import deepcopy as dc
__all__ = [
'Queue'
]
class Queue(object):
"""Representation of queue data structure.
Parameters
==========
implementation : str
Implementation to be used for queue.
By default, 'array'
Currently only supports 'array'
implementation.
items : list/tuple
Optional, by default, None
        The initial items in the queue.
For array implementation.
dtype : A valid python type
Optional, by default NoneType if item
is None, otherwise takes the data
type of DynamicOneDimensionalArray
For array implementation.
Examples
========
>>> from pydatastructs import Queue
>>> q = Queue()
>>> q.append(1)
>>> q.append(2)
>>> q.append(3)
>>> q.popleft()
1
>>> len(q)
2
References
==========
.. [1] https://en.wikipedia.org/wiki/Queue_(abstract_data_type)
"""
def __new__(cls, implementation='array', **kwargs):
if implementation == 'array':
return ArrayQueue(
kwargs.get('items', None),
kwargs.get('dtype', int))
raise NotImplementedError(
"%s hasn't been implemented yet."%(implementation))
def append(self, *args, **kwargs):
raise NotImplementedError(
"This is an abstract method.")
def popleft(self, *args, **kwargs):
raise NotImplementedError(
"This is an abstract method.")
@property
def is_empty(self):
return None
class ArrayQueue(Queue):
__slots__ = ['front']
def __new__(cls, items=None, dtype=NoneType):
if items is None:
items = DynamicOneDimensionalArray(dtype, 0)
else:
dtype = type(items[0])
items = DynamicOneDimensionalArray(dtype, items)
obj = object.__new__(cls)
obj.items, obj.front = items, -1
if items.size == 0:
obj.front = -1
else:
obj.front = 0
return obj
def append(self, x):
if self.is_empty:
self.front = 0
self.items._dtype = type(x)
self.items.append(x)
def popleft(self):
if self.is_empty:
raise ValueError("Queue is empty.")
return_value = dc(self.items[self.front])
front_temp = self.front
if self.front == self.rear:
self.front = -1
else:
if (self.items._num - 1)/self.items._size < \
self.items._load_factor:
self.front = 0
else:
self.front += 1
self.items.delete(front_temp)
return return_value
@property
def rear(self):
return self.items._last_pos_filled
@property
def is_empty(self):
return self.__len__() == 0
def __len__(self):
return self.items._num
def __str__(self):
_data = []
for i in range(self.front, self.rear + 1):
_data.append(self.items._data[i])
return str(_data)
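# Hedged usage sketch: passing initial items infers the dtype from the first element.
#   q = Queue(items=[3, 5, 7])
#   q.popleft()    # 3
#   len(q)         # 2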
``` |
{
"source": "jprinc16/congressional-record",
"score": 2
} |
#### File: congressionalrecord/fdsys/cr_parser.py
```python
import re
import datetime
import os
import sys
import urllib2
from cStringIO import StringIO
from xml.sax.saxutils import escape
import lxml.etree
from .errors import *
from congressionalrecord.lib.xml_annotator import XMLAnnotator
from congressionalrecord.lib.logging import initialize_logfile, get_stack_trace
MONTHS = [datetime.date(2010, x, 1).strftime('%B') for x in range(1, 13)]
class CRParser(object):
''' Parser functionality and regular expressions common to all
congressional record documents'''
# Issue #15 - Ms./Mrs. <NAME> is not being caught as a speaker.
# https://github.com/jprinc16/congressional-record/issues/15
# Tweaks to RegEx should fix this error.
re_volume = r'(?<=Volume )\d+'
re_number = r'(?<=Number )\d+'
re_weekday = r'Number \d+ \((?P<weekday>[A-Za-z]+)'
re_month = r'\([A-Za-z]+, (?P<month>[a-zA-Z]+)'
re_day = r'\([A-Za-z]+, [A-Za-z]+ (?P<day>\d{1,2})'
re_year = r'\([A-Za-z]+, [A-Za-z]+ \d{1,2}, (?P<year>\d{4})'
re_chamber = r'(?<=\[)[A-Za-z]+'
re_pages = r'Pages? (?P<pages>[\w\-]+)'
re_title_start = r'\S+'
re_title = r'\s+(?P<title>(\S ?)+)'
re_title_end = r'.+'
re_newpage = r'\[\[Page \w+\]\]'
re_timestamp = r'{time}\s\d{4}'
re_underscore = r'\s+_+\s+'
re_underscore_sep = r'\s{33}_{6}$'
    # a new speaker might either be a legislator's name, or a reference to the role of president or presiding officer.
re_newspeaker = r'^(<bullet> | )(?P<name>(%s|(((Mr)|(Ms)|(Mrs))\. [-A-Z\'\sa-z\'\s]+( of [A-Za-z ]+)?|((Miss) [-A-Z\'\sa-z\'\s]+)( of [A-Za-z ]+)?))|((The ((VICE|ACTING|Acting) )?(PRESIDENT|SPEAKER|CHAIR(MAN)?)( pro tempore)?)|(The PRESIDING OFFICER)|(The CLERK)|(The CHIEF JUSTICE)|(The VICE PRESIDENT)|(Mr\. Counsel [A-Z]+))( \([A-Za-z.\- ]+\))?)\.'
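    # example: a line such as ' Mr. SMITH of Texas. Mr. President, I rise today...' matches
    # re_newspeaker, with the 'name' group capturing 'Mr. SMITH of Texas' (illustrative text).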
# whatever follows the statement of a new speaker marks someone starting to
# speak. if it's a new paragraph and there's already a current_speaker,
# then this re is also used to insert the <speaking> tag.
# re_speaking = r'^(<bullet> | )((((Mr)|(Ms)|(Mrs))\. [A-Za-z \-]+(of [A-Z][a-z]+)?)|((The (ACTING )?(PRESIDENT|SPEAKER)( pro tempore)?)|(The PRESIDING OFFICER))( \([A-Za-z.\- ]+\))?)\.'
re_speaking = r'^(<bullet> | )((((((Mr)|(Ms)|(Mrs))\. [A-Z\'a-z\']+( of [A-Za-z ]+)?|((Miss) [-A-Z\'a-z\']+)( of [A-Za-z ]+)?)|((The (VICE |Acting |ACTING )?(PRESIDENT|SPEAKER)( pro tempore)?)|(The PRESIDING OFFICER)|(The CLERK))( \([A-Za-z.\- ]+\))?))\. )?(?P<start>.)'
re_startshortquote = r'``'
re_endshortquote = r"''"
re_billheading = r'\s+SEC.[A-Z_0-9. \-()\[\]]+'
re_longquotestart = r' {7}(?P<start>.)'
re_longquotebody = r' {5}(?P<start>.)'
re_endofline = r'$'
re_startofline = r'^'
re_alltext = r"^\s+(?P<text>\S([\S ])+)"
re_rollcall = r'\[Roll(call)?( Vote)? No. \d+.*\]'
re_allcaps = r'^[^a-z]+$'
re_billdescription = r'^\s+The bill \('
re_date = r'^(Sun|Mon|Tues|Wednes|Thurs|Fri|Satur)day,\s(%s)\s\d\d?,\s\d{4}$' % '|'.join(MONTHS)
re_recorderstart = (r'^\s+(?P<start>'
+ r'(The (assistant )?legislative clerk read as follows)'
+ r'|(The nomination considered and confirmed is as follows)'
+ r'|(The (assistant )?legislative clerk)'
+ r'|(The nomination was confirmed)'
+ r'|(There being no objection, )'
+ r'|(The resolution .*?was agreed to.)'
+ r'|(The preamble was agreed to.)'
+ r'|(The resolution .*?reads as follows)'
+ r'|(The assistant editor .*?proceeded to call the roll)'
+ r'|(The bill clerk proceeded to call the roll.)'
+ r'|(The bill clerk called the roll.)'
+ r'|(The motion was agreed to.)'
# + r'|(The Clerk read the resolution, as follows:)'
+ r'|(The Clerk read (the resolution, )as follows:)'
+ r'|(The resolution(, with its preamble,)? reads as follows:)'
+ r'|(The amend(ment|ed).*?(is)? as follows:)'
+ r'|(Amendment No\. \d+.*?is as follows:)'
+ r'|(The yeas and nays resulted.*?, as follows:)'
+ r'|(The yeas and nays were ordered)'
+ r'|(The result was announced.*?, as follows:)'
+ r'|(The .*?editor of the Daily Digest)'
+ r'|(The (assistant )?bill clerk read as follows:)'
+ r'|(The .*?read as follows:)'
+ r'|(The text of the.*?is as follows)'
+ r'|(amended( to read)? as follows:)'
+ r'|(The material (previously )?referred to (by.*?)?is as follows:)'
+ r'|(There was no objection)'
+ r'|(The amendment.*?was agreed to)'
+ r'|(The motion to table was .*)'
+ r'|(The question was taken(;|.))'
+ r'|(The following bills and joint resolutions were introduced.*)'
+ r'|(The vote was taken by electronic device)'
+ r'|(A recorded vote was ordered)'
# + r'|()'
+ r').*')
# anchored at the end of the line
re_recorderend = (r'('
+ r'(read as follows:)'
+ r'|(the Record, as follows:)'
+ r'|(ordered to lie on the table; as follows:)'
+ r'|(resolutions as follows:)'
+ r')$')
# sometimes the recorder says something that is not unique to them but
# which, in the right context, we take to indicate a recorder comment.
re_recorder_fuzzy = (r'^\s+(?P<start>'
+ r'(Pending:)'
+ r'|(By M(r|s|rs)\. .* \(for .*)'
# + r'|()'
+ r').*')
LINE_MAX_LENGTH = 71
LONGQUOTE_INDENT = 5
NEW_PARA_INDENT = 2
LONGQUOTE_NEW_PARA_INDENT = [6, 7]
# documents with special titles need to be parsed differently than the
# topic documents, either because they follow a different format or because
# we derive special information from them. in many cases these special
# titles are matched as prefixes, not just full text match.
special_titles = {
"senate": "",
"Senate": "",
"prayer": "",
"PLEDGE OF ALLEGIANCE": "",
"APPOINTMENT OF ACTING PRESIDENT PRO TEMPORE": "",
"RECOGNITION OF THE MAJORITY LEADER": "",
"SCHEDULE": "",
"RESERVATION OF LEADER TIME": "",
"MORNING BUSINESS": "",
"MESSAGE FROM THE HOUSE": "",
"MESSAGES FROM THE HOUSE": "",
"MEASURES REFERRED": "",
"EXECUTIVE AND OTHER COMMUNICATIONS": "",
"SUBMITTED RESOLUTIONS": "",
"SENATE RESOLUTION": "",
"SUBMISSION OF CONCURRENT AND SENATE RESOLUTIONS": "",
"ADDITIONAL COSPONSORS": "",
"ADDITIONAL STATEMENTS": "",
"REPORTS OF COMMITTEES": "",
"INTRODUCTION OF BILLS AND JOINT RESOLUTIONS": "",
"ADDITIONAL COSPONSORS": "",
"INTRODUCTION OF BILLS AND JOINT RESOLUTIONS": "",
"STATEMENTS ON INTRODUCED BILLS AND JOINT RESOLUTIONS": "",
"AUTHORITY FOR COMMITTEES TO MEET": "",
"DISCHARGED NOMINATION": "",
"CONFIRMATIONS": "",
"AMENDMENTS SUBMITTED AND PROPOSED": "",
"TEXT OF AMENDMENTS": "",
"MEASURES PLACED ON THE CALENDAR": "",
"EXECUTIVE CALENDAR": "",
"NOTICES OF HEARINGS": "",
"REPORTS OF COMMITTEES DURING ADJOURNMENT": "",
"MEASURES DISCHARGED": "",
"REPORTS OF COMMITTEES ON PUBLIC BILLS AND RESOLUTIONS": "",
"INTRODUCTION OF BILLS AND JOINT RESOLUTIONS": "",
}
def __init__(self, abspath, **kwargs):
# track error conditions
self.error_flag = False
# file data
self.filepath = abspath
self.filedir, self.filename = os.path.split(self.filepath)
self.outdir = kwargs['outdir']
# fp = open(abspath)
#self.rawlines = fp.readlines()
# Remove internal page numbers and timestamps
content = open(abspath).read()
# the scraper expects the first line of a file to be blank
first_line = content.split("\n")
first_line = first_line[0]
if first_line.isspace() or first_line == '':
pass
else:
content = '\n' + content
content = re.sub(r'\n?\n?\[\[Page.*?\]\]\n?', ' ', content)
#content = re.sub(r'\n\n +\{time\} +\d+\n', '', content)
self.is_bullet = False
if re.search(r'<bullet>', content):
self.is_bullet = True
content = re.sub(r' *<bullet> *', ' ', content)
self.rawlines = StringIO(content).readlines()
self.get_metadata()
self.has_speakers = False
for line in self.rawlines:
if re.search(self.re_newspeaker, line):
self.has_speakers = True
break
self.date = None
# state information
self.currentline = 0
self.current_speaker = None
self.inquote = False
self.intitle = False
self.new_paragraph = False
self.recorder = False
self.inlongquote = False
self.newspeaker = False
self.inrollcall = False
# output
self.xml = ['<CRDoc>', ]
def spaces_indented(self, theline):
''' returns the number of spaces by which the line is indented. '''
re_textstart = r'\S'
try:
return re.search(re_textstart, theline).start()
except AttributeError:
return 0
def parse(self):
''' parses a raw senate document and returns the same document marked
up with XML '''
# self.get_metadata()
self.markup_preamble()
def download_mods_file(self):
url = 'http://www.gpo.gov/fdsys/pkg/%s/mods.xml' % '-'.join(self.filename.split('-')[0:4])
print 'No mods file found locally. Downloading from %s' % url
page = urllib2.urlopen(url).read()
fh = open(os.path.join(self.filedir, 'mods.xml'), 'w')
fh.write(page)
fh.close()
self.get_metadata()
def get_metadata(self):
path, filename = os.path.split(self.filepath)
xml_filename = os.path.join(path, 'mods.xml')
granule = filename.split('.')[0]
try:
xml = open(xml_filename, 'r').read()
except IOError:
self.download_mods_file()
xml = open(xml_filename, 'r').read()
# Remove namespace to make using xpath easier.
xml = xml.replace('xmlns="http://www.loc.gov/mods/v3" ', '')
doc = lxml.etree.fromstring(xml)
self.volume = doc.xpath('extension/volume')[0].text
self.issue = doc.xpath('extension/issue')[0].text
self.congress = doc.xpath('extension/congress')[0].text
self.session = doc.xpath('//session')[0].text
try:
pagenums = re.search(r'(Pg.*)', granule).groups()[0]
        except (AttributeError, IndexError):
print '%s does not contain any page numbers' % granule
return
# sys.exit()
try:
item = doc.xpath("//relatedItem[re:test(., '%s')]" % pagenums,
namespaces={'re': 'http://exslt.org/regular-expressions'})[0]
except IndexError:
print 'Item not found in xml: %s' % granule
sys.exit()
# Get the document title
self.document_title = escape(item.xpath('titleInfo/title')[0].text)
# Get the names of the members of Congress listed
self.members = []
for member in item.xpath('extension/congMember'):
data = member.attrib
data.update({'name': member.xpath('name')[0].text, })
self.members.append(data)
# print '|'.join([x['name'].replace('.', '\.') for x in self.members])
#print self.re_newspeaker
## re_newspeaker does not have any format strings in it...
# self.re_newspeaker = self.re_newspeaker % '|'.join([x['name'].replace('.', '\.') for x in self.members])
self.referenced_by = []
for related_item in item.xpath('relatedItem'):
if related_item.attrib.get('type') == 'isReferencedBy':
for identifier in related_item.xpath('identifier'):
data = identifier.attrib
data.update({'text': identifier.text or '', })
#print data
self.referenced_by.append(data)
def markup_preamble(self):
self.currentline = 1
theline = self.rawlines[self.currentline]
annotator = XMLAnnotator(theline)
annotator.register_tag(self.re_volume, '<volume>')
annotator.register_tag(self.re_number, '<number>')
annotator.register_tag(self.re_weekday, '<weekday>', group='weekday')
annotator.register_tag(self.re_month, '<month>', group='month')
annotator.register_tag(self.re_day, '<day>', group='day')
annotator.register_tag(self.re_year, '<year>', group='year')
xml_line = annotator.apply()
# print xml_line
self.xml.append(xml_line)
if self.is_bullet:
self.xml.append('<bullet>1</bullet>\n')
self.markup_chamber()
def markup_chamber(self):
self.currentline = 2
theline = self.rawlines[self.currentline]
annotator = XMLAnnotator(theline)
annotator.register_tag(self.re_chamber, '<chamber>')
xml_line = annotator.apply()
# print xml_line
self.xml.append(xml_line)
self.markup_pages()
def markup_pages(self):
self.currentline = 3
theline = self.rawlines[self.currentline]
annotator = XMLAnnotator(theline)
annotator.register_tag(self.re_pages, '<pages>', group='pages')
xml_line = annotator.apply()
# print xml_line
self.xml.append(xml_line)
self.xml.append('<congress>%s</congress>\n' % self.congress)
self.xml.append('<session>%s</session>\n' % self.session)
self.markup_title()
def clean_line(self, theline):
''' strip unwanted parts of documents-- page transitions and spacers.'''
newpage = re.match(self.re_newpage, theline)
if newpage:
theline = theline[:newpage.start()] + theline[newpage.end():]
underscore = re.match(self.re_underscore, theline)
if underscore:
theline = theline[:underscore.start()] + theline[underscore.end():]
# note: dont strip whitespace when cleaning the lines because
# whitespace is often the only indicator of the line's purpose or
# function.
return theline
def get_line(self, offset=0, **kwargs):
raw = kwargs.get('raw', False)
if self.currentline + offset > len(self.rawlines) - 1 or self.currentline + offset < 0:
return None
if raw:
return self.rawlines[self.currentline + offset]
return self.clean_line(self.rawlines[self.currentline + offset])
def is_special_title(self, title):
title = title.strip()
special_title_prefixes = self.special_titles.keys()
for prefix in special_title_prefixes:
if re.search(prefix, title):
return True
return False
def markup_title(self):
''' identify and markup the document title. the title is some lines of
text, usually but not always capitalized, usually but not always
centered, and followed by at least one empty line. they sometimes have a
line of dashes separating them from the body of the document. and
sometimes they don't exist at all.'''
MIN_TITLE_INDENT = 0
# skip line 4; it contains a static reference to the GPO website.
self.currentline = 5
theline = self.get_line()
while not theline.strip():
self.currentline += 1
theline = self.get_line()
# we're going to check what kind of title this is once we're done
# parsing it, so keep track of where it starts. since all the special
# titles are uniquely specified by their first line, we only need to
# track that.
title_startline = theline
# if it's not a specially formatted title and it's not indented enough,
# then it's probably missing a title altogether
if self.spaces_indented(theline) < MIN_TITLE_INDENT and not self.is_special_title(theline):
self.markup_paragraph()
else:
# a regular old title
annotator = XMLAnnotator(theline)
annotator.register_tag_open(self.re_title_start, '<document_title>')
self.currentline += 1
theline = self.get_line()
# check if the title finished on the same line it started on:
if not theline.strip():
annotator.register_tag_close(self.re_title_end, '</document_title>')
xml_line = annotator.apply()
# print xml_line
self.xml.append(xml_line)
else:
# either way we need to apply the tags to the title start.
xml_line = annotator.apply()
# print xml_line
self.xml.append(xml_line)
# now find the title end
while theline.strip():
self.currentline += 1
theline = self.get_line()
# once we hit an empty line, we know the end of the *previous* line
# is the end of the title.
theline = self.get_line(-1)
annotator = XMLAnnotator(theline)
annotator.register_tag_close(self.re_title_end, '</document_title>')
xml_line = annotator.apply()
#print xml_line
self.xml.append(xml_line)
# note that as we exit this function, the current line is one PAST
# the end of the title, which should generally be a blank line.
self.markup_paragraph()
def set_speaker(self, theline):
# checks if there is a new speaker, and if so, set the current_speaker
# attribute, and returns the name of the new (and now current) speaker.
# else leaves the current speaker.
new_speaker = re.search(self.re_newspeaker, theline)
if new_speaker:
name = new_speaker.group('name')
self.current_speaker = name # XXX TODO this should be a unique ID
return self.current_speaker
def check_bullet(self, theline):
if theline.find('<bullet>') >= 0:
self.rawlines[self.currentline] = self.rawlines[self.currentline].replace('<bullet>', ' ')
# now start at the end of the document and walk up the doc, to find
# the closing bullet tag.
ix = len(self.rawlines) - 1
while True:
if self.rawlines[ix].find('<bullet>') >= 0:
self.rawlines[ix] = self.rawlines[ix].replace('<bullet>', '')
return self.rawlines[self.currentline]
ix -= 1
else:
return theline
def markup_paragraph(self):
''' this is the standard paragraph parser. handles new speakers,
standard recorder comments, long and short quotes, etc. '''
# get to the first line
theline = self.get_line()
while not theline.strip():
self.currentline += 1
theline = self.get_line()
# remove <bullet> tags if they exist
theline = self.check_bullet(theline)
self.document_first_line = True
if not self.has_speakers:
self.xml.append('<recorder>')
while theline:
self.xml.append(theline)
self.currentline += 1
theline = self.get_line()
self.xml.append('</recorder>\n')
self.xml.append('</CRDoc>')
return
while theline:
self.preprocess_state(theline)
annotator = XMLAnnotator(theline)
if self.intitle:
annotator.register_tag(self.re_title, '<title>', group='title')
# some things only appear on the first line of a paragraph
elif self.inrollcall:
# will only match on first line of the roll call
annotator.register_tag_open(self.re_rollcall, '<recorder>')
elif self.new_paragraph:
annotator.register_tag_open(self.re_longquotestart,
'<speaking quote="true" speaker="%s">' % self.current_speaker,
group='start')
if self.recorder:
annotator.register_tag_open(self.re_startofline, '<recorder>')
# annotator.register_tag_open(self.re_recorderstart, '<recorder>', 'start')
#annotator.register_tag_open(self.re_recorder_fuzzy, '<recorder>', 'start')
annotator.register_tag(self.re_newspeaker, '<speaker name="%s">' % self.current_speaker, group='name')
if self.return_from_quote_interjection(theline):
annotator.register_tag_open(self.re_longquotebody,
'<speaking quote="true" speaker="%s">' % self.current_speaker,
group='start')
if not self.recorder and not self.inlongquote:
# check the current speaker-- if it's the recorder, then
# even though this isn't a "known" recorder sentence,
# there's no other speaker so we treat it like a recorder
# comment.
if self.current_speaker == 'recorder':
annotator.register_tag_open(self.re_speaking, '<recorder>', group='start')
self.recorder = True
else:
annotator.register_tag_open(self.re_speaking, '<speaking name="%s">' % self.current_speaker,
group='start')
if not self.intitle and not self.inlongquote and not self.inrollcall:
# annotator.register_tag_open(self.re_startshortquote, '<quote speaker="%s">' % self.current_speaker)
pass
# note: the endquote tag needs to be registered BEFORE the end
# speaking tag, because the quote tag should appear before (be
# nested within) the speaking tag. a nesting functionality should
# really be implemented within the XMLAnnotator class, but this
# will do for now.
if not self.inlongquote and not self.intitle and not self.inrollcall:
if self.inquote:
# annotator.register_tag_close(self.re_endshortquote, '</speaking>')
pass
if self.paragraph_ends():
if self.inrollcall:
annotator.register_tag_close(self.re_endofline, '</recorder>')
self.inrollcall = False
elif self.recorder:
annotator.register_tag_close(self.re_endofline, '</recorder>')
elif self.inlongquote:
if self.longquote_ends():
annotator.register_tag_close(self.re_endofline, '</speaking>')
elif self.intitle:
pass
# this specific set of states usually means we're somewhere
# unrecognized, and without these caveats can end up with
# stray </speaking> tags.
elif (self.current_speaker == 'recorder' and not (self.inlongquote or
self.inrollcall or
self.recorder or
self.inquote or
self.intitle)):
print "UNRECOGNIZED STATE (but that's ok): %s" % theline
else:
annotator.register_tag_close(self.re_endofline, '</speaking>')
# if (self.current_speaker == 'recorder' and self.inlongquote == False and self.inrollcall == False
# and self.recorder == False and self.inquote == False and self.intitle == False):
# print "UNRECOGNIZED STATE (but that's ok): %s" % theline
# annotator.register_tag(self.re_alltext, '<unknown>', group='text')
xml_line = annotator.apply()
#print xml_line
self.xml.append(xml_line)
# do some post processing
self.postprocess_state(theline)
# get the next line and do it all again
self.currentline += 1
theline = self.get_line()
while theline is not None and not theline.strip():
self.currentline += 1
theline = self.get_line()
if not theline:
# end of file
self.xml.append('</CRDoc>')
def matching_tags(self, open, close):
''' determine if the close tag matches the open tag '''
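# Illustrative examples (hypothetical tags, not from the original source):
#   matching_tags('<speaker name="Mr. SMITH">', '</speaker>') -> True
#   matching_tags('<recorder>', '</speaking>') -> False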
space = open.find(' ')
if space != -1:
derived_close = '</' + open[1:space] + '>'
else:
derived_close = '</' + open[1:]
if derived_close == close:
return True
else:
return False
def validate(self):
''' validate the xml in the file, checking for mismatched tags and
removing any tags if necessary. basically, it's more important for the
document to validate than to get everything perfect.'''
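# Illustrative behaviour (description only, not from the original source): if a
# '<speaking ...>' tag is opened on some line but no matching '</speaking>' ever
# follows, the opener ends up in the orphan list and is stripped from its line,
# so the final document still nests correctly.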
re_opentag = r'<[A-Za-z_]+( [a-z]+=".*?")?>'
re_closetag = r'</[A-Za-z_]+>'
re_tag = '</?.+?>'
active = []
orphans = []
for linenum, line in enumerate(self.xml):
tagiter = re.finditer(re_tag, line)
tags = [(match.group(), match.start(), match.end(), linenum) for match in tagiter]
for taginfo in tags:
tagname = taginfo[0]
if re.search(re_opentag, tagname):
active.append(taginfo)
# print active
elif re.search(re_closetag, tagname):
# print 'line: %s' % self.xml[taginfo[3]].strip('\n')
#print 'comparing %s and %s' % (active[-1][0], tagname)
if len(active) and self.matching_tags(active[-1][0], tagname):
del active[-1]
else:
print 'no match-- orphaned\n'
orphans.append(taginfo)
# append any remaining, unclosed open tags to the orphan list
orphans.extend(active)
# BUT, we don't want to remove the CRDoc tags
save = []
for orphan in orphans:
if orphan[0] == '<CRDoc>' or orphan[0] == '</CRDoc>':
print 'saving crdoc tag', orphan[0]
save.append(orphan)
for s in save:
orphans.remove(s)
'''
print 'Before Validation:\n'
print ''.join(self.xml)
print self.filepath
print '\n\n'
'''
if len(orphans):
print 'Orphaned Tags:\n'
for orphan in orphans:
print orphan, self.xml[orphan[3]]
# Capture orphans in a file, that sounds wrong, but we need it.
# This does not work on all systems. I will look into a better solution. -<NAME>
directory = "orphans"
if not os.path.exists(directory):
os.makedirs(directory)
f = open(os.path.join(directory, self.filename + '-orphans.txt'), 'a+') # Place orphans in their own dir.
# f = open(self.filename + '-orphans.txt', 'a+') # Captures orphans to their appropriate file.
print >> f, self.xml[orphan[3]]
f.close()
f = open('orphans-all.txt', 'a+')
print >> f, self.xml[orphan[3]]
f.close()
for orphan in orphans:
linenum = orphan[3]
theline = self.xml[linenum]
# we have to use start and end indices instead of replace, since
# replace will replace *all* occurences
start = orphan[1]
end = orphan[2]
self.xml[linenum] = theline[:start] + theline[end:]
'''
print '\nAfter Validation:\n'
print ''.join(self.xml)
print self.filepath
print '\n\n'
print orphans
'''
return
def longquote_ends(self):
# XXX this function is totally repeating patterns used in other
# places...
offset = 1
theline = self.get_line(offset)
while theline and not theline.strip():
offset += 1
theline = self.get_line(offset)
# there should only be NO line if it's the end of the document
if not theline:
return True
# longquote ends when the new paragraph is NOT another longquote
# paragraph (be it a new title, vote, or just regular paragraph).
if self.spaces_indented(theline) not in self.LONGQUOTE_NEW_PARA_INDENT:
return True
return False
def preprocess_state(self, theline):
''' in certain cases we need to match a regular expression AND a state,
so do some analysis to determine which tags to register. '''
return_from_interjection = self.return_from_quote_interjection(theline)
if self.is_new_paragraph(theline) or return_from_interjection:
self.new_paragraph = True
self.intitle = False
# if there's a new speaker, we don't want to
# if re.search(self.re_newspeaker, theline):
# self.newspeaker = True
# in the case of a long quote, we don't change the current speaker.
if re.search(self.re_longquotestart, theline) or return_from_interjection:
# if it's a long quote but we're already IN a long quote, then
# we don't want to mark the beginning again, so suppress the
# new paragraph state.
if self.inlongquote is True:
self.new_paragraph = False
self.inlongquote = True
else:
self.inlongquote = False
# if it's a recorder reading, then make a note.
# re_recorder_fuzzy looks for terms that indicate a
# continuation of a recorder comment only if the recorder was
# already speaking, but not otherwise.
if (re.search(self.re_recorderstart, theline)
or (self.current_speaker == 'recorder'
and re.search(self.re_recorder_fuzzy, theline))):
self.recorder = True
self.current_speaker = 'recorder'
else:
self.set_speaker(theline)
if self.current_speaker is None and self.document_first_line:
self.document_first_line = False
self.recorder = True
self.current_speaker = 'recorder'
elif re.search(self.re_rollcall, theline):
self.inrollcall = True
self.intitle = False
self.new_paragraph = False
elif not self.inlongquote and not self.inrollcall and self.is_title(theline):
self.intitle = True
self.new_paragraph = False
elif re.search(self.re_billheading, theline):
self.intitle = True
self.inlongquote = False
self.new_paragraph = False
else:
self.new_paragraph = False
self.intitle = False
# if a quote starts we are "in a quote" but we stay in that quote until
# we detect it ends.
if not self.inlongquote and re.search(self.re_startshortquote, theline):
self.inquote = True
# debugging..
# print 'in title? %s' % self.intitle
#print 'new paragraph? %s' % self.new_paragraph
'''
if self.current_speaker:
print 'current speaker: ' + self.current_speaker
else:
print 'no current speaker'
'''
#print 'in long quote? %s' % self.inlongquote
#print 'in recorder? %s' % self.recorder
#print 'in quote? %s' % self.inquote
#print 'in roll call? %s' % self.inrollcall
def postprocess_state(self, theline):
''' in certain cases where a state ends on a line, we only want to note
that after the proper tags have been registered and inserted. '''
# if we're in a long quote, the only way that we know the long quote is
# over is when a new paragraph starts and is NOT a long quote. else,
# just move along... nothing to see here.
if self.inlongquote:
return
if (not self.recorder and not self.inlongquote
and not self.intitle and not self.current_speaker):
# return
# this is a weird state we shouldn't be in
#print ''.join(self.rawlines)
objdata = self.__dict__
del objdata['xml']
del objdata['rawlines']
#print ''
#print objdata
#print ''
message = 'Unrecognized state while parsing %s\n' % self.filepath
self.error_flag = True
raise UnrecognizedCRDoc(message)
# if there's one or more complete quotes (start and end) on a line, or
# if a single quote ends that started on a previous line, then we're
# good to go and close the state. but if there's a quote that opens,
# that doesn't close, we need to stay in this state.
if self.inquote and re.search(self.re_endshortquote, theline):
last_open_quote = theline.rfind("``")
last_close_quote = theline.rfind("''")
if last_open_quote == -1 or last_close_quote > last_open_quote:
self.inquote = False
# note that here we set self.recorder to be False whilst leaving
# self.current_speaker set to 'recorder' (which it gets set to when a
# recorder state is recognized). this half-state is used when parsing
# long bits of verbatim material included in the CR as read by the
# recorder.
if self.recorder and self.paragraph_ends():
self.recorder = False
if self.intitle:
self.intitle = False
def return_from_quote_interjection(self, theline):
''' sometimes a paragraph in a long quote is not indented because it
was only briefly interrupted for the reader to make a comment. but we
still need to treat it like a new paragraph. '''
if not self.rawlines[self.currentline] == theline:
message = 'current line and index are not aligned'
self.error_flag = True
raise AlignmentError(message)
line_above = self.rawlines[self.currentline - 1].strip()
two_lines_above = self.rawlines[self.currentline - 2].strip()
empty = ""
if (self.spaces_indented(theline) == self.LONGQUOTE_INDENT and
line_above == empty and two_lines_above.endswith('--')):
return True
else:
return False
def paragraph_ends(self):
''' check if the current paragraph ends by looking ahead to what the
next non-empty line is. idempotent. '''
# a paragraph ending is really only indicated by the formatting which
# follows it. if a line is followed by a new paragraph, a long section
# of quoted text, or a subheading, then we know this must be the end of
# the current paragraph. almost all of those possibilities are
# indicated by the indentation level.
offset = 1
theline = self.get_line(offset)
while theline and not theline.strip():
offset += 1
theline = self.get_line(offset)
# if the document ends here, then it's certainly also the end of the
# paragraph
if not theline:
return True
if self.inrollcall:
if self.spaces_indented(theline) == self.NEW_PARA_INDENT:
return True
else:
return False
# new para or new long quote?
if self.is_new_paragraph(theline):
return True
# if the next line is a title then this paragraph is also over.
if self.is_title(theline, offset=offset):
return True
# this strange case arises sometimes when legislators interject a
# comment into the middle of something they are quoting/reading.
local_offset = self.currentline + offset
line_above = self.rawlines[local_offset - 1].strip()
first_line_on_page = re.search(self.re_newpage, self.rawlines[local_offset - 2])
empty = ""
if self.spaces_indented(theline) == self.LONGQUOTE_INDENT and line_above == empty and not first_line_on_page:
return True
# finally, if none of these cases are true, return false.
return False
def is_centered(self, theline):
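# Rough idea (illustrative, assuming self.LINE_MAX_LENGTH is the fixed width of a
# CR text line): a centered title's left indent should roughly equal
# (LINE_MAX_LENGTH - len(title)) / 2, and this check allows one column of slack.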
if not theline.strip():
return False
left_align = re.search('\S', theline).start()
right_align = (self.LINE_MAX_LENGTH - len(theline.strip())) / 2
# if the left and right align are the same (modulo off-by-one for
# even-length titles) then we consider it centered, and therefore a
# title.
if left_align in [right_align - 1, right_align, right_align + 1]:
return True
else:
return False
def is_title(self, theline, offset=0):
# self.current_line +offset must be the index for theline
local_offset = self.currentline + offset
if not self.rawlines[local_offset] == theline:
message = 'current line and index are not aligned'
self.error_flag = True
raise AlignmentError(message)
first_line_on_page = re.search(self.re_newpage, self.rawlines[local_offset - 2])
line_above = self.rawlines[local_offset - 1].strip('\n')
line_below = self.rawlines[local_offset + 1].strip('\n')
empty = lambda line: len(line.strip()) == 0
if re.search(self.re_allcaps, theline):
return True
if re.search(self.re_billdescription, theline):
return False
if self.is_centered(theline) and self.spaces_indented(theline) > 0:
if (empty(line_above) and self.is_centered(line_below)):
#print 'line above empty'
#print 'line above:', line_above
#print 'theline:', theline
#print 'line_below:', line_below
#print 'context:', '\n'.join(self.rawlines[local_offset-5:local_offset+5])
return True
if (empty(line_below) and self.is_centered(line_above)):
#print 'line below empty'
return True
if (self.is_centered(line_above) and self.is_centered(line_below)):
if self.inlongquote:
return False
else:
return True
if (empty(line_above) and empty(line_below)):
# the first line on a page can look like a title because
# there's an empty line separating new page designators from
# page content. but, we know exactly what those look like so
# eliminate that possibility here.
if not first_line_on_page:
return True
elif self.spaces_indented(theline) > 2:
return True
# this basically accounts for letter headers. note that the line
# lengths include a character for the \n newline character.
if (empty(line_above) and
(empty(line_below) or self.spaces_indented(line_below) in self.LONGQUOTE_NEW_PARA_INDENT
or self.spaces_indented(line_below) == self.LONGQUOTE_INDENT) and
(len(theline) == 67 or len(theline) == 66 or len(theline) == 63)):
return True
# bill headers eg like SEC. _03. SENSE OF CONGRESS.
if re.search(self.re_billheading, theline):
return True
if self.is_centered(theline) and re.search(self.re_date, theline.strip()):
return True
return False
def is_new_paragraph(self, theline):
if theline.startswith('<bullet>'):
return True
if re.search(self.re_underscore_sep, self.get_line(-1, raw=True)):
return True
if self.spaces_indented(theline) in self.LONGQUOTE_NEW_PARA_INDENT:
return True
if self.spaces_indented(theline) == self.NEW_PARA_INDENT:
return True
return False
def save(self):
''' save the xml file to disk.'''
saveas = "%s/%s" % (self.outdir, self.filename.replace('.txt', '.xml'))
savedir = os.path.dirname(saveas)
if not os.path.exists(savedir):
os.makedirs(savedir)
fp = open(saveas, 'w')
fp.write(''.join(self.xml))
fp.close()
print "saved file %s to disk" % saveas
# added for testing
def parse_to_string(infile, **kwargs):
parser = CRParser(infile, **kwargs)
parser.parse()
parser.validate()
return parser.xml
def do_parse(parser, logfile):
try:
parser.parse()
print 'flag status:', parser.error_flag
if not parser.error_flag:
parser.validate()
parser.save()
except Exception, e:
print 'flag status:', parser.error_flag
print 'Error processing file: %s: %s' % (parser.filepath, e)
today = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
logfile.write('%s: Error processing file %s\n' % (today, parser.filepath))
logfile.write('\t%s\n' % e)
logfile.write(get_stack_trace())
logfile.flush()
def parse_single(infile, **kwargs):
logfile = initialize_logfile(kwargs['logdir'])
try:
del kwargs['interactive']
except KeyError:
pass
try:
del kwargs['logdir']
except KeyError:
pass
parser = CRParser(infile, **kwargs)
do_parse(parser, logfile)
return os.path.join(kwargs['outdir'], os.path.split(infile)[1].replace('.txt', '.xml'))
def parse_directory(path, **kwargs):
logfile = initialize_logfile(kwargs['logdir'])
for file in os.listdir(path):
print file
print path
# we don't process the daily digest or front matter.
if file.find('FrontMatter') != -1 or file.find('PgD') != -1:
continue
# Makes text versions for the parser
elif file.endswith('.htm'):
old_file = os.path.join(path, file)
content = open(old_file, 'r').read()
# eliminates extra title and leaves expected space at the top
content = re.sub(r'<title>.+?</title>', '', content)
# need to eliminate particular blank lines, should still get the tags out if expected line breaks aren't there.
extras = ['<html>\n', '<html>', '</html>', '<head>\n', '</head>\n', '<head>', '</head>', '<body><pre>\n',
'<pre>', '</pre>', '<body>', '</body>', ]
for tag in extras:
content = content.replace(tag, '')
new_name = file[:-3] + 'txt'
new_path = os.path.join(path, new_name)
text_doc = open(new_path, 'w')
text_doc.write(content)
text_doc.close()
file = new_name
os.remove(old_file)
if not file.endswith('.txt'):
continue
if kwargs.get('interactive', False):
resp = raw_input("process file %s? (y/n/q) " % file)
if resp == 'n':
print 'skipping\n'
continue
elif resp == 'q':
sys.exit()
abspath = os.path.join(path, file)
try:
del kwargs['interactive']
except KeyError:
pass
try:
del kwargs['logdir']
except KeyError:
pass
parser = CRParser(abspath, **kwargs)
do_parse(parser, logfile)
return kwargs['outdir']
```
#### File: congressionalrecord/lib/xml_annotator.py
```python
from .regex import Regex
class XMLAnnotator(object):
def __init__(self, string):
self.regx = Regex(string)
def register_tag(self, re_string, open_tag, group=None):
''' Registers an XML tag to be inserted around a matching regular
expression. The closing tag is derived from the opening tag. This
function only registers the tags and their associated regex; apply()
must be run before the tags are inserted. If group is specified, then
the tag is inserted around the matching group instead of the entire
regular expression. '''
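# Hedged usage sketch (regex and text are illustrative; assumes the Regex helper
# records the insertion points and splices them in when apply() is called):
#   annotator = XMLAnnotator('Volume 153, Number 7')
#   annotator.register_tag(r'Volume \d+', '<volume>')
#   annotator.apply()   # -> '<volume>Volume 153</volume>, Number 7'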
close_tag = self.derive_close_tag(open_tag)
self.regx.insert_before(re_string, open_tag, group)
self.regx.insert_after(re_string, close_tag, group)
def register_tag_open(self, re_string, open_tag, group=None):
self.regx.insert_before(re_string, open_tag, group)
def register_tag_close(self, re_string, close_tag, group=None):
self.regx.insert_after(re_string, close_tag, group)
def derive_close_tag(self, open_tag):
space = open_tag.find(' ')
if space != -1:
close_tag = '</' + open_tag[1:space] + '>'
else:
close_tag = '</' + open_tag[1:]
return close_tag
def apply(self):
return self.regx.apply()
``` |
{
"source": "jpritt/boiler",
"score": 3
} |
#### File: jpritt/boiler/alignments.py
```python
import bisect
import bucket
import cross_bundle_bucket
import pairedread
import copy
import random
import time
class Alignments:
''' A set of reads aligned to a genome '''
def __init__(self, chromosomes, frag_len_cutoff=None, split_discordant=True):
''' Initialize a genome for alignments
chromosomes: A pair of parallel lists -- chromosomes[0] holds the chromosome names and chromosomes[1] the corresponding lengths
'''
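# Illustrative input (names and lengths are hypothetical):
#   chromosomes = (['chr1', 'chr2'], [10000, 8000])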
self.frag_len_cutoff = frag_len_cutoff
self.split_discordant = split_discordant
self.chromosomeNames = chromosomes[0]
self.chromosomes = dict()
for i in range(len(chromosomes[0])):
self.chromosomes[chromosomes[0][i]] = chromosomes[1][i]
# Initialize exon breaks between all chromosomes
self.exons = set()
# Offset of each chromosome from the start of the genome
self.chromOffsets = dict()
# Bases are generally indexed from 1
nextOffset = 1
for i in range(len(chromosomes[0])):
self.chromOffsets[chromosomes[0][i]] = nextOffset
nextOffset += chromosomes[1][i] + 1
#print(self.chromOffsets)
# List of potential gene boundaries as tuples
self.gene_bounds = []
# If 2 reads are less than this far apart, combine them into a single gene
self.overlap_radius = 50
# paired reads for which the mate still needs to be found
self.unmatched = dict()
# Unmatched reads that span multiple bundles
self.cross_bundle_reads = dict()
# Unmatched reads *in the current bundle* that span multiple bundles
self.curr_cross_bundle_reads = dict()
# Matched mates spanning multiple bundles
self.cross_bundle_pairs = []
self.cross_bundle_buckets = dict()
self.max_cross_bundle_read_len = 0
self.unpaired = []
self.paired = []
self.read_timeA = 0.0
self.read_countA = 0
self.read_timeB = 0.0
self.read_countB = 0
self.numUnmatched = 0
self.gtf_exons = []
self.gtf_id = 0
def processRead(self, name, read, paired):
''' If read is unpaired, add it to the correct spliced or unspliced list of reads.
If read is paired, find its pair or add it to a list to be found later. Once a pair of reads is found, add the combined read to the appropriate list of reads
'''
# Update read location for chromosome
offset = self.chromOffsets[read.chrom]
read.pos += offset
if read.exons:
for i in range(len(read.exons)):
read.exons[i] = [read.exons[i][0]+offset, read.exons[i][1]+offset]
# update list of subexon bounds
alignment = read.exons
if len(alignment) > 1:
for i in range(len(alignment)-1):
self.exons.add(alignment[i][1])
self.exons.add(alignment[i+1][0])
# Update the boundaries of the current bundle
#self.update_gene_bounds(read.exons[0][0], read.exons[-1][1])
if not paired:
if not read.exons:
print(name)
self.update_gene_bounds(read.exons[0][0], read.exons[-1][1])
self.add_unpaired(read)
else: # paired read
# update pair location for chromosome
read.pairOffset += self.chromOffsets[read.pairChrom]
if read.exons:
self.update_gene_bounds(read.exons[0][0], read.exons[-1][1])
if read.pairOffset <= read.pos:
# Look for mates from the current bundle
foundMate = True
while foundMate and read.NH > 0:
i = self.find_mate(read, name, self.unmatched)
if i >= 0:
mate = self.unmatched[name][i]
if mate.exons[-1][1] > read.exons[-1][1]:
self.add_paired(read, mate)
else:
self.add_paired(mate, read)
if not read.NH == mate.NH:
print(name)
exit()
if read.NH >= mate.NH:
read.NH -= mate.NH
mate.NH = 0
del self.unmatched[name][i]
else:
mate.NH -= read.NH
read.NH = 0
else:
foundMate = False
# Look for mates from a previous bundle
foundMate = True
while foundMate and read.NH > 0:
i = self.find_mate(read, name, self.cross_bundle_reads)
if i >= 0:
mate = self.cross_bundle_reads[name][i]
self.cross_bundle_pairs.append((mate, read, min(read.NH, mate.NH)))
if not read.NH == mate.NH:
print(name)
exit()
if read.NH >= mate.NH:
read.NH -= mate.NH
mate.NH = 0
del self.cross_bundle_reads[name][i]
else:
mate.NH -= read.NH
read.NH = 0
else:
foundMate = False
if read.NH > 0:
# One of its mates has not been processed yet
if (read.pairOffset - read.pos) < self.frag_len_cutoff:
self.update_gene_bounds(read.pos, read.pairOffset)
if name in self.unmatched:
self.unmatched[name].append(read)
else:
self.unmatched[name] = [read]
def find_mate(self, read, name, unmatched):
'''
Search the list of unmatched reads for one matching the given name and location
:param read: Read information including location and mate location
:param name: Identifying name (first column of SAM information)
:param unmatched: Dictionary of unmatched reads with key = name, value = list of reads
:return: Index in unmatched of matching read, or -1 if no match found
'''
if not name in unmatched:
return -1
r = unmatched[name]
for i in range(len(r)):
match = r[i]
if read.pairOffset == match.pos and match.pairOffset == read.pos:
if not read.exons:
if not read.pos == match.pos:
print('Error matching reads with name %s' % name)
exit()
read.exons = match.exons[:]
return i
elif not match.exons:
if not read.pos == match.pos:
print('Error matching reads with name %s' % name)
exit()
match.exons = read.exons[:]
return i
elif not self.split_discordant or not self.conflicts(read.exons, match.exons):
# Return index in unmatched dictionary of match
return i
# If no match found, return -1
return -1
def update_gene_bounds(self, start, end):
'''
Update the boundaries of the current bundle to include [start, end]
'''
if not self.gene_bounds:
self.gene_bounds = [start, end]
else:
if start < self.gene_bounds[0]:
self.gene_bounds[0] = start
if end > self.gene_bounds[1]:
self.gene_bounds[1] = end
def conflicts(self, exonsA, exonsB):
'''
:param exonsA: List containing the exon bounds for gene A in the form [(x_0,y_0), (x_1,y_1),...]
:param exonsB: List containing the exon bounds for gene B in the same form as exonsA
:return: 1 if an exon in gene A overlaps an intron in gene B, 2 if vice versa, 3 if one gene range lies strictly inside the other, 0 otherwise.
'''
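# Illustrative example (hypothetical coordinates): with
#   exonsA = [[10, 20], [30, 40]] and exonsB = [[15, 35], [45, 50]]
# B's first exon spans A's intron (20, 30), so this returns 1.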
if (exonsA[0][0] < exonsB[0][0] and exonsA[-1][1] > exonsB[-1][1]) or (exonsB[0][0] < exonsA[0][0] and exonsB[-1][1] > exonsA[-1][1]):
# One set of exons contains the other
return 3
for e in exonsB:
if e[0] > exonsA[-1][0]:
break
for i in range(len(exonsA)-1):
if e[0] >= exonsA[-i-1][0]:
break
elif e[1] > exonsA[-i-2][1]:
# Exon in B overlaps an intron in A
return 1
countA = len(exonsA)
for i in range(countA):
e = exonsA[countA-i-1]
if e[1] < exonsB[0][1]:
break
for i in range(len(exonsB)-1):
if e[1] <= exonsB[i][1]:
break
elif e[1] > exonsB[i][1] and e[0] < exonsB[i+1][0]:
# Exon in A overlaps an intron in B
return 2
return 0
def add_unpaired(self, read):
'''
Add this read to the list of unpaired
'''
self.unpaired.append(read)
def add_paired(self, read1, read2):
'''
Create a single paired read from these two reads and add it to the paired list
'''
strand = read1.strand or read2.strand
NH = min(read1.NH, read2.NH)
p = pairedread.PairedRead(read1.chrom, read1.exons, read2.chrom, read2.exons, strand, NH)
if not self.split_discordant and read1.chrom == read2.chrom and self.conflicts(read1.exons, read2.exons):
p.discordant = True
#if not read1.name == read2.name:
# print('Names %s, %s do not match' % (read1.name, read2.name))
# exit()
#print(read1.name)
#exit()
else:
p.discordant = False
self.paired.append(p)
def finalizeUnmatched(self):
'''
Finalize unmatched (discordant) reads. We convert them to unpaired reads.
'''
for name,reads in self.unmatched.items():
if reads:
for r in reads:
if hasattr(r,'pairOffset') and r.pairOffset > self.gene_bounds[1]:
# Waiting for mate in a future bundle
if not hasattr(r, 'exonIds'):
r.exonIds, r.length = self.getExonIds(r.exons)
r.bucket_length = sum([self.exons[e+1]-self.exons[e] for e in r.exonIds])
r.startOffset = r.exons[0][0] - self.exons[r.exonIds[0]]
r.endOffset = self.exons[r.exonIds[-1]+1] - r.exons[-1][1]
if name in self.cross_bundle_reads:
self.cross_bundle_reads[name].append(r)
else:
self.cross_bundle_reads[name] = [r]
else:
self.numUnmatched += 1
if not r.exons:
print(name)
exit()
self.add_unpaired(r)
# Reset dictionary for next bundle
self.unmatched = dict()
def finalizeExons(self):
'''
Convert the set of exon boundaries to a list
'''
start = self.gene_bounds[0]
end = self.gene_bounds[1]
self.exons.add(start)
self.exons.add(end)
if self.gtf_exons:
for i in range(self.gtf_id, len(self.gtf_exons)):
e = self.gtf_exons[i]
if e > end:
break
elif e > start:
self.exons.add(e)
self.gtf_id = i
self.exons = sorted(list(self.exons))
def finalize_cross_bundle_reads(self):
'''
Process the list of reads with mates outside this bundle
'''
# Finalize cross-bundle pairs that were discovered in this bundle
for p in self.cross_bundle_pairs:
if not hasattr(p[0], 'exonIds'):
#p[0].bundle = bundle_id
p[0].exonIds, p[0].length = self.getExonIds(p[0].exons)
p[0].bucket_length = sum([self.exons[e+1]-self.exons[e] for e in p[0].exonIds])
p[0].startOffset = p[0].exons[0][0] - self.exons[p[0].exonIds[0]]
p[0].endOffset = self.exons[p[0].exonIds[-1]+1] - p[0].exons[-1][1]
if not hasattr(p[1], 'exonIds'):
#p[1].bundle = bundle_id
p[1].exonIds, p[1].length = self.getExonIds(p[1].exons)
p[1].bucket_length = sum([self.exons[e+1]-self.exons[e] for e in p[1].exonIds])
p[1].startOffset = p[1].exons[0][0] - self.exons[p[1].exonIds[0]]
p[1].endOffset = self.exons[p[1].exonIds[-1]+1] - p[1].exons[-1][1]
# Update maximum length of a read in a cross-bundle pair (needed to efficiently store the length distribution)
if p[0].length > self.max_cross_bundle_read_len:
self.max_cross_bundle_read_len = p[0].length
if p[1].length > self.max_cross_bundle_read_len:
self.max_cross_bundle_read_len = p[1].length
NH = p[2]
strand = p[0].strand or p[1].strand
if strand == '-':
strand = -1
elif strand == '+':
strand = 1
else:
strand = 0
key = str(p[0].bundle) + ' ' + ' '.join([str(e) for e in p[0].exonIds]) + ',' + str(p[1].bundle) + ' ' + ' '.join([str(e) for e in p[1].exonIds]) + ' ' + str(NH) + ' ' + str(strand)
if key in self.cross_bundle_buckets:
self.cross_bundle_buckets[key].add_pair(p[0], p[1])
else:
b = cross_bundle_bucket.CrossBundleBucket(p[0].bundle, p[0].exonIds, p[1].bundle, p[1].exonIds)
b.set_length(p[0].bucket_length + p[1].bucket_length)
b.NH = NH
b.strand = strand
b.add_pair(p[0], p[1])
self.cross_bundle_buckets[key] = b
def computeBuckets(self):
# Compute coverage levels across every bucket
partitions = dict()
# Maximum fragment length
max_len = 0
for r in self.unpaired:
partitions = self.add_unpaired_to_partition(r, partitions)
#if not r.exons:
# print(r.pos)
if r.length > max_len:
max_len = r.length
for p in self.paired:
partitions = self.add_paired_to_partition(p, partitions)
if p.length > max_len:
max_len = p.length
return partitions, max_len
def add_paired_to_partition(self, p, partitions):
'''
:param p: Paired read to add
:param partitions: Dict of previously computed partitions (buckets), keyed by exon ids, strand and NH
:return: Updated partitions dict
'''
self.finalize_paired_read(p)
b = self.find_key(p, partitions)
b.add_paired(p)
return partitions
def add_unpaired_to_partition(self, r, partitions):
'''
:param r: Read to add
:param partitions: Dict of previously computed partitions (buckets), keyed by exon ids, strand and NH
:return: Updated partitions dict
'''
self.finalize_unpaired_read(r)
b = self.find_key(r, partitions)
b.add_unpaired(r)
return partitions
def find_key(self, r, partitions):
'''
:param r: Either a Read or PairedRead object
:param partitions: Dict of partitions, keyed by exon ids, strand and NH
:return: the Bucket for this read, creating it and adding it to partitions if not already present
'''
exonIds = r.exonIds
strand = '0'
if r.strand == '-':
strand = '-1'
elif r.strand == '+':
strand = '1'
key = '\t'.join([str(e) for e in exonIds]) + '\t' + strand + '\t' + str(r.NH)
if not key in partitions:
covLength = 0
for i in range(len(exonIds)):
e = exonIds[i]
subexon_length = self.exons[e+1] - self.exons[e]
covLength += subexon_length
partitions[key] = bucket.Bucket(exonIds, covLength)
partitions[key].strand = r.strand
partitions[key].NH = r.NH
partitions[key].countBefore = 0
j = partitions[key]
return j
def finalize_paired_read(self, p):
# TODO: Take this out
#if p.exonsB[0][0] < p.exonsA[0][0] or p.exonsA[-1][1] > p.exonsB[-1][1]:
# print('Weird reads:')
# print(p.exonsA)
# print(p.exonsB)
# print('')
# Find what range of subexon ids contains this pair
#for i in range(len(self.exons)-1):
# if self.exons[i+1] > p.exonsA[0][0]:
# break
#for j in range(i, len(self.exons)):
# if self.exons[j] > p.exonsB[-1][1]:
# break
# Exon ids spanned by read
start_id = bisect.bisect_right(self.exons, p.exonsA[0][0]) - 1
id = start_id
p.startOffset = p.exonsA[0][0] - self.exons[id]
p.lenLeft = 0
exonIdsA = []
for exon in p.exonsA:
p.lenLeft += exon[1] - exon[0]
while self.exons[id+1] <= exon[0]:
id += 1
while self.exons[id] < exon[1]:
exonIdsA.append(id)
id += 1
id = start_id
p.lenRight = 0
exonIdsB = []
for exon in p.exonsB:
p.lenRight += exon[1] - exon[0]
while self.exons[id+1] <= exon[0]:
id += 1
while self.exons[id] < exon[1]:
exonIdsB.append(id)
id += 1
p.endOffset = self.exons[id] - p.exonsB[-1][1]
if p.discordant:
p.exonIds = exonIdsA + exonIdsB
else:
n = 0
while n < len(exonIdsA) and exonIdsA[n] < exonIdsB[0]:
n += 1
p.exonIds = exonIdsA[:n] + exonIdsB
def finalize_unpaired_read(self, r):
# Exon ids spanned by read
r.exonIds, r.length = self.getExonIds(r.exons)
r.startOffset = r.exons[0][0] - self.exons[r.exonIds[0]]
r.endOffset = self.exons[r.exonIds[-1]+1] - r.exons[-1][1]
def getExonIds(self, exons):
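# Illustrative example (hypothetical coordinates, assuming self.exons == [100, 200, 300, 400]):
#   getExonIds([[150, 200], [300, 350]]) -> ([0, 2], 100)
# i.e. the read touches subexons 0 and 2 and covers 100 bases in total.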
exonIds = []
id = bisect.bisect_right(self.exons, exons[0][0]) - 1
length = 0
for exon in exons:
length += exon[1] - exon[0]
while self.exons[id+1] <= exon[0]:
id += 1
while self.exons[id] < exon[1]:
exonIds.append(id)
id += 1
return exonIds, length
def RLE(self, vector):
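# Plain run-length encoding; e.g. RLE([5, 5, 5, 2, 2, 7]) -> [[5, 3], [2, 2], [7, 1]]
# (illustrative values, not from the original source).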
rle = []
val = vector[0]
length = 0
for v in vector:
if v == val:
length += 1
else:
rle.append([val, length])
val = v
length = 1
rle.append([val, length])
return rle
def resetBundle(self):
self.exons = set()
self.gene_bounds = []
self.unpaired = []
self.paired = []
self.unmatched = dict()
self.cross_bundle_pairs = []
self.curr_cross_bundle_reads = dict()
def findReads(self, unpairedLens, pairedLens, lensLeft, lensRight, coverage, boundaries=None):
''' Find the set of reads that most closely matches the distribution of readLens and the coverage vector
'''
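# Sketch of the expected inputs (illustrative, not from the original source):
#   unpairedLens / pairedLens : dicts mapping fragment length -> count, e.g. {76: 10, 150: 3}
#   lensLeft / lensRight      : dicts of left/right mate lengths for the paired fragments
#   coverage                  : per-base coverage vector for this bucket, e.g. [0, 2, 2, 2, 1, 1, 0]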
fragmentLens = copy.copy(unpairedLens)
numFragments = 0
for k,v in unpairedLens.items():
numFragments += v
for k,v in lensLeft.items():
if k > 0:
numFragments += v
if k in fragmentLens:
fragmentLens[k] += v
else:
fragmentLens[k] = v
for k,v in lensRight.items():
if k > 0:
numFragments += v
if k in fragmentLens:
fragmentLens[k] += v
else:
fragmentLens[k] = v
countPaired = 0
for k,v in pairedLens.items():
countPaired += v
#print(pairedLens)
#print(pairedLens)
t1 = time.time()
# If the number of potential start sites is equal to the number of reads, then we can solve it with high accuracy with v1. Otherwise use v3
starts = 0
lens_orig = dict()
for k,v in fragmentLens.items():
lens_orig[k] = v
for i in range(len(coverage)):
if i == 0 and coverage[i] > 0:
starts += coverage[i]
elif coverage[i] > coverage[i-1]:
starts += coverage[i] - coverage[i-1]
if starts == numFragments:
ta = time.time()
reads = self.findReadsInCoverage_v1(coverage, fragmentLens)
tb = time.time()
self.read_timeA += (tb - ta)
self.read_countA += 1
else:
ta = time.time()
reads = self.findReadsInCoverage_v3(coverage, fragmentLens)
tb = time.time()
self.read_timeB += (tb - ta)
self.read_countB += 1
t2 = time.time()
if boundaries:
unpaired, paired = self.findPairsWithBoundaries(reads, unpairedLens, pairedLens, boundaries)
else:
#unpaired, paired = self.findPairsGreedy(reads, pairedLens)
unpaired, paired = self.findPairsRandom(reads, pairedLens)
t3 = time.time()
#if len(paired) > countPaired:
# print('%d > %d' % (len(paired), countPaired))
# exit()
#print('')
return unpaired, paired, t2-t1, t3-t2
def printTime(self):
print('Simple version:')
print(' Total time: %f s' % self.read_timeA)
print(' Num segments: %d' % self.read_countA)
print(' Avg time: %f s' % (self.read_timeA / self.read_countA))
print('')
print('Full version:')
print(' Total time: %f s' % self.read_timeB)
print(' Num segments: %d' % self.read_countB)
print(' Avg time: %f s' % (self.read_timeB / self.read_countB))
print('')
'''
def findPairsGreedy(self, reads, pairedLens):
pairedLensSorted = sorted(pairedLens.keys(), reverse=True)
reads.sort()
numReads = len(reads)
# Keep track of which reads we have already assigned
assigned = [0] * numReads
# Map each distance to the paired reads that match it
pairDists = dict()
singleDists = dict()
# Create a distance matrix between all pairs of reads
dists = [0] * numReads
for i in range(numReads):
dists[i] = [0] * (i+1)
for j in range(i):
d = reads[i][1] - reads[j][0]
dists[i][j] = d
if d in pairDists:
pairDists[d] += 1
else:
pairDists[d] = 1
d = reads[i][1] - reads[i][0]
dists[i][i] = d
if d in singleDists:
singleDists[d] += 1
else:
singleDists[d] = 1
paired = []
countPaired = 0
for k,v in pairedLens.items():
countPaired += v
while countPaired > 0:
bestFreq = None
bestL = None
foundPair = False
# Look for unique paired read lengths
for l in pairedLensSorted:
if not (l in pairDists and pairDists[l] > 0 and pairedLens[l] > 0):
continue
expected = pairedLens[l]
freq = pairDists[l]
if freq == 0:
continue
if freq <= expected:
pairedLens[l] -= 1
i,j = self.findPairedDist(dists, assigned, numReads, l)
paired.append([reads[j], reads[i]])
assigned[i] = 1
assigned[j] = 1
if dists[i][i] > 0:
singleDists[dists[i][i]] -= 1
dists[i][i] = 0
if dists[j][j] > 0:
singleDists[dists[j][j]] -= 1
dists[j][j] = 0
for x in range(i):
if dists[i][x] > 0:
pairDists[dists[i][x]] -= 1
dists[i][x] = 0
for x in range(i+1,numReads):
if dists[x][i] > 0:
pairDists[dists[x][i]] -= 1
dists[x][i] = 0
for x in range(j):
if dists[j][x] > 0:
pairDists[dists[j][x]] -= 1
dists[j][x] = 0
for x in range(j+1,numReads):
if dists[x][j] > 0:
pairDists[dists[x][j]] -= 1
dists[x][j] = 0
foundPair = True
countPaired -= 1
break
elif bestFreq == None or (freq-expected) < bestFreq:
bestFreq = freq - expected
bestL = l
# No unique paired lengths, so choose one from the least frequent
if not foundPair:
if bestFreq == None:
break
else:
pairedLens[bestL] -= 1
i,j = self.findPairedDist(dists, assigned, numReads, bestL)
paired.append([reads[j], reads[i]])
assigned[i] = 1
assigned[j] = 1
if dists[i][i] > 0:
singleDists[dists[i][i]] -= 1
dists[i][i] = 0
if dists[j][j] > 0:
singleDists[dists[j][j]] -= 1
dists[j][j] = 0
for x in range(i):
if dists[i][x] > 0:
pairDists[dists[i][x]] -= 1
dists[i][x] = 0
for x in range(i+1,numReads):
if dists[x][i] > 0:
pairDists[dists[x][i]] -= 1
dists[x][i] = 0
for x in range(j):
if dists[j][x] > 0:
pairDists[dists[j][x]] -= 1
dists[j][x] = 0
for x in range(j+1,numReads):
if dists[x][j] > 0:
pairDists[dists[x][j]] -= 1
dists[x][j] = 0
countPaired -= 1
remaining = [0] * (numReads - sum(assigned))
i = 0
for j in range(numReads):
if not assigned[j]:
remaining[i] = reads[j]
i += 1
remainingPairedLens = dict()
for k,v in pairedLens.items():
if v > 0:
remainingPairedLens[k] = v
if countPaired > 0:
newUnpaired, newPaired = self.findPairsGreedy2(remaining, remainingPairedLens)
unpaired = newUnpaired
paired += newPaired
else:
unpaired = remaining
return unpaired, paired
def findPairsGreedy2(self, reads, pairedLens):
numReads = len(reads)
reads.sort()
pairedLensSorted = sorted(pairedLens.keys(), reverse=True)
paired = []
countPaired = 0
for k,v in pairedLens.items():
countPaired += v
countUnpaired = numReads - 2 * countPaired
# Create a distance matrix between all pairs of reads
dists = [0] * numReads
for i in range(numReads):
dists[i] = [0] * i
for j in range(i):
d = reads[i][1] - reads[j][0]
dists[i][j] = d
assigned = [0] * numReads
lenIndex = 0
while countPaired > 0:
targetL = pairedLensSorted[lenIndex]
bestL = None
bestDiff = None
bestPos = None
for i in range(numReads):
for j in range(i,0,-1):
l = dists[i][j-1]
diff = abs(l - targetL)
if l > 0 and (bestDiff == None or diff < bestDiff):
bestDiff = diff
bestL = dists[i][j-1]
bestPos = (j-1, i)
elif l > targetL:
break
if bestL == None:
break
else:
pairedLens[targetL] -= 1
if pairedLens[targetL] == 0:
lenIndex += 1
i = bestPos[0]
j = bestPos[1]
paired.append([reads[i], reads[j]])
assigned[i] = 1
assigned[j] = 1
for x in range(i):
dists[i][x] = 0
for x in range(i+1,numReads):
dists[x][i] = 0
for x in range(j):
dists[j][x] = 0
for x in range(j+1,numReads):
dists[x][j] = 0
countPaired -= 1
unpaired = [0] * countUnpaired
i = 0
for j in range(numReads):
if not assigned[j]:
unpaired[i] = reads[j]
i += 1
return unpaired, paired
def findPairedDist(self, dists, assigned, numReads, value):
for i in range(numReads):
if assigned[i]:
continue
for j in range(i):
if assigned[j]:
continue
if dists[i][j] == value:
return i,j
'''
def findPairsWithBoundaries(self, reads, unpaired_lens, paired_lens, boundaries):
'''
Use the fact that all reads must span all the subexons to improve our pairing.
:param reads: list of [start, end] read intervals to pair up
:param unpaired_lens: dict mapping unpaired read length -> count
:param paired_lens: dict mapping paired fragment length -> count
:param boundaries: boundaries between subexons
:return: (unpaired, paired) -- leftover single reads and the assembled read pairs
'''
numUnpaired = 0
for k,v in unpaired_lens.items():
numUnpaired += v
numPaired = 0
for k,v in paired_lens.items():
numPaired += v
left_reads = []
spanning_reads = []
right_reads = []
for r in reads:
if r[0] < boundaries[0]:
if r[1] > boundaries[-1]:
spanning_reads.append(r)
else:
left_reads.append(r)
elif r[1] > boundaries[-1]:
right_reads.append(r)
else:
spanning_reads.append(r)
#print('Read does not overlap left or right...?')
#exit()
left_reads.sort()
spanning_reads.sort()
right_reads.sort()
if left_reads:
unique = [left_reads[0]]
left_counts = [1]
for r in left_reads[1:]:
if r == unique[-1]:
left_counts[-1] += 1
else:
unique.append(r)
left_counts.append(1)
left_reads = unique
left_bounds = [0, len(left_reads)]
else:
left_counts = []
left_bounds = [0, 0]
if spanning_reads:
unique = [spanning_reads[0]]
spanning_counts = [1]
for r in spanning_reads[1:]:
if r == unique[-1]:
spanning_counts[-1] += 1
else:
unique.append(r)
spanning_counts.append(1)
spanning_reads = unique
spanning_bounds = [0, len(spanning_reads)]
else:
spanning_counts = []
spanning_bounds = [0, 0]
if right_reads:
unique = [right_reads[0]]
right_counts = [1]
for r in right_reads[1:]:
if r == unique[-1]:
right_counts[-1] += 1
else:
unique.append(r)
right_counts.append(1)
right_reads = unique
right_bounds = [0, len(right_reads)]
else:
right_counts = []
right_bounds = [0, 0]
count_left = sum(left_counts)
count_right = sum(right_counts)
count_spanning = sum(spanning_counts)
countUnpaired = 0
for k,v in unpaired_lens.items():
countUnpaired += v
countPaired = 0
for k,v in paired_lens.items():
countPaired += v
paired = []
unpaired = None
unmatched = []
while countPaired > 0:
if count_spanning == 0 and (count_left == 0 or count_right == 0):
break
if count_left == 0 and count_right == 0:
if countPaired % 2 == 0:
p = self.findLeftPairRandom(spanning_reads, paired_lens, spanning_counts, spanning_bounds)
else:
p = self.findRightPairRandom(spanning_reads, paired_lens, spanning_counts, spanning_bounds)
if len(p) == 2:
paired.append([spanning_reads[p[0]][:], spanning_reads[p[1]][:]])
countPaired -= 1
count_spanning -= 2
else:
unmatched.append(spanning_reads[p[0]][:])
count_spanning -= 1
elif count_left >= count_right:
p = self.findLeftPair(left_reads, spanning_reads, right_reads, left_counts, spanning_counts, right_counts, left_bounds, spanning_bounds, right_bounds, paired_lens)
count_left -= 1
if len(p) == 3:
if p[1] < 0:
paired.append([left_reads[p[0]][:], right_reads[p[2]][:]])
count_right -= 1
else:
paired.append([left_reads[p[0]][:], spanning_reads[p[1]][:]])
count_spanning -= 1
countPaired -= 1
else:
unmatched.append(left_reads[p[0]][:])
else:
p = self.findRightPair(left_reads, spanning_reads, right_reads, left_counts, spanning_counts, right_counts, left_bounds, spanning_bounds, right_bounds, paired_lens)
count_right -= 1
if len(p) == 3:
if p[1] < 0:
paired.append([left_reads[p[0]][:], right_reads[p[2]][:]])
count_left -= 1
else:
paired.append([spanning_reads[p[1]][:], right_reads[p[2]][:]])
count_spanning -= 1
countPaired -= 1
else:
unmatched.append(right_reads[p[0]][:])
for i in range(left_bounds[0], left_bounds[1]):
for _ in range(left_counts[i]):
unmatched.append([left_reads[i][0], left_reads[i][1]])
for i in range(spanning_bounds[0], spanning_bounds[1]):
for _ in range(spanning_counts[i]):
unmatched.append([spanning_reads[i][0], spanning_reads[i][1]])
for i in range(right_bounds[0], right_bounds[1]):
for _ in range(right_counts[i]):
unmatched.append([right_reads[i][0], right_reads[i][1]])
unmatched.sort()
i = 0
j = len(unmatched)
for _ in range(countPaired):
if i < j-1:
paired.append([unmatched[i], unmatched[j-1]])
i += 1
j -= 1
if i < j:
if unpaired:
unpaired += unmatched[i:j]
else:
unpaired = unmatched[i:j]
elif not unpaired:
unpaired = []
return unpaired, paired
def findLeftPair(self, left_reads, spanning_reads, right_reads, left_counts, spanning_counts, right_counts, left_bounds, spanning_bounds, right_bounds, paired_lens):
'''
:param left_reads:
:param spanning_reads:
:param right_reads:
:param paired_lens:
:return:
'''
i = left_bounds[0]
start = left_reads[i][0]
left_counts[left_bounds[0]] -= 1
while left_bounds[0] < left_bounds[1] and left_counts[left_bounds[0]] == 0:
left_bounds[0] += 1
# Look for a match among right reads
for j in range(right_bounds[0],right_bounds[1]):
if right_counts[j] == 0:
continue
l = right_reads[j][1] - start
if l in paired_lens:
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
right_counts[j] -= 1
while right_bounds[1] > right_bounds[0] and right_counts[right_bounds[1]-1] == 0:
right_bounds[1] -= 1
while right_bounds[0] < right_bounds[1] and right_counts[right_bounds[0]] == 0:
right_bounds[0] += 1
return [i, -1, j]
# Look for a match among spanning reads
for j in range(spanning_bounds[0],spanning_bounds[1]):
if spanning_counts[j] == 0:
continue
l = spanning_reads[j][1] - start
if l in paired_lens:
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
spanning_counts[j] -= 1
while spanning_bounds[1] > spanning_bounds[0] and spanning_counts[spanning_bounds[1]-1] == 0:
spanning_bounds[1] -= 1
while spanning_bounds[0] < spanning_bounds[1] and spanning_counts[spanning_bounds[0]] == 0:
spanning_bounds[0] += 1
return [i, j, -1]
return [i]
def findRightPair(self, left_reads, spanning_reads, right_reads, left_counts, spanning_counts, right_counts, left_bounds, spanning_bounds, right_bounds, paired_lens):
'''
:param left_reads:
:param spanning_reads:
:param right_reads:
:param paired_lens:
:return:
'''
j = right_bounds[1]-1
end = right_reads[j][1]
right_counts[right_bounds[1]-1] -= 1
while right_bounds[0] < right_bounds[1] and right_counts[right_bounds[1]-1] == 0:
right_bounds[1] -= 1
# Look for a match among left reads
for i in range(left_bounds[0],left_bounds[1]):
if left_counts[i] == 0:
continue
l = end - left_reads[i][0]
if l in paired_lens:
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
left_counts[i] -= 1
while left_bounds[1] > left_bounds[0] and left_counts[left_bounds[1]-1] == 0:
left_bounds[1] -= 1
while left_bounds[0] < left_bounds[1] and left_counts[left_bounds[0]] == 0:
left_bounds[0] += 1
return [i, -1, j]
# Look for a match among spanning reads
for i in range(spanning_bounds[0],spanning_bounds[1]):
if spanning_counts[i] == 0:
continue
l = end - spanning_reads[i][0]
if l in paired_lens:
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
spanning_counts[i] -= 1
while spanning_bounds[1] > spanning_bounds[0] and spanning_counts[spanning_bounds[1]-1] == 0:
spanning_bounds[1] -= 1
while spanning_bounds[0] < spanning_bounds[1] and spanning_counts[spanning_bounds[0]] == 0:
spanning_bounds[0] += 1
return [-1, i, j]
return [j]
def findPairsRandom(self, reads, paired_lens, debug=False):
#print('Pairing %d reads' % len(reads))
countPairs = 0
for k,v in paired_lens.items():
countPairs += v
reads.sort()
unique_reads = [reads[0]]
read_counts = [1]
for r in reads[1:]:
if r == unique_reads[-1]:
read_counts[-1] += 1
else:
unique_reads.append(r)
read_counts.append(1)
# Index of first and last reads in array that have not been used yet
read_bounds = [0, len(read_counts)]
paired = []
unmatched = []
unmatched_counts = []
while countPairs > 0 and read_bounds[0] < read_bounds[1]:
p = self.findLeftPairRandom(unique_reads, paired_lens, read_counts, read_bounds)
if len(p) == 2:
paired.append([unique_reads[p[0]][:], unique_reads[p[1]][:]])
countPairs -= 1
else:
self.add_to_unmatched(unmatched, unmatched_counts, unique_reads[p[0]], 1)
if countPairs == 0 or read_bounds[0] >= read_bounds[1]:
break
p = self.findRightPairRandom(unique_reads, paired_lens, read_counts, read_bounds)
if debug:
print(p)
if len(p) == 2:
paired.append([unique_reads[p[0]][:], unique_reads[p[1]][:]])
countPairs -= 1
else:
self.add_to_unmatched(unmatched, unmatched_counts, unique_reads[p[0]], 1)
# Add remaining reads to unmatched
for i in range(read_bounds[0], read_bounds[1]):
if read_counts[i] > 0:
self.add_to_unmatched(unmatched, unmatched_counts, unique_reads[i], read_counts[i])
num_remaining = sum(unmatched_counts)
bounds = [0, len(unmatched)]
paired_lens_sorted = sorted(paired_lens)
while countPairs > 0 and num_remaining > 1:
p = self.findClosestLeftPair(unmatched, unmatched_counts, bounds, paired_lens_sorted)
paired.append(p)
countPairs -= 1
num_remaining -= 2
if countPairs == 0 or num_remaining < 2:
break
p = self.findClosestRightPair(unmatched, unmatched_counts, bounds, paired_lens_sorted)
paired.append(p)
countPairs -= 1
num_remaining -= 2
unpaired = [0] * sum(unmatched_counts)
id = 0
for i in range(bounds[0], bounds[1]):
for _ in range(unmatched_counts[i]):
unpaired[id] = [unmatched[i][0], unmatched[i][1]]
id += 1
#print('Done!')
return unpaired, paired
def add_to_unmatched(self, unmatched, counts, read, num):
i = bisect.bisect_left(unmatched, read)
if i < len(unmatched) and unmatched[i] == read:
counts[i] += num
else:
unmatched.insert(i, read)
counts.insert(i, num)
def findLeftPairRandom(self, reads, paired_lens, read_counts, read_bounds):
i = read_bounds[0]
read_counts[i] -= 1
while read_bounds[0] < read_bounds[1] and read_counts[read_bounds[0]] == 0:
read_bounds[0] += 1
for j in range(read_bounds[1]-1, read_bounds[0]-1, -1):
if read_counts[j] == 0:
continue
l = reads[j][1] - reads[i][0]
if l in paired_lens:
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
read_counts[j] -= 1
while read_bounds[1] > read_bounds[0] and read_counts[read_bounds[1]-1] == 0:
read_bounds[1] -= 1
while read_bounds[0] < read_bounds[1] and read_counts[read_bounds[0]] == 0:
read_bounds[0] += 1
return [i, j]
return [i]
def findRightPairRandom(self, reads, paired_lens, read_counts, read_bounds):
j = read_bounds[1]-1
read_counts[j] -= 1
while read_bounds[1] > read_bounds[0] and read_counts[read_bounds[1]-1] == 0:
read_bounds[1] -= 1
for i in range(read_bounds[0], read_bounds[1]):
if read_counts[i] == 0:
continue
l = reads[j][1] - reads[i][0]
if l in paired_lens:
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
read_counts[i] -= 1
while read_bounds[0] < read_bounds[1] and read_counts[read_bounds[0]] == 0:
read_bounds[0] += 1
while read_bounds[1] > read_bounds[0] and read_counts[read_bounds[1]-1] == 0:
read_bounds[1] -= 1
return [i,j]
return [j]
def findClosestLeftPair(self, reads, counts, bounds, paired_lens_sorted):
i = bounds[0]
start = reads[i][0]
counts[i] -= 1
while bounds[0] < bounds[1] and counts[bounds[0]] == 0:
bounds[0] += 1
num_lens = len(paired_lens_sorted)
# Distance to closest match
closestD = None
# Index of closest match
closestJ = None
# Length of closest match
closestL = None
for j in range(bounds[0], bounds[1]):
if counts[j] == 0:
continue
l = reads[j][1] - start
id = bisect.bisect_left(paired_lens_sorted, l)
if id < num_lens:
d = abs(l - paired_lens_sorted[id])
if d == 0:
closestD = 0
closestJ = j
closestL = paired_lens_sorted[id]
break
elif closestD == None or d < closestD:
closestD = d
closestJ = j
closestL = paired_lens_sorted[id]
if id > 0:
d = abs(l - paired_lens_sorted[id-1])
if closestD == None or d < closestD:
closestD = d
closestJ = j
closestL = paired_lens_sorted[id-1]
if closestD == None:
print('Error! Trying to pair only 1 read?')
exit()
pair = [reads[i][:], reads[closestJ][:]]
counts[closestJ] -= 1
while bounds[0] < bounds[1] and counts[bounds[0]] == 0:
bounds[0] += 1
while bounds[0] < bounds[1] and counts[bounds[1]-1] == 0:
bounds[1] -= 1
return pair
def findClosestRightPair(self, reads, counts, bounds, paired_lens_sorted):
j = bounds[1]-1
end = reads[j][1]
counts[j] -= 1
while bounds[0] < bounds[1] and counts[bounds[1]-1] == 0:
bounds[1] -= 1
num_lens = len(paired_lens_sorted)
# Distance to closest match
closestD = None
# Index of closest match
closestI = None
# Length of closest match
closestL = None
for i in range(bounds[1]-1, bounds[0]-1, -1):
if counts[i] == 0:
continue
l = end - reads[i][1]
id = bisect.bisect_left(paired_lens_sorted, l)
if id < num_lens:
d = abs(l - paired_lens_sorted[id])
if d == 0:
closestD = 0
closestI = i
closestL = paired_lens_sorted[id]
break
elif closestD == None or d < closestD:
closestD = d
closestI = i
closestL = paired_lens_sorted[id]
if id > 0:
d = abs(l - paired_lens_sorted[id-1])
if closestD == None or d < closestD:
closestD = d
closestI = i
closestL = paired_lens_sorted[id-1]
if closestD == None:
print('Error! Trying to pair only 1 read?')
exit()
pair = [reads[closestI][:], reads[j][:]]
counts[closestI] -= 1
while bounds[0] < bounds[1] and counts[bounds[0]] == 0:
bounds[0] += 1
while bounds[0] < bounds[1] and counts[bounds[1]-1] == 0:
bounds[1] -= 1
return pair
def findPairsGreedy(self, reads, paired_lens):
countPairs = 0
for k,v in paired_lens.items():
countPairs += v
reads.sort()
unique_reads = [reads[0]]
read_counts = [1]
for r in reads[1:]:
if r == unique_reads[-1]:
read_counts[-1] += 1
else:
unique_reads.append(r)
read_counts.append(1)
possible = list(range(len(reads)))
paired = []
unmatched = []
while countPairs > 0 and len(possible) > 1:
p = self.findPairGreedy(unique_reads, read_counts, paired_lens, possible, unmatched)
if p:
paired.append(p)
countPairs -= 1
paired_lens_sorted = sorted(paired_lens)
for _ in range(min(countPairs, len(unmatched) // 2)):
p = self.findApproxPairGreedy(unique_reads, read_counts, paired_lens_sorted, unmatched)
paired.append(p)
countPairs -= 1
unpaired = []
for i in range(len(read_counts)):
for _ in range(read_counts[i]):
unpaired.append(unique_reads[i][:])
return unpaired, paired
def findPairGreedy(self, reads, counts, paired_lens, possible, unmatched):
n = len(possible)
id1 = random.randint(0, n-1)
r1 = reads[possible[id1]]
for id2 in range(id1):
r2 = reads[possible[id2]]
l = r1[1] - r2[0]
if l in paired_lens:
pair = [r2[:], r1[:]]
counts[possible[id1]] -= 1
if counts[possible[id1]] == 0:
del possible[id1]
counts[possible[id2]] -= 1
if counts[possible[id2]] == 0:
del possible[id2]
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
return pair
for id2 in range(id1+1, n):
r2 = reads[possible[id2]]
l = r2[1] - r1[0]
if l in paired_lens:
pair = [r1[:], r2[:]]
counts[possible[id2]] -= 1
if counts[possible[id2]] == 0:
del possible[id2]
counts[possible[id1]] -= 1
if counts[possible[id1]] == 0:
del possible[id1]
if paired_lens[l] > 1:
paired_lens[l] -= 1
else:
del paired_lens[l]
return pair
# No pair found for this read
unmatched.append(id1)
del possible[id1]
return None
def findApproxPairGreedy(self, reads, counts, paired_lens_sorted, possible):
n = len(possible)
num_lens = len(paired_lens_sorted)
id1 = random.randint(0, n-1)
r1 = reads[possible[id1]]
closestD = None
closestId = None
for id2 in range(id1):
r2 = reads[possible[id2]]
l = r1[1] - r2[0]
id = bisect.bisect_left(paired_lens_sorted, l)
if id < num_lens:
d = abs(l - paired_lens_sorted[id])
if closestD == None or d < closestD:
closestD = d
closestId = id2
if id > 0:
d = abs(l - paired_lens_sorted[id-1])
if closestD == None or d < closestD:
closestD = d
closestId = id2
for id2 in range(id1+1, n):
r2 = reads[possible[id2]]
l = r2[1] - r1[0]
id = bisect.bisect_left(paired_lens_sorted, l)
if id < num_lens:
d = abs(l - paired_lens_sorted[id])
if closestD == None or d < closestD:
closestD = d
closestId = id2
if id > 0:
d = abs(l - paired_lens_sorted[id-1])
if closestD == None or d < closestD:
closestD = d
closestId = id2
pair = [r1[:], reads[possible[closestId]][:]]
if closestId > id1:
counts[possible[closestId]] -= 1
if counts[possible[closestId]] == 0:
del possible[closestId]
counts[possible[id1]] -= 1
if counts[possible[id1]] == 0:
del possible[id1]
else:
counts[possible[id1]] -= 1
if counts[possible[id1]] == 0:
del possible[id1]
counts[possible[closestId]] -= 1
if counts[possible[closestId]] == 0:
del possible[closestId]
return pair
'''
def findPairs(self, reads, pairedLens):
if len(pairedLens) == 0:
return reads, []
length = 0
for r in reads:
if r[1] > length:
length = r[1]
paired = []
unpaired = []
reads.sort()
# Sort read pairedLensSorted lengths
pairedLensSorted = sorted(pairedLens, reverse=True)
starts = [0] * (length+1)
ends = [0] * (length+1)
for r in reads:
starts[r[0]] += 1
ends[r[1]] += 1
unmatched = []
i = 0
j = length
while i < j and starts[i] <= 0:
i += 1
while j > i and ends[j] <= 0:
j -= 1
while i < j and len(pairedLensSorted) > 0:
id1 = 0
while not reads[id1][0] == i:
id1 += 1
startRead = reads[id1]
starts[i] -= 1
ends[reads[id1][1]] -= 1
del reads[id1]
foundRead = False
for r in range(len(pairedLensSorted)):
l = pairedLensSorted[r]
if i+l <= length and ends[i+l] > 0:
pairedLens[l] -= 1
if pairedLens[l] == 0:
del pairedLensSorted[r]
foundRead = True
# Add paired read to list
id2 = 0
while not reads[id2][1] == i+l:
id2 += 1
starts[reads[id2][0]] -= 1
ends[i+l] -= 1
if reads[id2][0] < startRead[0]:
paired += [[reads[id2], startRead]]
else:
paired += [[startRead, reads[id2]]]
#paired += [[startRead, reads[id2]]]
del reads[id2]
break
if not foundRead:
unmatched += [startRead]
while i < j and starts[i] <= 0:
i += 1
while j > i and ends[j] <= 0:
j -= 1
if j > i and len(pairedLensSorted) > 0:
id1 = 0
while not reads[id1][1] == j:
id1 += 1
startRead = reads[id1]
starts[reads[id1][0]] -= 1
ends[j] -= 1
del reads[id1]
foundRead = False
for r in range(len(pairedLensSorted)):
l = pairedLensSorted[r]
if j-l >= 0 and starts[j-l] > 0:
pairedLens[l] -= 1
if pairedLens[l] == 0:
del pairedLensSorted[r]
foundRead = True
# Add paired read to list
id2 = 0
while not reads[id2][0] == j-l:
id2 += 1
starts[j-l] -= 1
ends[reads[id2][1]] -= 1
if reads[id2][0] < startRead[0]:
paired += [[reads[id2], startRead]]
else:
paired += [[startRead, reads[id2]]]
#paired += [[reads[id2], startRead]]
del reads[id2]
break
if not foundRead:
unmatched += [startRead]
while i < j and starts[i] <= 0:
i += 1
while j > i and ends[j] <= 0:
j -= 1
# Pair up remaining reads until we meet the quota of paired-end reads
paired_lens_new = dict()
for k,v in pairedLens.items():
if v > 0:
paired_lens_new[k] = v
u, p = self.findPairsGreedy(unmatched+reads, paired_lens_new)
unpaired = u
paired += p
return unpaired, paired
'''
def findReadsInCoverage_v1(self, coverage, readLens, boundaries=None):
''' Given a coverage vector, return a set of reads that closely fits both the coverage vector and the distribution of read lengths.
The algorithm creates new reads greedily, starting from both ends at once to ensure the ends of the vector are well marked.
This algorithm is guaranteed to return a set of reads that covers every base at least to the corresponding depth of the coverage vector.
In many cases the algorithm will overcompensate by creating extra reads to make sure every base in the coverage vector is covered.
In such cases new reads have length equal to the median read length in the input distribution.
coverage: Coverage vector containing the accumulation of many reads
readLens: Dictionary containing the distribution of all read lengths in the coverage vector
boundaries: Start and end points, of which all reads must cross at least 1
'''
lens = readLens.keys()
# Find max and mode read lengths
maxLen = max(lens)
# Read lengths sorted by frequency, largest to smallest
lensSorted = sorted(readLens, key=readLens.get, reverse=True)
reads = []
# start and end of coverage window
# Keep finding reads from both ends until they meet in the middle
start = 0
while coverage[start] <= 0:
start += 1
end = len(coverage)
while coverage[end-1] <= 0:
end -= 1
while end > start:
# find a read from the beginning
readStart = start
readEnd = start
closestEndpoint = None
if boundaries and readStart >= boundaries[0]:
minLen = boundaries[1]+1 - start
else:
minLen = 1
for length in range(minLen, maxLen+1):
if (readStart+length == end) or (readStart+length < end and coverage[readStart + length] < coverage[readStart + length - 1]):
if length in readLens:
readEnd = readStart + length
reads.append([readStart, readEnd])
readLens[length] -= 1
# reorder sorted lengths
for i in range(len(lensSorted)):
if lensSorted[i] == length:
break
j = i+1
while j < len(readLens) and readLens[lensSorted[j]] > readLens[lensSorted[i]]:
j += 1
if j > i+1:
lensSorted = lensSorted[:i] + lensSorted[i+1:j] + [lensSorted[i]] + lensSorted[j:]
if readLens[length] == 0:
del readLens[length]
break
else:
if closestEndpoint == None:
closestEndpoint = readStart + length
# Don't extend into section where coverage is 0
if (readStart+length) >= len(coverage) or coverage[readStart+length] == 0:
break
if readEnd == readStart:
if closestEndpoint == None:
lenId = 0
length = lensSorted[lenId]
readEnd = readStart + length
while readEnd > len(coverage) and lenId < (len(lensSorted)-1):
lenId += 1
length = lensSorted[lenId]
readEnd = readStart + length
if readEnd > len(coverage):
# No read lengths fit within the end of the vector
readEnd = len(coverage)
reads.append([readStart, readEnd])
if length in readLens:
readLens[length] -= 1
# reorder sorted lengths
for i in range(len(lensSorted)):
if lensSorted[i] == length:
break
j = i+1
while j < len(readLens) and readLens[lensSorted[j]] > readLens[lensSorted[i]]:
j += 1
if j > i+1:
lensSorted = lensSorted[:i] + lensSorted[i+1:j] + [lensSorted[i]] + lensSorted[j:]
if readLens[length] == 0:
del readLens[length]
else:
readEnd = closestEndpoint
reads.append([readStart, readEnd])
# Update coverage vector
for i in range(readStart, readEnd):
coverage[i] -= 1
# update start
while start < end and coverage[start] <= 0:
start += 1
while end > start and coverage[end-1] <= 0:
end -= 1
if end > start:
readEnd = end
readStart = end
closestEndpoint = None
if boundaries and readEnd <= boundaries[1]:
minLen = readEnd+1 - boundaries[0]
else:
minLen = 1
for length in range(minLen, maxLen+1):
if (readEnd-length == start) or (readEnd-length > start and coverage[readEnd - length] > coverage[readEnd - length - 1]):
if length in readLens:
readStart = readEnd - length
reads.append([readStart, readEnd])
readLens[length] -= 1
# reorder sorted lengths
for i in range(len(lensSorted)):
if lensSorted[i] == length:
break
j = i+1
while j < len(readLens) and readLens[lensSorted[j]] > readLens[lensSorted[i]]:
j += 1
if j > i+1:
lensSorted = lensSorted[:i] + lensSorted[i+1:j] + [lensSorted[i]] + lensSorted[j:]
if readLens[length] == 0:
del readLens[length]
break
else:
if closestEndpoint == None:
closestEndpoint = readEnd - length
if readStart == readEnd:
if closestEndpoint == None:
length = lensSorted[0]
readStart = readEnd - length
reads.append([readStart, readEnd])
if length in readLens:
readLens[length] -= 1
# reorder sorted lengths
for i in range(len(lensSorted)):
if lensSorted[i] == length:
break
j = i+1
while j < len(readLens) and readLens[lensSorted[j]] > readLens[lensSorted[i]]:
j += 1
if j > i+1:
lensSorted = lensSorted[:i] + lensSorted[i+1:j] + [lensSorted[i]] + lensSorted[j:]
if readLens[length] == 0:
del readLens[length]
else:
readStart = closestEndpoint
reads.append([readStart, readEnd])
for i in range(readStart, readEnd):
coverage[i] -= 1
# update end
while coverage[end-1] <= 0 and end > start:
end -= 1
while coverage[start] <= 0 and start < end:
start += 1
return reads
def findReadsInCoverage_v3(self, cov, frag_lens):
reads = []
num_reads = 0
start = 0
end = len(cov)
while start < end and cov[start] <= 0:
start += 1
while start < end and cov[end-1] <= 0:
end -= 1
min_len = None
max_len = None
mode_len = 0
mode_freq = 0
for l,f in frag_lens.items():
if not min_len or l < min_len:
min_len = l
if not max_len or l > max_len:
max_len = l
if f > mode_freq:
mode_freq = f
mode_len = l
while start < end:
start, end, num_reads = self.findReadLeft(cov, start, end, reads, min_len, max_len, mode_len, num_reads)
#if start < end:
# start, end = self.findReadRight(cov, start, end, reads, min_len, max_len, mode_len)
reads = self.finalizeCovReads(reads, min_len, max_len, mode_len)
return reads
def finalizeCovReads(self, reads, min_len, max_len, mode_len):
'''
Split sections of end-to-end reads and return the list of all individual read boundaries
:param reads: list of [start, end, count] entries, where count is the number of end-to-end reads merged into the interval
:param min_len: minimum read length in the input distribution
:param max_len: maximum read length in the input distribution
:param mode_len: most frequent read length in the input distribution
:return: list of [start, end] boundaries, one per individual read
'''
final = []
for r in reads:
if r[2] == 1:
final.append([r[0], r[1]])
else:
pos = r[0]
for i in range(r[2]):
total_len = r[1] - pos
min_left = total_len - max_len * (r[2]-i-1)
max_left = total_len - min_len * (r[2]-i-1)
l = max(min(mode_len, max_left), min_left)
final.append([pos, pos+l])
pos += l
return final
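# Worked example (illustrative only, not part of the original source): for a merged
# entry r = [0, 300, 3] with min_len=50, max_len=150 and mode_len=100, each pass keeps
# the remaining span feasible for the reads still to be placed:
#   i=0: min_left=0,   max_left=200 -> l=100 -> [0, 100]
#   i=1: min_left=50,  max_left=150 -> l=100 -> [100, 200]
#   i=2: min_left=100, max_left=100 -> l=100 -> [200, 300]
# so the 300-bp end-to-end section is split into three mode-length reads.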
def findReadsEndingAt(self, reads, end):
r = []
for i in range(len(reads)-1, -1, -1):
if reads[i][1] == end:
r.append(i)
return r
def findReadsStartingAt(self, reads, start):
r = []
for i in range(len(reads)-1, -1, -1):
if reads[i][0] == start:
r.append(i)
return r
def findNextGap(self, cov, start, end, modeLen):
gap = False
for i in range(min(modeLen,end-start)):
if cov[start+i] > 0:
if gap:
return [gap_start, i]
else:
if not gap:
gap_start = i
gap = True
if gap:
return [gap_start, i+1]
else:
return None
def extendReadRight(self, cov, start, stop, reads, max_len, num_reads):
'''
Extend the right end of a read ending at 'start' up to 'stop'
'''
if num_reads == 0:
return False
j = 0
for i in range(num_reads-1, -1, -1):
if reads[i][1] < stop:
j = i
break
for i in range(j, -1, -1):
r = reads[i]
if r[1] < start:
return False
elif r[1] == start and (r[1] - r[0]) < (max_len * r[2]):
r[1] = min(max_len*r[2] + r[0], stop)
if r[1] < stop:
# Recalculate new position for read
for new_pos in range(j, i-1, -1):
if reads[new_pos][1] < r[1]:
break
else:
new_pos = j
if i < new_pos:
self.move_read(reads, i, new_pos)
for i in range(start, r[1]):
cov[i] -= 1
return True
return False
def shortenReadRight(self, cov, start, stop, reads, max_shorten, min_len, num_reads):
'''
Shorten the right end of a read ending at 'stop' down to 'start'
'''
if num_reads == 0:
return False
for i in range(num_reads-1, -1, -1):
r = reads[i]
if r[1] < stop:
return False
elif (start - r[0]) >= (min_len * r[2]):
new_pos = 0
for j in range(i, -1, -1):
if reads[j][1] < start:
new_pos = j+1
break
for n in range(start, r[1]):
cov[n] += 1
r[1] = start
self.move_read(reads, i, new_pos)
return True
return False
def add_read(self, reads, new_read, num_reads):
if num_reads == 0:
reads.append([new_read[0], new_read[1], 1])
return 1
# After adding new read, this should be its position in the array
new_pos = -1
for i in range(num_reads-1, -1, -1):
if reads[i][1] < new_read[1]:
new_pos = i
break
for i in range(new_pos, -1, -1):
if reads[i][1] == new_read[0]:
# We can extend this read rather than adding a new one
reads[i][1] = new_read[1]
reads[i][2] += 1
# Move read from i to j
if i < new_pos:
self.move_read(reads, i, new_pos)
return num_reads
elif reads[i][1] < new_read[0]:
break
# We have to add a new read
r = [new_read[0], new_read[1], 1]
reads.append(r)
# Move read from end to j
if new_pos+1 < num_reads:
self.move_read(reads, num_reads, new_pos+1)
return num_reads+1
def move_read(self, reads, i, j):
r = reads[i]
if i < j:
for x in range(i, j):
reads[x] = reads[x+1]
reads[j] = r
elif i > j:
for x in range(i, j, -1):
reads[x] = reads[x-1]
reads[j] = r
def findReadLeft(self, cov, start, end, reads, min_len, max_len, mode_len, num_reads):
gap = self.findNextGap(cov, start, end, mode_len)
len_range = max_len - min_len
if not gap:
readEnd = min(start+mode_len,end)
if readEnd-start < min_len:
if not self.extendReadRight(cov, start, readEnd, reads, max_len, num_reads):
# We want to make sure that coverage ends at the right place
num_reads = self.add_read(reads, [readEnd - min_len, readEnd], num_reads)
for i in range(start, readEnd):
cov[i] -= 1
else:
#print('Adding read [%d, %d]' % (start, readEnd))
num_reads = self.add_read(reads, [start, readEnd], num_reads)
for i in range(start, readEnd):
cov[i] -= 1
else:
r = self.extendReadRight(cov, start, start+gap[0], reads, max_len, num_reads)
if not r:
r = self.shortenReadRight(cov, start+gap[0], start+gap[1], reads, len_range, min_len, num_reads)
if not r:
if gap[0] >= min_len:
readEnd = start+gap[0]
num_reads = self.add_read(reads, [start, readEnd], num_reads)
elif gap[0] < min_len/2:
# Skip bases
readEnd = start+gap[0]
else:
readEnd = min(start+mode_len, end)
num_reads = self.add_read(reads, [start, readEnd], num_reads)
for i in range(start, readEnd):
cov[i] -= 1
while start < end and cov[start] <= 0:
start += 1
while start < end and cov[end-1] <= 0:
end -= 1
return start, end, num_reads
def getChromosome(self, index):
''' Return chromosome name containing the given index from the whole-genome vector
'''
for c in self.chromosomeNames:
if self.chromosomes[c] > index:
return c
else:
index -= self.chromosomes[c]
def writeSAM(self, filehandle, unpaired, paired, header=True, force_xs=False, readId=0):
''' Write all alignments to a SAM file
'''
# write header
if header:
filehandle.write('@HD\tVN:1.0\tSO:unsorted\n')
for c in self.chromosomeNames:
filehandle.write('@SQ\tSN:' + str(c) + '\tLN:' + str(self.chromosomes[c]) + '\n')
for read in unpaired:
invalid = False
for i in range(len(read.exons)-1):
if read.exons[i+1][0] < read.exons[i][1]:
invalid = True
break
if invalid:
continue
exons = read.exons
cigar = [str(exons[0][1] - exons[0][0]) + 'M']
spliced = False
for i in range(1, len(exons)):
if exons[i][0] == exons[i-1][1]:
prevLen = int(cigar[-1][:-1])
cigar[-1] = str(prevLen + exons[i][1] - exons[i][0]) + 'M'
else:
spliced = True
cigar += [str(exons[i][0] - exons[i-1][1]) + 'N']
cigar += [str(exons[i][1] - exons[i][0]) + 'M']
cigar = ''.join(cigar)
chrom = read.chrom
offset = self.chromOffsets[chrom]
if force_xs and spliced and not read.strand:
#print('Assigning random XS value to spliced unpaired read')
if random.randint(0,1) == 0:
read.strand = '+'
else:
read.strand = '-'
if read.strand:
filehandle.write(chrom+':'+str(readId) + '\t0\t' + chrom + '\t' + str(exons[0][0]-offset) + '\t50\t' + cigar + '\t*\t0\t0\t*\t*\tXS:A:' + read.strand + '\tNH:i:' + str(int(read.NH)) + '\n')
else:
filehandle.write(chrom+':'+str(readId) + '\t0\t' + chrom + '\t' + str(exons[0][0]-offset) + '\t50\t' + cigar + '\t*\t0\t0\t*\t*\tNH:i:' + str(int(read.NH)) + '\n')
readId += 1
for pair in paired:
invalid = False
for i in range(len(pair.exonsA)-1):
if pair.exonsA[i+1][0] < pair.exonsA[i][1]:
invalid = True
break
if invalid:
continue
for i in range(len(pair.exonsB)-1):
if pair.exonsB[i+1][0] < pair.exonsB[i][1]:
invalid = True
break
if invalid:
continue
exonsA = pair.exonsA
cigarA = [str(exonsA[0][1] - exonsA[0][0]) + 'M']
spliced = False
for i in range(1, len(exonsA)):
if exonsA[i][0] == exonsA[i-1][1]:
prevLen = int(cigarA[-1][:-1])
cigarA[-1] = str(prevLen + exonsA[i][1] - exonsA[i][0]) + 'M'
else:
spliced = True
cigarA += [str(exonsA[i][0] - exonsA[i-1][1]) + 'N']
cigarA += [str(exonsA[i][1] - exonsA[i][0]) + 'M']
cigarA = ''.join(cigarA)
exonsB = pair.exonsB
cigarB = [str(exonsB[0][1] - exonsB[0][0]) + 'M']
for i in range(1, len(exonsB)):
if exonsB[i][0] == exonsB[i-1][1]:
prevLen = int(cigarB[-1][:-1])
cigarB[-1] = str(prevLen + exonsB[i][1] - exonsB[i][0]) + 'M'
else:
spliced = True
cigarB += [str(exonsB[i][0] - exonsB[i-1][1]) + 'N']
cigarB += [str(exonsB[i][1] - exonsB[i][0]) + 'M']
cigarB = ''.join(cigarB)
# Distance from start of first read to end of second read
totalLen = max(exonsA[-1][1],exonsB[-1][1]) - exonsA[0][0]
chromA = pair.chromA
chromB = pair.chromB
offsetA = self.chromOffsets[chromA]
if not chromA == chromB:
offsetB = self.chromOffsets[chromB]
if force_xs and spliced and not pair.strand:
#print('Assigning random XS value to spliced paired read')
if random.randint(0,1) == 0:
pair.strand = '+'
else:
pair.strand = '-'
#if chromA == chromB and self.conflicts(exonsA, exonsB):
# if pair.strand:
# filehandle.write(chromA+':'+str(readId) + '\t0\t' + chromA + '\t' + str(exonsA[0][0]-offsetA) + '\t50\t' + cigarA + '\t*\t0\t0\t*\t*\tNH:i:' + str(pair.NH) + '\tXS:A:' + pair.strand + '\n')
# readId += 1
# filehandle.write(chromB+':'+str(readId) + '\t0\t' + chromB + '\t' + str(exonsB[0][0]-offsetA) + '\t50\t' + cigarB + '\t*\t0\t0\t*\t*\tNH:i:' + str(pair.NH) + '\tXS:A:' + pair.strand + '\n')
# else:
# filehandle.write(chromA+':'+str(readId) + '\t0\t' + chromA + '\t' + str(exonsA[0][0]-offsetA) + '\t50\t' + cigarA + '\t*\t0\t0\t*\t*\tNH:i:' + str(pair.NH) + '\n')
# readId += 1
# filehandle.write(chromB+':'+str(readId) + '\t0\t' + chromB + '\t' + str(exonsB[0][0]-offsetA) + '\t50\t' + cigarB + '\t*\t0\t0\t*\t*\tNH:i:' + str(pair.NH) + '\n')
#else:
if chromB == chromA:
filehandle.write(chromA+':'+str(readId) + '\t81\t' + chromA + '\t' + str(exonsA[0][0]-offsetA) + '\t50\t' + cigarA + '\t=\t' + str(exonsB[0][0]-offsetA) + '\t' + str(totalLen) + '\t*\t*\tNH:i:' + str(int(pair.NH)))
else:
filehandle.write(chromA+':'+str(readId) + '\t81\t' + chromA + '\t' + str(exonsA[0][0]-offsetA) + '\t50\t' + cigarA + '\t' + chromB + '\t' + str(exonsB[0][0]-offsetB) + '\t0\t*\t*\tNH:i:' + str(int(pair.NH)))
if pair.strand:# and 'N' in cigarA:
filehandle.write('\tXS:A:' + pair.strand)
filehandle.write('\n')
if chromB == chromA:
filehandle.write(chromA+':'+str(readId) + '\t161\t' + chromB + '\t' + str(exonsB[0][0]-offsetA) + '\t50\t' + cigarB + '\t=\t' + str(exonsA[0][0]-offsetA) + '\t' + str(-totalLen) + '\t*\t*\tNH:i:' + str(int(pair.NH)))
else:
filehandle.write(chromA+':'+str(readId) + '\t161\t' + chromB + '\t' + str(exonsB[0][0]-offsetB) + '\t50\t' + cigarB + '\t' + chromA + '\t' + str(exonsA[0][0]-offsetA) + '\t0\t*\t*\tNH:i:' + str(int(pair.NH)))
if pair.strand:# and 'N' in cigarB:
filehandle.write('\tXS:A:' + pair.strand)
filehandle.write('\n')
readId += 1
return readId
```
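The `writeSAM` method above rebuilds each CIGAR string from the read's exon coordinates: adjacent exons extend the current `M` run, while a gap between exons is emitted as an `N` (skipped region). A minimal standalone sketch of that reconstruction — the helper name and the example coordinates are mine, not from the source:
```python
def exons_to_cigar(exons):
    """Build a CIGAR string from [(start, end), ...] exon coordinates."""
    cigar = [str(exons[0][1] - exons[0][0]) + 'M']
    for i in range(1, len(exons)):
        if exons[i][0] == exons[i - 1][1]:
            # Contiguous with the previous exon: extend the last M run
            prev_len = int(cigar[-1][:-1])
            cigar[-1] = str(prev_len + exons[i][1] - exons[i][0]) + 'M'
        else:
            # Gap between exons becomes a skipped region (N)
            cigar.append(str(exons[i][0] - exons[i - 1][1]) + 'N')
            cigar.append(str(exons[i][1] - exons[i][0]) + 'M')
    return ''.join(cigar)

# exons_to_cigar([(100, 150), (150, 175), (300, 350)]) -> '75M125N50M'
```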
#### File: jpritt/boiler/boiler.py
```python
import sys
import argparse
import time
import logging
import os
VERSION = '1.0.1'
def go(args):
if sys.version_info < (3,0):
print('Boiler requires Python version 3 or better to run')
exit()
if args.command == 'compress':
import compress
if args.preprocess:
modes = ['hisat']
if args.preprocess.lower() == 'hisat':
print('Preprocessing HISAT alignments')
import enumeratePairs
prefix = args.alignments[:args.alignments.index('.')] + '.processed'
enumeratePairs.processHISAT(args.alignments, prefix + '.sam')
os.system('samtools view -bS ' + prefix + '.sam | samtools sort - ' + prefix)
os.system('samtools view -h -o ' + prefix + '.sam ' + prefix + '.bam')
else:
print('Preprocessing mode not recognized: %s' % args.preprocess)
print('Supported preprocessing modes include: ' + ', '.join(modes))
exit()
args.alignments = prefix + '.sam'
if args.verbose:
print('Compressing')
start = time.time()
compressor = compress.Compressor(args.frag_len_cutoff)
compressor.compress(args.alignments, args.compressed, args.gtf, None, args.frag_len_z_cutoff, args.split_diff_strands, args.split_discordant)
if args.verbose:
end = time.time()
print('Compression took %0.3f s' % (end-start))
elif args.command == 'query':
import expand
expander = expand.Expander()
if args.counts:
if not args.gtf:
print('Missing required argument for counts query: --gtf path/to/reference.gtf')
exit()
if not args.output:
print('Counts query cannot write to standard output -- please include a file prefix for output')
exit()
exon_counts, junc_counts = expander.getCounts(args.compressed, args.gtf)
expander.write_counts(exon_counts, junc_counts, args.output)
else:
if not args.chrom:
print('Missing required argument for query: --chrom chromosome')
exit()
if args.output:
#logging.info('Opening %s to write results' % args.output)
try:
f = open(args.output, 'w')
except IOError:
#logging.info('Couldn\'t open file %s for writing. Using standard out instead.' % args.output)
f = sys.stdout
else:
#logging.warning('Writing results to standard out')
f = sys.stdout
if args.bundles:
bundles = expander.getGeneBounds(args.compressed, args.chrom, args.start, args.end)
for b in bundles:
f.write(str(b[0])+'\t'+str(b[1])+'\n')
if args.coverage:
cov = expander.getCoverage(args.compressed, args.chrom, args.start, args.end)
f.write(','.join([str(c) for c in cov]) + '\n')
if args.reads:
aligned, unpaired, paired = expander.getReads(args.compressed, args.chrom, args.start, args.end)
aligned.writeSAM(f, unpaired, paired, False, False, 0)
elif args.command == 'decompress':
import expand
if args.verbose:
print('Decompressing')
start = time.time()
expander = expand.Expander(args.force_xs)
expander.expand(args.compressed, args.expanded)
if args.verbose:
end = time.time()
print('Decompression took %0.3f s' % (end-start))
if __name__ == '__main__':
if '--version' in sys.argv:
print('Boiler v' + VERSION)
sys.exit(0)
# Check for Python version
version = sys.version_info[0]
if version < 3:
print('Python 3 is required to run Boiler')
exit()
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter, prog='Boiler')
subparsers = parser.add_subparsers(help='Commands', dest='command')
parser_compress = subparsers.add_parser('compress', help="Compress a SAM file")
parser_compress.add_argument("-c", "--frag-len-cutoff", type=int, help='Store any fragments longer than this in a bundle-spanning bucket')
parser_compress.add_argument("-z", "--frag-len-z-cutoff", type=float, help='Store any fragments above this z-score in a bundle-spanning bucket')
parser_compress.add_argument("-s", "--split-diff-strands", action="store_true", help='Split any pairs with different XS values')
parser_compress.add_argument("-d", "--split-discordant", action="store_true", help='Treat discordant pairs as unpaired reads')
parser_compress.add_argument("-p", "--preprocess", type=str, help="Set to 'tophat' to preprocess TopHat alignments, 'hisat' to preprocess HISAT alignments")
parser_compress.add_argument("-g", "--gtf", type=str, help="Path to reference GTF to improve compression accuracy (this will result in larger file size)")
parser_compress.add_argument("-v", "--verbose", help="Print timing information", action="store_true")
parser_compress.add_argument("alignments", type=str, help='Full path of SAM file containing aligned reads')
parser_compress.add_argument("compressed", type=str, nargs='?', default='compressed.bin', help="Compressed filename. Default: compressed.bin")
parser_query = subparsers.add_parser('query', help="Query compressed file")
group = parser_query.add_mutually_exclusive_group()
group.add_argument('-b', '--bundles', help="Query bundles", action="store_true")
group.add_argument('-c', '--coverage', help="Query coverage", action="store_true")
group.add_argument('-r', '--reads', help="Query reads", action="store_true")
group.add_argument('-f', '--counts', help='Query read counts over exons and splice junctions in a GTF', action="store_true")
parser_query.add_argument('--chrom', help="Chromosome to query", type=str)
parser_query.add_argument('--start', help="Beginning of range to query", type=int)
parser_query.add_argument('--end', help="End of range to query", type=int)
parser_query.add_argument('--gtf', help="Path to reference GTF over which to query counts")
parser_query.add_argument('compressed', help="Path to compressed file created by Boiler", type=str)
parser_query.add_argument('output', nargs='?', default=None, help="File to write result to. Default: Standard out")
parser_decompress = subparsers.add_parser('decompress', help="Decompress to a SAM file")
parser_decompress.add_argument("-f", "--force-xs", help="If we decompress a spliced read with no XS value, assign it a random one (so Cufflinks can run)", action="store_true")
parser_decompress.add_argument("-v", "--verbose", help="Print timing information", action="store_true")
parser_decompress.add_argument("compressed", type=str, help="Compressed filename")
parser_decompress.add_argument("expanded", type=str, nargs='?', default='expanded.sam', help="Write decompressed SAM to this filename. Default: expanded.sam")
args = parser.parse_args(sys.argv[1:])
go(args)
```
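Based on the argparse definitions above, typical invocations of the three subcommands look roughly as follows; the file names, region and z-score cutoff are placeholders, and the calls are wrapped in `subprocess` only to keep the example in Python:
```python
import subprocess

# Compress a sorted SAM file; the flags mirror the argparse definitions above.
subprocess.run(['python3', 'boiler.py', 'compress',
                '--frag-len-z-cutoff', '0.125', '--split-discordant',
                'alignments.sam', 'compressed.bin'], check=True)

# Query coverage over a region of the compressed file.
subprocess.run(['python3', 'boiler.py', 'query', '--coverage',
                '--chrom', '2R', '--start', '1000', '--end', '2000',
                'compressed.bin', 'coverage.txt'], check=True)

# Decompress back to SAM, assigning random XS values to spliced reads that lack one.
subprocess.run(['python3', 'boiler.py', 'decompress', '--force-xs',
                'compressed.bin', 'expanded.sam'], check=True)
```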
#### File: jpritt/boiler/compress.py
```python
import alignments
import re
import read
import binaryIO
import math
import os
import preprocess
import time
class Compressor:
aligned = None
# 0 - zlib
# 1 - lzma
# 2 - bz2
compressMethod = 0
covSize = 0
totalSize = 0
def __init__(self, frag_len_cutoff):
if self.compressMethod == 0:
self.zlib = __import__('zlib')
elif self.compressMethod == 1:
self.lzma = __import__('lzma')
elif self.compressMethod == 2:
self.bz2 = __import__('bz2')
if frag_len_cutoff:
print('Set fragment length cutoff to %d' % frag_len_cutoff)
self.frag_len_cutoff = frag_len_cutoff
def compress(self, samFilename, compressedFilename, gtf, min_filename, frag_len_z_cutoff, split_diff_strands, split_discordant):
''' Compress the alignments from the given SAM file into a single binary output file
samFilename: Path to the sorted input SAM file
compressedFilename: Path for the compressed output file
'''
self.p = preprocess.Preprocessor(samFilename, frag_len_z_cutoff, split_diff_strands)
if not self.frag_len_cutoff:
self.frag_len_cutoff = self.p.frag_len_cutoff
print('Using fragment length cutoff of ' + str(self.frag_len_cutoff))
if split_diff_strands:
print('Splitting mates on different strands')
else:
print('Not splitting mates on different strands')
if split_discordant:
print('Splitting discordant')
else:
print('Not splitting discordant')
# Reads on different strands that should be unpaired
self.diff_strand_unpaired = self.p.unpaired
del self.p
# Read header
header = ''
with open(samFilename, 'r') as f:
for line in f:
if line[0] == '@':
header += line
else:
break
self.chromosomes = self.parseSAMHeader(header)
self.aligned = alignments.Alignments(self.chromosomes, self.frag_len_cutoff, split_discordant)
if gtf:
self.aligned.gtf_exons = self.parseGTF(gtf, self.aligned.chromOffsets)
self.compressByBundle(samFilename, compressedFilename, min_filename)
#print('%d unmatched' % self.aligned.numUnmatched)
print('Approximately %d / %d = %f%% of compressed file is coverage' % (self.covSize, self.totalSize, 100.0*float(self.covSize)/float(self.totalSize)))
print('Finished compressing')
def compressByBundle(self, input_name, compressed_name, intermediate_name=None):
'''
Read a sorted SAM file and compress it in segments (bundles) determined by clusters of overlapping reads
:param input_name: Path to the sorted input SAM file
:param compressed_name: Path for the compressed output file
:param intermediate_name: Optional path for an intermediate SAM file of the processed reads
:return:
'''
# If coverage is 0 for at least this many bases, treat it as the end of a potential gene (bundle)
overlapRadius = 50
spliced_index = []
bundles = []
first = True
bundle_id = 0
read_id = 0
diff_strand_unpaired_id = 0
num_diff_strand_unpaired = len(self.diff_strand_unpaired)
firstR = None
with open(input_name, 'r') as filehandle:
id = 0
start_id = 0
for line in filehandle:
# Check if header line
if line[0] == '@':
continue
row = line.strip().split('\t')
if row[2] == '*':
# HISAT includes unmapped reads at the end of the file; we just skip them
continue
if not row[2] in self.chromosomes[0]:
print('Error! Chromosome ' + str(row[2]) + ' not found!')
exit()
# Starting position of this read
start = self.aligned.chromOffsets[row[2]] + int(row[3])
if self.aligned.gene_bounds and start > (self.aligned.gene_bounds[-1] + overlapRadius):
# Compress most recent bundle
self.aligned.finalizeExons()
self.aligned.finalizeUnmatched()
self.aligned.finalize_cross_bundle_reads()
#if self.aligned.gene_bounds[0] < 100480943 and self.aligned.gene_bounds[1] > 100478955:
# print(bundle_id)
# print(self.aligned.gene_bounds)
# print(self.aligned.exons)
# print(self.aligned.gene_bounds[0] - self.aligned.chromOffsets['X'])
# print(self.aligned.gene_bounds[1] - self.aligned.chromOffsets['X'])
# exit()
bundle_id += 1
start_id = id
bundles.append(self.aligned.exons)
# Write to intermediate file
if intermediate_name:
if first:
# If it's the first bundle, write the header as well
with open(intermediate_name, 'w') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
else:
with open(intermediate_name, 'a') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)
junctions, maxReadLen = self.aligned.computeBuckets()
self.sortedJuncs = sorted(junctions.keys())
# Compress bundle to temporary file
if first:
mode = 'wb'
else:
mode = 'ab'
with open('temp.bin', mode) as f:
l = self.compressBundle(junctions, maxReadLen, f)
spliced_index.append(l)
# Start new bundle
self.aligned.resetBundle()
self.aligned.exons.add(start)
first = False
# Process read
if row[5] == '*':
# HISAT occasionally prints * as the cigar string when it is identical to its mate
#print('No cigar string')
#print(row[0])
#exit()
exons = None
else:
exons = self.parseCigar(row[5], int(row[3]))
# find XS (strand) and NH values
strand = None
NH = 1
for r in row[11 : len(row)]:
if r[0:5] == 'XS:A:' or r[0:5] == 'XS:a:':
strand = r[5]
elif r[0:3] == 'NH:':
NH = int(r[5:])
flags = int(row[1])
if flags & 4:
# Read is unmapped
continue
r = read.Read(row[2], int(row[3]), exons, strand, NH)
#r.name = row[0]
if row[6] == '*' or (flags & 8):
paired = False
elif diff_strand_unpaired_id < num_diff_strand_unpaired and id == self.diff_strand_unpaired[diff_strand_unpaired_id]:
#if not row[6] == '*':
# print('\t'.join(row))
paired = False
diff_strand_unpaired_id += 1
else:
paired = True
r.bundle = bundle_id
r.pairOffset = int(row[7])
if row[6] == '=':
r.pairChrom = row[2]
else:
r.pairChrom = row[6]
self.aligned.processRead(row[0], r, paired)
id += 1
# Compress final cluster
self.aligned.finalizeExons()
self.aligned.finalizeUnmatched()
self.aligned.finalize_cross_bundle_reads()
bundle_id += 1
bundles.append(self.aligned.exons)
# Write to intermediate file
if intermediate_name:
if first:
# If it's the first bundle, write the header as well
with open(intermediate_name, 'w') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
first = False
else:
with open(intermediate_name, 'a') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)
junctions, maxReadLen = self.aligned.computeBuckets()
self.sortedJuncs = sorted(junctions.keys())
# Compress bundle to temporary file
if first:
mode = 'wb'
else:
mode = 'ab'
with open('temp.bin', mode) as f:
l = self.compressBundle(junctions, maxReadLen, f)
spliced_index.append(l)
leftovers = 0
for k,v in self.aligned.cross_bundle_reads.items():
#if len(v) > 0:
# print(k)
# print(v)
# exit()
leftovers += len(v)
print('%d cross-bundle reads unmatched' % leftovers)
bundle_lens = [c[-1]-c[0] for c in bundles]
print('Minimum bundle length: %d' % min(bundle_lens))
print('Maximum bundle length: %d' % max(bundle_lens))
print('Average bundle length: %d'% (sum(bundle_lens) / len(bundle_lens)))
# Write index information and append spliced and unspliced files
with open(compressed_name, 'wb') as f:
s = binaryIO.writeChroms(self.chromosomes)
s += binaryIO.writeClusters(bundles)
s += binaryIO.writeList(spliced_index)
f.write(s)
# Compress bundle-spanning buckets
self.compressCrossBundle(self.aligned.cross_bundle_buckets, self.aligned.max_cross_bundle_read_len, bundle_id, f)
# Move contents of temporary file to output file
with open('temp.bin', 'rb') as f2:
f.write(f2.read())
os.remove('temp.bin')
def compressBundle(self, junctions, maxReadLen, filehandle):
# Determine the number of bytes for read lengths
readLenBytes = binaryIO.findNumBytes(maxReadLen)
cluster = binaryIO.valToBinary(1, readLenBytes)
cluster += binaryIO.writeJunctionsList(self.sortedJuncs, 2)
self.totalSize += len(cluster)
# TODO: No need for junc_lens?
junc_lens = []
junc_string = b''
for j in self.sortedJuncs:
#if self.aligned.exons[0] == 100476370 and j == [2, None, 1]:
#
s, c, t = binaryIO.writeJunction(readLenBytes, junctions[j])
self.covSize += c
self.totalSize += t
junc_lens.append(len(s))
junc_string += s
#cluster += binaryIO.writeList(junc_lens)
cluster += junc_string
# Write to file
start = filehandle.tell()
filehandle.write(self.compressString(cluster))
# return length of cluster in file
return filehandle.tell() - start
def compressCrossBundle(self, cross_bundle_buckets, maxReadLen, num_bundles, filehandle):
'''
Compress the bundle-spanning buckets
'''
readLenBytes = binaryIO.findNumBytes(maxReadLen)
bundleIdBytes = binaryIO.findNumBytes(num_bundles)
buckets_sorted = sorted(cross_bundle_buckets.keys())
if len(buckets_sorted) > 0:
print('%d cross-bundle buckets' % len(buckets_sorted))
pos = filehandle.tell()
chunk_size = 20
num_chunks = math.ceil(len(buckets_sorted) / chunk_size)
chunk_lens = [0] * num_chunks
index = binaryIO.valToBinary(4, len(buckets_sorted))
index += binaryIO.valToBinary(2, chunk_size)
index += binaryIO.valToBinary(1, readLenBytes)
index += binaryIO.writeCrossBundleBucketNames(bundleIdBytes, cross_bundle_buckets, buckets_sorted)
self.totalSize += len(index)
main = b''
chunk = b''
chunk_id = 0
for i in range(len(buckets_sorted)):
b = buckets_sorted[i]
ch, c, t = binaryIO.writeCrossBundleBucket(readLenBytes, cross_bundle_buckets[b])
chunk += ch
self.covSize += c
self.totalSize += t
if (i+1) % chunk_size == 0:
compressed = self.compressString(chunk)
chunk_lens[chunk_id] = len(compressed)
chunk_id += 1
main += compressed
chunk = b''
if len(chunk) > 0:
compressed = self.compressString(chunk)
chunk_lens[chunk_id] = len(compressed)
main += compressed
index += binaryIO.writeList(chunk_lens)
index = self.compressString(index)
length = len(index)
numBytes = binaryIO.findNumBytes(length)
binaryIO.writeVal(filehandle, 1, numBytes)
binaryIO.writeVal(filehandle, numBytes, length)
filehandle.write(index)
filehandle.write(main)
print('Compressed size: %d' % (filehandle.tell() - pos))
else:
binaryIO.writeVal(filehandle, 1, 1)
binaryIO.writeVal(filehandle, 1, 0)
def parseCigar(self, cigar, offset):
''' Parse the cigar string starting at the given index of the genome
Returns a list of offsets for each exonic region of the read [(start1, end1), (start2, end2), ...]
'''
exons = []
newExon = True
# Parse cigar string
match = re.search(r"\D", cigar)
while match:
index = match.start()
length = int(''.join(cigar[:index]))
if cigar[index] == 'N':
# Separates contiguous exons, so set boolean to start a new one
newExon = True
elif cigar[index] == 'M':
# If in the middle of a contiguous exon, append the length to it, otherwise start a new exon
if newExon:
exons.append([offset, offset+length])
newExon = False
else:
exons[-1][1] += length
elif cigar[index] == 'D':
# If in the middle of a contiguous exon, append the deleted length to it
if not newExon:
exons[-1][1] += length
# Skip soft clipping
if not cigar[index] == 'S':
offset += length
cigar = cigar[index+1:]
match = re.search(r"\D", cigar)
return exons
def parseSAMHeader(self, header):
# In the order they appear in the header
chromNames = []
chromLens = []
# Dictionary contains chromosome lengths for lookup
for line in header.split('\n'):
if line[0:3] == '@SQ':
row = line.strip().split('\t')
chromNames.append(row[1][3:])
chromLens.append(int(row[2][3:]))
return [chromNames, chromLens]
def parseGTF(self, gtf, chromOffsets):
exons = set()
with open(gtf, 'r') as f:
for line in f:
row = line.rstrip().split('\t')
if row[2] == 'exon':
exons.add(int(row[3]) + chromOffsets[row[0]])
exons.add(int(row[4]) + chromOffsets[row[0]])
return sorted(list(exons))
def compressString(self, s):
''' Use a predefined python library to compress the given string.
Return the compressed string '''
if self.compressMethod == 0:
return self.zlib.compress(s)
elif self.compressMethod == 1:
return self.lzma.compress(s)
elif self.compressMethod == 2:
return self.bz2.compress(s)
```
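As a quick illustrative check of `parseCigar` (assuming the module is importable as `compress`; this is not part of the original test suite), a spliced alignment resolves into one exon interval per `M` block, with `N` advancing the offset and soft clips ignored:
```python
from compress import Compressor

c = Compressor(frag_len_cutoff=None)
# 10 matched bases, a 5-base intron (N), then 20 more matched bases, starting at 100:
print(c.parseCigar('10M5N20M', 100))   # [[100, 110], [115, 135]]
# Soft-clipped bases do not advance the genomic offset:
print(c.parseCigar('3S10M', 100))      # [[100, 110]]
```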
#### File: jpritt/boiler/enumeratePairs.py
```python
import argparse
import sys
def oneToOne(readsA, readsB):
if not len(readsA) == len(readsB):
return False
rA = [(int(r[3]), int(r[7])) for r in readsA]
rB = [(int(r[7]), int(r[3])) for r in readsB]
for r in rA:
if r in rB:
i = rB.index(r)
del rB[i]
else:
return False
return True
def writeReads(f_out, readsA, readsB):
numA = len(readsA)
numB = len(readsB)
if numA == 0:
print('Error! No left mates found for %s' % readsB[0][0])
exit()
if numB == 0:
print('Error! No right mates found for %s' % readsA[0][0])
exit()
chromsA = [r[2] for r in readsA]
startsA = [r[3] for r in readsA]
chromsB = [r[2] for r in readsB]
startsB = [r[3] for r in readsB]
NH = str(numA * numB)
for r in readsA:
# Set NH value
NH_set = False
for i in range(11, len(r)):
if r[i][:3] == 'NH:':
r[i] = 'NH:i:' + NH
NH_set = True
break
if not NH_set:
r.append('NH:i:' + NH)
# Set cigar string if necessary
if r[5] == '*':
for i in range(numB):
if readsB[i][2] == r[2] and readsB[i][3] == r[3] and readsB[i][7] == r[7] and readsB[i][3] == r[7]:
r[5] = readsB[i][5]
break
if r[5] == '*':
print('Unable to set cigar string for %s' % r[0])
exit()
# Pair with all mates
for i in range(numB):
if chromsB[i] == r[2]:
r[6] = '='
else:
r[6] = chromsB[i]
r[7] = startsB[i]
f_out.write('\t'.join(r) + '\n')
for r in readsB:
# Set NH value
NH_set = False
for i in range(11, len(r)):
if r[i][:3] == 'NH:':
r[i] = 'NH:i:' + NH
NH_set = True
break
if not NH_set:
r.append('NH:i:' + NH)
# Set cigar string if necessary
if r[5] == '*':
for i in range(numA):
if readsA[i][2] == r[2] and readsA[i][3] == r[3] and readsA[i][7] == r[7] and readsA[i][3] == r[7]:
r[5] = readsA[i][5]
break
if r[5] == '*':
print('Unable to set cigar string for %s' % r[0])
exit()
# Pair with all mates
for i in range(numA):
if chromsA[i] == r[2]:
r[6] = '='
else:
r[6] = chromsA[i]
r[7] = startsA[i]
f_out.write('\t'.join(r) + '\n')
def processHISAT(input, output):
with open(input, 'r') as f_in:
with open(output, 'w') as f_out:
currName = None
readsA = []
readsB = []
last = None
for line in f_in:
if line[0] == '@':
f_out.write(line)
continue
row = line.rstrip().split('\t')
if not currName == row[0]:
if readsA:
if oneToOne(readsA, readsB):
NH = str(len(readsA))
for r in readsA:
for i in range(11, len(r)):
if r[i][:2] == 'NH':
r[i] = 'NH:i:' + NH
break
f_out.write('\t'.join(r) + '\n')
NH = str(len(readsB))
for r in readsB:
for i in range(11, len(r)):
if r[i][:2] == 'NH':
r[i] = 'NH:i:' + NH
break
f_out.write('\t'.join(r) + '\n')
else:
writeReads(f_out, readsA, readsB)
currName = row[0]
readsA = []
readsB = []
if row[2] == '*' and row[3] == '0' and row[5] == '*':
continue
if row[6] == '*':
#print(row[0])
#exit()
f_out.write(line)
flags = int(row[1])
if (flags & 64):
readsA.append(row)
elif (flags & 128):
readsB.append(row)
else:
print('Read does not have a template flag set!')
print(line)
exit()
last = row[:]
if readsA:
if oneToOne(readsA, readsB):
NH = str(len(readsA))
for r in readsA:
for i in range(11, len(r)):
if r[i][:2] == 'NH':
r[i] = 'NH:i:' + NH
break
f_out.write('\t'.join(r) + '\n')
NH = str(len(readsB))
for r in readsB:
for i in range(11, len(r)):
if r[i][:2] == 'NH':
r[i] = 'NH:i:' + NH
break
f_out.write('\t'.join(r) + '\n')
else:
writeReads(f_out, readsA, readsB)
if __name__ == '__main__':
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--input', type=str, required=True, help='Full path of SAM file to process')
parser.add_argument('--output', type=str, required=True, help='Path and filename of SAM file to write')
args = parser.parse_args(sys.argv[1:])
processHISAT(args.input, args.output)
```
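`processHISAT` splits alignments into left and right mates using the standard SAM FLAG bits (0x40 = first segment in the template, 0x80 = last segment). A minimal sketch of that test on a hand-made row — the field values are invented for illustration:
```python
row = ['read1', '99', 'chr1', '1000', '50', '76M', '=', '1200', '276', '*', '*']
flags = int(row[1])

if flags & 64:        # 0x40: first segment in the template
    side = 'A (left mate)'
elif flags & 128:     # 0x80: last segment in the template
    side = 'B (right mate)'
else:
    side = 'unpaired / malformed'

print(side)   # flag 99 = 0x63 includes 0x40, so this row goes to readsA
```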
#### File: jpritt/boiler/iterator.py
```python
import binaryIO
class BIterator:
def __init__(self, compressed):
'''
:param compressed: Path to Boiler-compressed file
'''
try:
self.f = open(compressed, 'rb')
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
# Read index
self.chromosomes = binaryIO.readChroms(self.f)
self.bundles = binaryIO.readClusters(self.f)
self.spliced_index = binaryIO.readListFromFile(self.f)
self.curr_cluster = 0
self.num_clusters = len(self.clusters)
# Read index for cross-bundle buckets
num_bundles = len(self.bundles)
bundleIdBytes = binaryIO.findNumBytes(num_bundles)
numBytes = binaryIO.readVal(self.f, 1)
length = binaryIO.readVal(self.f, numBytes)
index = self.expandString(self.f.read(length))
self.num_buckets, startPos = binaryIO.binaryToVal(index, 4, start=0)
self.cross_bundle_chunk_size, startPos = binaryIO.binaryToVal(index, 2, startPos)
self.readLenBytes, startPos = binaryIO.binaryToVal(index, 1, startPos)
self.cross_bundle_buckets, startPos = binaryIO.readCrossBundleBucketNames(index, self.num_buckets, bundleIdBytes, startPos)
self.cross_bundle_chunk_lens, startPos = binaryIO.readList(index, startPos)
self.cross_bundle_start = self.f.tell()
def get_coverage(self):
if self.curr_cluster >= self.num_clusters:
return None
if self.cross_buckets_length > 0:
coverage = self.getAllCrossBucketsCoverage()
def get_alignments(self):
if self.curr_cluster >= self.num_clusters:
return None
def next(self):
self.curr_cluster += 1
if self.curr_cluster >= self.num_clusters:
return None
else:
return self.clusters[self.curr_cluster]
def getAllCrossBucketsCoverage(self):
self.f.seek(self.cross_bundle_start)
curr_bucket = 0
skip = 0
for l in self.cross_bundle_chunk_lens:
buckets_in_chunk = min(self.cross_bundle_chunk_size, self.num_buckets-curr_bucket)
relevant = [0] * buckets_in_chunk
last_relevant = -1
for i in range(buckets_in_chunk):
bundleA = self.cross_bundle_buckets[i+curr_bucket].bundleA
bundleB = self.cross_bundle_buckets[i+curr_bucket].bundleB
if (bundleA == self.curr_cluster) or (bundleB == self.curr_cluster):
relevant[i] = 1
last_relevant = i
if last_relevant == -1:
skip += l
else:
if skip > 0:
self.f.seek(skip, 1)
skip = 0
chunk = self.expandString(self.f.read(l))
startPos = 0
for i in range(last_relevant+1):
if relevant[i]:
b = self.cross_bundle_buckets[i+curr_bucket]
startPos = binaryIO.readCrossBundleBucket(chunk, b, self.readLenBytes, startPos)
b.coverage = self.RLEtoVector(b.coverage)
exonsA = self.bundles[b.bundleA]
exonsB = self.bundles[b.bundleB]
# Is this necessary?
exon_bounds = [(exonsA[e], exonsA[e+1]) for e in b.exonIdsA] + [(exonsB[e], exonsB[e+1]) for e in b.exonIdsB]
boundaries = [0]
for n in range(len(exon_bounds)):
boundaries.append(boundaries[-1] + exon_bounds[n][1]-exon_bounds[n][0])
coverage = self.getBucketCoverage(b, coverage, range_start, range_end, exon_bounds, boundaries)
else:
startPos = binaryIO.skipCrossBundleBucket(chunk, self.readLenBytes, startPos)
curr_bucket += buckets_in_chunk
if skip > 0:
self.f.seek(skip, 1)
return coverage
def getBucketCoverage(self, bucket, coverage, subexon_bounds, boundaries):
NH = float(bucket.NH)
for i in range(len(boundaries)-1):
if subexon_bounds[i][1] < range_start:
continue
elif subexon_bounds[i][0] >= range_end:
break
if subexon_bounds[i][0] <= range_start:
subexon_start_id = range_start - subexon_bounds[i][0]
range_start_id = 0
else:
subexon_start_id = 0
range_start_id = subexon_bounds[i][0] - range_start
if subexon_bounds[i][1] <= range_end:
subexon_end_id = subexon_bounds[i][1] - subexon_bounds[i][0]
range_end_id = subexon_bounds[i][1] - range_start
else:
subexon_end_id = range_end - subexon_bounds[i][0]
range_end_id = range_end - range_start
# Add junc_coverage[subexon_start_id:subexon_end_id] to coverage[range_start_id:range_end_id]
range_id = range_start_id
for j in range(boundaries[i]+subexon_start_id, boundaries[i]+subexon_end_id):
c = bucket.coverage[j]
if c > 0:
coverage[range_id] += c / NH
range_id += 1
return coverage
def expandString(self, s):
''' Use a predefined python library to expand the given string.
Return the decompressed string '''
if self.compressMethod == 0:
return self.zlib.decompress(s)
elif self.compressMethod == 1:
return self.lzma.decompress(s)
elif self.compressMethod == 2:
return self.bz2.decompress(s)
```
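`getAllCrossBucketsCoverage` calls `self.RLEtoVector`, which is defined elsewhere in the package. Assuming the run-length encoding stores `[value, run_length]` pairs (an assumption based on the usage here, not confirmed by this file), the expansion would look roughly like:
```python
def rle_to_vector(rle):
    # Assumed RLE format: [[value, run_length], ...] -> flat per-base coverage vector
    vector = []
    for value, run_length in rle:
        vector += [value] * run_length
    return vector

# rle_to_vector([[0, 3], [2, 2], [1, 4]]) -> [0, 0, 0, 2, 2, 1, 1, 1, 1]
```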
#### File: jpritt/boiler/testMem.py
```python
import resource
import subprocess
import threading
import sys
max_mem = 0
def finish():
print('Maximum memory: %0.1f MB' % (max_mem / 1000.0))
#exit()
sam_prefix = sys.argv[2]
def run():
if sys.argv[1] == 'test':
proc = subprocess.Popen(['./test.py'])
elif sys.argv[1] == 'cram':
print('Running CRAM')
#proc = subprocess.Popen(['java', '-Xmx16g', '-jar', '/scratch0/langmead-fs1/shared/cramtools/cramtools-3.0.jar', 'cram', '-I', sam_prefix+'.bam', '-R', '/scratch0/langmead-fs1/user/jacob/genomes/Drosophila_melanogaster/Ensembl/BDGP5/Sequence/WholeGenomeFasta/genome.fa', '-O', 'compressed/compressed.cram'])
proc = subprocess.Popen(['java', '-Xmx16g', '-jar', '/scratch0/langmead-fs1/shared/cramtools/cramtools-3.0.jar', 'cram', '-I', sam_prefix+'.bam', '-R', '/scratch0/langmead-fs1/shared/references/hg19/fasta/hg19.fa', '-O', 'compressed/compressed.cram'])
elif sys.argv[1] == 'goby':
print('Running Goby')
proc = subprocess.Popen(['goby', '16g', 'sam-to-compact', '-i', sam_prefix+'.bam', '-o', 'compressed/compressed.goby'])
else:
print('Running Boiler')
proc = subprocess.Popen(['/scratch0/langmead-fs1/shared/pypy3-2.4-linux_x86_64-portable/bin/pypy3', '/scratch0/langmead-fs1/user/jacob/compress-alignments/boiler.py', 'compress', '--frag-len-z-cutoff', '0.125', '--split-discordant', '--split-diff-strands', sam_prefix+'.sam', 'compressed/compressed.bin'])
proc.wait()
finish()
return
'''
max_mem = 0
while proc.poll() is None:
mem = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
print(mem)
if mem > max_mem:
max_mem = mem
print(max_mem)
'''
thread = threading.Thread(target=run)
thread.start()
while thread.is_alive():
mem = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
if mem > max_mem:
max_mem = mem
``` |
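On Linux, `ru_maxrss` from `getrusage` is reported in kilobytes (macOS reports bytes), which is why the script divides by 1000 to print megabytes. The monitoring loop above spins continuously; a slightly gentler variant of the same loop polls on a timeout instead:
```python
while thread.is_alive():
    thread.join(timeout=0.1)  # wake ~10 times per second instead of busy-waiting
    mem = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
    if mem > max_mem:
        max_mem = mem
```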
{
"source": "JPro173/sbedecoder",
"score": 2
} |
#### File: sbedecoder/scripts/mdp_base64_decoder.py
```python
import sys
import os.path
from sbedecoder import MDPSchema
from sbedecoder import MDPMessageFactory
from sbedecoder import SBEParser
import mdp.prettyprinter
import mdp.secdef
import mdp.decode
import base64
import gzip
import re
from datetime import datetime
def process_file(base64data_filename, mdp_parser, secdef, pretty_print, print_data, skip_fields):
re_binascii_replay = re.compile(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}.\d{6})\s-\s((?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?)$')
with gzip.open(base64data_filename, 'rb') if base64data_filename.endswith('.gz') else open(base64data_filename, 'rb') as packets:
line_number = 0
for packet_bytes in packets:
packet = packet_bytes.decode("utf-8")
line_number += 1
m = re_binascii_replay.match(packet)
if m:
packet_data_ts = m.group(1)
packet_dt = datetime.strptime(packet_data_ts, '%Y-%m-%d %H:%M:%S.%f')
packet_data_binascii = m.group(2)
packet_data = base64.b64decode(packet_data_binascii)
if print_data:
print('data: {}'.format(packet_data_binascii))
mdp.decode.decode_packet(mdp_parser, packet_dt, packet_data, skip_fields, print_data, pretty_print, secdef, line_number)
def process_command_line():
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
parser = ArgumentParser(
description='Parse a text file containing base64 encoded CME MDP3 market\n'
'data based on a SBE xml schema file. File format is:\n'
'\n'
' YYYY-MM-DD HH:MM:SS.ffffff - data\n'
'\n'
'where "data" is the base64 encoded data\n',
formatter_class=RawDescriptionHelpFormatter
)
parser.add_argument('base64data',
help='Name of the base64 encoded file to process')
parser.add_argument('-s', '--schema', default='templates_FixBinary.xml',
help='Name of the SBE schema xml file')
default_skip_fields = 'message_size,block_length,template_id,schema_id,version'
parser.add_argument('-f', '--skip-fields', default=default_skip_fields,
help='Don\'t print these message fields (default={})'.format(default_skip_fields))
parser.add_argument('--print-data', action='store_true',
help='Print the data as an ascii hex string (default: %(default)s)')
parser.add_argument('--pretty', action='store_true',
help='Print the message with a pretty format')
parser.add_argument('--secdef',
help='Name of the security definition file for augmenting logs with symbols')
args = parser.parse_args()
# check number of arguments, verify values, etc.:
if not os.path.isfile(args.schema):
parser.error('sbe schema xml file \'{}\' not found'.format(args.schema))
return args
def main(argv=None):
args = process_command_line()
# Read in the schema xml as a dictionary and construct the various schema objects
mdp_schema = MDPSchema()
mdp_schema.parse(args.schema)
msg_factory = MDPMessageFactory(mdp_schema)
mdp_parser = SBEParser(msg_factory)
secdef = None
if args.secdef:
secdef = mdp.secdef.SecDef()
secdef.load(args.secdef)
skip_fields = set(args.skip_fields.split(','))
process_file(args.base64data, mdp_parser, secdef, args.pretty, args.print_data, skip_fields)
return 0 # success
if __name__ == '__main__':
status = main()
sys.exit(status)
``` |
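The regular expression in `process_file` expects one timestamped, base64-encoded packet per line. A small self-contained check against a synthetic line — the payload is just the base64 encoding of `b'example'`, not real MDP3 data:
```python
import re
import base64
from datetime import datetime

re_binascii_replay = re.compile(
    r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}.\d{6})\s-\s'
    r'((?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?)$')

line = '2019-03-01 08:30:00.000123 - ZXhhbXBsZQ=='
m = re_binascii_replay.match(line)
packet_dt = datetime.strptime(m.group(1), '%Y-%m-%d %H:%M:%S.%f')
packet_data = base64.b64decode(m.group(2))
print(packet_dt, packet_data)   # 2019-03-01 08:30:00.000123 b'example'
```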